hexsha
stringlengths 40
40
| size
int64 6
14.9M
| ext
stringclasses 1
value | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 6
260
| max_stars_repo_name
stringlengths 6
119
| max_stars_repo_head_hexsha
stringlengths 40
41
| max_stars_repo_licenses
sequence | max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 6
260
| max_issues_repo_name
stringlengths 6
119
| max_issues_repo_head_hexsha
stringlengths 40
41
| max_issues_repo_licenses
sequence | max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 6
260
| max_forks_repo_name
stringlengths 6
119
| max_forks_repo_head_hexsha
stringlengths 40
41
| max_forks_repo_licenses
sequence | max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | avg_line_length
float64 2
1.04M
| max_line_length
int64 2
11.2M
| alphanum_fraction
float64 0
1
| cells
sequence | cell_types
sequence | cell_type_groups
sequence |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e73fb066d24a1ad276b7dfeb7ad05ab1bba543b0 | 6,681 | ipynb | Jupyter Notebook | test_install.ipynb | Team36-ntds2018/ntds_2018 | 0fad373410fd7dc393887f31dc195dc6ff88169b | [
"MIT"
] | null | null | null | test_install.ipynb | Team36-ntds2018/ntds_2018 | 0fad373410fd7dc393887f31dc195dc6ff88169b | [
"MIT"
] | null | null | null | test_install.ipynb | Team36-ntds2018/ntds_2018 | 0fad373410fd7dc393887f31dc195dc6ff88169b | [
"MIT"
] | null | null | null | 22.647458 | 413 | 0.51609 | [
[
[
"# [NTDS'18]: test your installation\n[ntds'18]: https://github.com/mdeff/ntds_2018\n\n[Michaël Defferrard](http://deff.ch), [EPFL LTS2](http://lts2.epfl.ch)",
"_____no_output_____"
],
[
"This is a mini \"test\" Jupyter notebook to make sure the main packages we'll use are installed.\nRun it after following the [installation instructions](https://github.com/mdeff/ntds_2018#installation).",
"_____no_output_____"
],
[
"## Standalone dependencies\n\nIf you get a `command not found` error, try to run `conda install <package-name>` (in the `ntds_2018` environment, i.e., after `conda activate ntds_2018`).",
"_____no_output_____"
]
],
[
[
"!git --version",
"git version 2.18.0.windows.1\n"
],
[
"!python --version",
"Python 3.7.0\n"
],
[
"!jupyter --version\n!jupyter-notebook --version\n# !jupyter-lab --version",
"4.4.0\n5.6.0\n"
],
[
"!ipython --version",
"6.5.0\n"
]
],
[
[
"## Python packages\n\nIf you get a `ModuleNotFoundError` error, try to run `conda install <package-name>` (in the `ntds_2018` environment, i.e., after `conda activate ntds_2018`).",
"_____no_output_____"
]
],
[
[
"import numpy as np\nnp.__version__",
"_____no_output_____"
],
[
"import scipy\nscipy.__version__",
"_____no_output_____"
],
[
"import pandas as pd\npd.__version__",
"_____no_output_____"
],
[
"import matplotlib as mpl\nmpl.__version__",
"_____no_output_____"
],
[
"import networkx as nx\nnx.__version__",
"_____no_output_____"
],
[
"import pygsp\npygsp.__version__",
"_____no_output_____"
]
],
[
[
"## Small test",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"graph = pygsp.graphs.Logo()\ngraph.estimate_lmax()\nfilt = pygsp.filters.Heat(graph, tau=100)\nDELTAS = [20, 30, 1090]\nsignal = np.zeros(graph.N)\nsignal[DELTAS] = 1\nsignal = filt.filter(signal)\ngraph.plot_signal(signal, highlight=DELTAS)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e73fc209878659ef51d1d60efd507dabd96e61dd | 636,744 | ipynb | Jupyter Notebook | fitting.ipynb | st3107/19st_tio2b_notebooks | ff1f87cbfebaef2fe298ea50e4f94de1e96e8ef6 | [
"BSD-3-Clause"
] | null | null | null | fitting.ipynb | st3107/19st_tio2b_notebooks | ff1f87cbfebaef2fe298ea50e4f94de1e96e8ef6 | [
"BSD-3-Clause"
] | null | null | null | fitting.ipynb | st3107/19st_tio2b_notebooks | ff1f87cbfebaef2fe298ea50e4f94de1e96e8ef6 | [
"BSD-3-Clause"
] | null | null | null | 823.730918 | 84,892 | 0.947866 | [
[
[
"from pathlib import Path\n\nimport pdfstream.parsers as ps\nimport matplotlib.pyplot as plt\nimport pdfstream.modeling as md\nimport pdfstream.io as io\nimport pymongo\n\nimport numpy as np\nimport pandas as pd\n\n# matplotlib settings\nstyle_file = Path(\"/Users/sst/PycharmProjects/mpl-stylesheets/bg_style\")\nif style_file.is_file():\n plt.style.use(str(style_file))\ndel style_file\n\ndef readDF():\n \"\"\"read metadata of analysis.\"\"\"\n return pd.read_csv(\"data_info.csv\", index_col=0)\n\nDF = readDF()\nclient = pymongo.MongoClient()\ndb = client.tio2b\nfits = db.fits\ndata = db.data",
"_____no_output_____"
],
[
"data0 = data.find_one({\"meta_data.sample\": \"JBNP33L\"})",
"_____no_output_____"
],
[
"def ligand(r, A, sigma, phi, wavelength):\n \"\"\"A simulated PDF for ligands.\"\"\"\n return A * np.exp(- r ** 2 / (2 * sigma ** 2)) * np.cos(2 * np.pi * r / wavelength + phi)",
"_____no_output_____"
],
[
"bronze = io.load_crystal(\"stru/TiO2_Bronze_mp.cif\")\nanatase = io.load_crystal(\"stru/TiO2_Anatase.cif\")\ndata = io.load_parser(data0['gr_file'], meta={\"qdamp\":0.0313, \"qbroad\": 0.0131})",
"_____no_output_____"
],
[
"recipe0 = md.create(\n \"JBNP33L\",\n data,\n (1.6, 15.0, 0.01),\n \"f0 * G0\",\n {\"f0\": md.F.lognormalSphericalCF},\n {\"G0\": bronze}\n)\nmd.initialize(recipe0)\nmd.set_values(\n recipe0,\n {\"f0_psize\": 40., \"f0_psig\": 10.}\n)\nmd.bound_ranges(\n recipe0,\n {\"f0_psize\": (0.,), \"f0_psig\": (0.,)}\n);",
"_____no_output_____"
],
[
"STEPS0 = [\n (\"f0\", \"G0_scale\"),\n \"G0_lat\",\n (\"G0_adp\", \"G0_delta2\"),\n \"G0_xyz\"\n]\nmd.optimize(recipe0, STEPS0, verbose=1)\nmd.view_fits(recipe0)\nmd.set_range(recipe0, rmax=50.)\nmd.optimize(recipe0, STEPS0, verbose=1)\nmd.view_fits(recipe0)",
"Start fit with all parameters fixed.\nFree f0, G0_scale ...\n`ftol` termination condition is satisfied.\nFunction evaluations 16, initial cost 5.0000e-01, final cost 1.7326e-01, first-order optimality 8.24e-05.\nFree G0_lat ...\n`ftol` termination condition is satisfied.\nFunction evaluations 48, initial cost 1.7326e-01, final cost 4.5695e-02, first-order optimality 1.75e-03.\nFree G0_adp, G0_delta2 ...\n`ftol` termination condition is satisfied.\nFunction evaluations 20, initial cost 4.5695e-02, final cost 3.7567e-02, first-order optimality 2.26e-04.\nFree G0_xyz ...\n`xtol` termination condition is satisfied.\nFunction evaluations 20, initial cost 3.7567e-02, final cost 1.5169e-02, first-order optimality 9.97e-03.\n"
]
],
[
[
"dct = ps.recipe_to_dict(recipe0)\ndct[\"data_id\"] = data0[\"_id\"]\nfits.insert_one(dct)",
"_____no_output_____"
]
],
[
[
"recipe1 = md.create(\n \"JBNP33L\",\n data,\n (1.6, 15.0, 0.01),\n \"f0 * G0 + f1 * G1\",\n {\"f0\": md.F.lognormalSphericalCF, \"f1\": md.F.sphericalCF},\n {\"G0\": bronze, \"G1\": anatase}\n)\nmd.initialize(recipe1)\nmd.set_values(recipe1, {\"f1_psize\": 60.})\nmd.bound_ranges(recipe1, {\"f1_psize\": (0.,)})\nmd.set_values(recipe1, md.get_value_dct(recipe0))\nmd.bound_ranges(recipe1, md.get_bound_dct(recipe0))",
"_____no_output_____"
],
[
"STEPS1 = [\n (\"f1\", \"G1_scale\"),\n \"G1_lat\",\n (\"G1_adp\", \"G1_delta2\"),\n \"G1_xyz\"\n]\nmd.optimize(recipe1, STEPS0 + STEPS1, verbose=1)\nmd.view_fits(recipe1)\nmd.set_range(recipe1, rmax=50.)\nmd.optimize(recipe1, STEPS0 + STEPS1, verbose=1)\nmd.view_fits(recipe1)",
"Start fit with all parameters fixed.\nFree f0, G0_scale ...\n`gtol` termination condition is satisfied.\nFunction evaluations 4, initial cost 1.5771e-02, final cost 1.5765e-02, first-order optimality 7.05e-07.\nFree G0_lat ...\n`xtol` termination condition is satisfied.\nFunction evaluations 12, initial cost 1.5765e-02, final cost 1.5524e-02, first-order optimality 1.21e-01.\nFree G0_adp, G0_delta2 ...\nBoth `ftol` and `xtol` termination conditions are satisfied.\nFunction evaluations 10, initial cost 1.5524e-02, final cost 1.5346e-02, first-order optimality 2.72e-03.\nFree G0_xyz ...\n`xtol` termination condition is satisfied.\nFunction evaluations 11, initial cost 1.5346e-02, final cost 1.5074e-02, first-order optimality 2.61e-02.\nFree f1, G1_scale ...\n`ftol` termination condition is satisfied.\nFunction evaluations 23, initial cost 1.5074e-02, final cost 8.4057e-03, first-order optimality 2.23e-03.\nFree G1_lat ...\n`ftol` termination condition is satisfied.\nFunction evaluations 14, initial cost 8.4057e-03, final cost 8.3215e-03, first-order optimality 2.68e-03.\nFree G1_adp, G1_delta2 ...\n"
]
],
[
[
"dct = ps.recipe_to_dict(recipe1)\ndct[\"data_id\"] = data0[\"_id\"]\nfits.insert_one(dct)",
"_____no_output_____"
]
],
[
[
"recipe2 = md.create(\n \"JBNP33L\",\n data,\n (1.6, 15.0, 0.01),\n \"f0 * G0 + f1 * G1 + f2\",\n {\"f0\": md.F.lognormalSphericalCF, \"f1\": md.F.sphericalCF, \"f2\": ligand},\n {\"G0\": bronze, \"G1\": anatase}\n)\nmd.initialize(recipe2)\nmd.set_values(\n recipe2,\n {\n \"f2_A\": 0.,\n \"f2_sigma\": 10,\n \"f2_phi\": np.pi,\n \"f2_wavelength\": 4.\n }\n)\nmd.set_values(recipe2, md.get_value_dct(recipe1))\nmd.bound_ranges(\n recipe2,\n {\n \"f2_A\": (0.,),\n \"f2_sigma\": (0.,),\n \"f2_phi\": (0., 2 * np.pi),\n \"f2_wavelength\": (0., 10.)\n }\n)\nmd.bound_ranges(recipe2, md.get_bound_dct(recipe1));",
"_____no_output_____"
],
[
"STEPS2 = [\n \"f2\"\n]\nmd.optimize(recipe2, STEPS0 + STEPS1 + STEPS2, verbose=1)\nmd.view_fits(recipe2)\nmd.set_range(recipe2, rmax=50.)\nmd.optimize(recipe2, STEPS0 + STEPS1 + STEPS2, verbose=1)\nmd.view_fits(recipe2)",
"Start fit with all parameters fixed.\nFree f0, G0_scale ...\n`ftol` termination condition is satisfied.\nFunction evaluations 14, initial cost 8.1971e-03, final cost 8.1425e-03, first-order optimality 1.14e-04.\nFree G0_lat ...\n`ftol` termination condition is satisfied.\nFunction evaluations 4, initial cost 8.1425e-03, final cost 8.0765e-03, first-order optimality 6.57e-05.\nFree G0_adp, G0_delta2 ...\n`ftol` termination condition is satisfied.\nFunction evaluations 7, initial cost 8.0765e-03, final cost 8.0339e-03, first-order optimality 6.59e-05.\nFree G0_xyz ...\n`xtol` termination condition is satisfied.\nFunction evaluations 13, initial cost 8.0339e-03, final cost 7.9539e-03, first-order optimality 3.19e-03.\nFree f1, G1_scale ...\n`ftol` termination condition is satisfied.\nFunction evaluations 21, initial cost 7.9539e-03, final cost 7.7242e-03, first-order optimality 4.37e-04.\nFree G1_lat ...\nBoth `ftol` and `xtol` termination conditions are satisfied.\nFunction evaluations 8, initial cost 7.7242e-03, final cost 7.7108e-03, first-order optimality 9.63e-04.\nFree G1_adp, G1_delta2 ...\n`xtol` termination condition is satisfied.\nFunction evaluations 10, initial cost 7.7108e-03, final cost 7.7083e-03, first-order optimality 1.24e-02.\nFree G1_xyz ...\n`xtol` termination condition is satisfied.\nFunction evaluations 11, initial cost 7.7083e-03, final cost 7.6971e-03, first-order optimality 5.56e-02.\nFree f2 ...\n`xtol` termination condition is satisfied.\nFunction evaluations 26, initial cost 7.6971e-03, final cost 5.0666e-03, first-order optimality 5.00e-02.\n"
]
],
[
[
"dct = ps.recipe_to_dict(recipe2)\ndct[\"data_id\"] = data0[\"_id\"]\nfits.insert_one(dct)",
"_____no_output_____"
],
[
"con = recipe2.JBNP33L\nr = con.evaluateEquation(\"r\")\ng0 = con.evaluateEquation(\"f0 * G0\")\ng1 = con.evaluateEquation(\"f1 * G1\")\ng2 = con.evaluateEquation(\"f2\")\ncomponents = np.stack((r, g0, g1, g2))\nnp.savetxt(\"info/JBNP33L_grs.gr\", components.T, header=\"r bronze anatase ligand\")",
"_____no_output_____"
]
],
[
[
"recipe3 = md.create(\n \"JBNP33L\",\n data,\n (1.6, 50.0, 0.01),\n \"f0 * G0 + f1 * G1 + f2\",\n {\"f0\": md.F.sphericalCF, \"f1\": md.F.shellCF, \"f2\": ligand},\n {\"G0\": bronze, \"G1\": anatase}\n)\nmd.initialize(recipe3)\nrecipe3.constrain(\"f0_psize\", \"f1_radius\")\nmd.set_values(\n recipe3,\n {\"f1_radius\": 45., \"f1_thickness\":45}\n)\nmd.bound_ranges(\n recipe3,\n {\"f1_radius\": (0.,), \"f1_thickness\": (0.,)}\n)\nmd.set_values(recipe3, md.get_value_dct(recipe2), ignore=True)\nmd.bound_ranges(recipe3, md.get_bound_dct(recipe2), ignore=True)",
"_____no_output_____"
],
[
"STEPS3 = [\n (\"f0\", \"f1\", \"f2\", \"scale\"),\n \"lat\",\n (\"adp\", \"delta\"),\n \"xyz\"\n]\nmd.optimize(recipe3, STEPS3, verbose=1)\nmd.view_fits(recipe3)",
"Start fit with all parameters fixed.\nFree f0, f1, f2, scale ...\n`gtol` termination condition is satisfied.\nFunction evaluations 1, initial cost 6.8843e-03, final cost 6.8843e-03, first-order optimality 9.03e-07.\nFree lat ...\n`ftol` termination condition is satisfied.\nFunction evaluations 2, initial cost 6.8843e-03, final cost 6.8843e-03, first-order optimality 1.19e-04.\nFree adp, delta ...\n`ftol` termination condition is satisfied.\nFunction evaluations 7, initial cost 6.8843e-03, final cost 6.8345e-03, first-order optimality 8.78e-05.\nFree xyz ...\n`ftol` termination condition is satisfied.\nFunction evaluations 9, initial cost 6.8345e-03, final cost 6.7532e-03, first-order optimality 7.23e-04.\n"
]
],
[
[
"con = recipe3.JBNP33L\nr = con.evaluateEquation(\"r\")\ng0 = con.evaluateEquation(\"f0 * G0\")\ng1 = con.evaluateEquation(\"f1 * G1\")\ng2 = con.evaluateEquation(\"f2\")\ncomponents = np.stack((r, g0, g1, g2))\nnp.savetxt(\"info/JBNP33L_core_shell_grs.gr\", components.T, header=\"r bronze anatase ligand\")",
"_____no_output_____"
],
[
"dct = ps.recipe_to_dict(recipe3)\ndct[\"data_id\"] = data0[\"_id\"]\nfits.insert_one(dct)",
"_____no_output_____"
]
],
[
[
"recipe4 = md.create(\n \"JBNP33L\",\n data,\n (1.6, 50.0, 0.01),\n \"f0 * G0\",\n {\"f0\": md.F.sphericalCF},\n {\"G0\": bronze}\n)\nmd.initialize(recipe4)\nmd.set_values(\n recipe4,\n md.get_value_dct(recipe0),\n ignore=True\n)\nmd.set_values(\n recipe4,\n {\"f0_psize\": 50.}\n)\nmd.bound_ranges(\n recipe4,\n {\"f0_psize\": (0.,)}\n);",
"_____no_output_____"
],
[
"STEPS4 = [\n (\"f0\", \"G0_scale\"),\n \"G0_lat\",\n (\"G0_adp\", \"G0_delta2\"),\n \"G0_xyz\"\n]\nmd.optimize(recipe4, STEPS4, verbose=1)\nmd.view_fits(recipe4)",
"Start fit with all parameters fixed.\nFree f0, G0_scale ...\n`gtol` termination condition is satisfied.\nFunction evaluations 4, initial cost 3.6737e-02, final cost 3.5882e-02, first-order optimality 2.76e-06.\nFree G0_lat ...\n`ftol` termination condition is satisfied.\nFunction evaluations 4, initial cost 3.5882e-02, final cost 3.5287e-02, first-order optimality 4.86e-04.\nFree G0_adp, G0_delta2 ...\n`ftol` termination condition is satisfied.\nFunction evaluations 7, initial cost 3.5287e-02, final cost 2.6095e-02, first-order optimality 4.35e-04.\nFree G0_xyz ...\n`xtol` termination condition is satisfied.\nFunction evaluations 14, initial cost 2.6095e-02, final cost 2.1162e-02, first-order optimality 1.46e-02.\n"
]
],
[
[
"dct = ps.recipe_to_dict(recipe4)\ndct[\"data_id\"] = data0[\"_id\"]\nfits.insert_one(dct)",
"_____no_output_____"
]
]
] | [
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw",
"code",
"raw"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code"
],
[
"raw"
],
[
"code",
"code"
],
[
"raw",
"raw"
],
[
"code",
"code"
],
[
"raw",
"raw"
],
[
"code",
"code"
],
[
"raw"
]
] |
e73fc6f0b92d4f5640c2b7ef39d180401c8888d4 | 159,276 | ipynb | Jupyter Notebook | notebooks/05_odhadovanie.ipynb | cedeerwe/slobodna-akademia | 10ea3ac5935da19419af5600934cc5df8b45a4f6 | [
"MIT"
] | 3 | 2020-04-11T18:53:55.000Z | 2020-04-20T13:48:19.000Z | notebooks/05_odhadovanie.ipynb | cedeerwe/slobodna-akademia | 10ea3ac5935da19419af5600934cc5df8b45a4f6 | [
"MIT"
] | 7 | 2019-07-21T17:54:16.000Z | 2020-02-24T20:37:25.000Z | notebooks/05_odhadovanie.ipynb | cedeerwe/brutalna-akademia | 10ea3ac5935da19419af5600934cc5df8b45a4f6 | [
"MIT"
] | null | null | null | 80.809741 | 17,359 | 0.543089 | [
[
[
"<a href=\"https://colab.research.google.com/github/cedeerwe/brutalna-akademia/blob/master/notebooks/05_odhadovanie.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Pomocný kód\n\nNeotvárajte, obsahuje časti riešení.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport io \nimport numpy as np\n\ndef get_age_data():\n csv_data = io.StringIO(\"\"\"\nUkazovate�;;;1996;1997;1998;1999;2000;2001;2002;2003;2004;2005;2006;2007;2008;2009;2010;2011;2012;2013;2014;2015;2016;2017;2018\n;;;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu;Spolu\nStav trvale b�vaj�ceho obyvate�stva k 31.12. (Osoba);Slovensk� republika;Spolu;5378932;5387650;5393382;5398657;5402547;5378951;5379161;5380053;5384822;5389180;5393637;5400998;5412254;5424925;5435273;5404322;5410836;5415949;5421349;5426252;5435343;5443120;5450421\n;;Nula rokov;59594;58675;57157;55831;54741;50861;50519;51384;53451;54110;53637;54194;57151;60981;60164;60598;55828;55175;55513;56110;58277;58735;58445\n;;1 rok;60762;59516;58586;57064;55758;53924;50805;50459;51355;53386;54086;53627;54221;57167;60952;57712;61091;56389;55747;56119;56967;59054;59484\n;;2 roky;65623;60738;59499;58574;57043;55632;53919;50804;50458;51353;53383;54108;53648;54231;57189;59722;57903;61242;56576;55942;56394;57279;59358\n;;3 roky;72487;65612;60693;59466;58556;56386;55635;53905;50803;50472;51370;53398;54121;53659;54270;57077;59805;58025;61365;56678;56156;56604;57499\n;;4 roky;73665;72483;65578;60695;59464;57845;56395;55651;53914;50822;50469;51390;53409;54138;53663;54439;57141;59825;58073;61426;56811;56274;56746\n;;5 rokov;77423;73647;72488;65569;60697;59967;57858;56403;55642;53936;50830;50469;51412;53423;54149;54008;54464;57122;59854;58113;61481;56905;56360\n;;6 rokov;77329;77413;73636;72488;65552;61038;59961;57863;56400;55644;53939;50842;50472;51403;53432;54527;54015;54485;57082;59818;58108;61494;56950\n;;7 rokov;77498;77335;77403;73636;72476;65833;61035;59970;57878;56407;55631;53941;50845;50485;51403;54020;54516;53958;54460;57040;59836;58151;61526\n;;8 rokov;80736;77493;77330;77399;73644;72234;65821;61047;59968;57896;56413;55636;53950;50842;50483;51694;54003;54528;53935;54444;57021;59839;58156\n;;9 
rokov;81383;80726;77480;77316;77406;73797;72213;65840;61057;59970;57901;56415;55636;53960;50835;50915;51673;54024;54502;53922;54440;57009;59830\n;;10 rokov;84299;81377;80720;77477;77308;77316;73778;72221;65833;61073;59972;57904;56418;55639;53962;51547;50889;51681;53991;54471;53908;54445;57002\n;;11 rokov;87553;84296;81365;80707;77464;78778;77319;73788;72229;65842;61075;59976;57916;56421;55638;55037;51540;50879;51656;53965;54458;53903;54438\n;;12 rokov;88112;87546;84278;81361;80718;79019;78780;77324;73798;72246;65841;61066;59974;57920;56430;55943;55028;51567;50857;51634;53955;54452;53892\n;;13 rokov;88991;88102;87531;84259;81356;81940;79016;78783;77335;73805;72256;65832;61061;59982;57911;56763;55925;55048;51541;50821;51614;53941;54435\n;;14 rokov;89442;88991;88097;87532;84242;82400;81937;79014;78794;77346;73819;72246;65835;61069;59976;58570;56755;55977;55029;51540;50802;51616;53921\n;;15 rokov;89908;89460;88995;88110;87528;86016;82390;81947;79012;78796;77343;73828;72255;65836;61045;59618;58546;56848;55938;54989;51526;50796;51608\n;;16 rokov;91492;89904;89464;88991;88105;88661;86003;82385;81936;79030;78799;77330;73820;72260;65836;60945;59596;58573;56824;55918;54969;51522;50792\n;;17 rokov;96177;91458;89882;89443;88960;88771;88622;85994;82384;81918;79025;78799;77330;73826;72262;65684;60930;59702;58552;56806;55899;54963;51517\n;;18 rokov;95853;96138;91409;89852;89406;89251;88726;88596;85982;82386;81919;79105;78861;77353;73852;72237;65680;60988;59685;58544;56816;55888;54960\n;;19 rokov;95458;95801;96081;91370;89816;90582;89202;88681;88575;85976;82387;82064;79246;78911;77385;73411;72240;65797;60991;59712;58544;56814;55900\n;;20 rokov;95166;95411;95742;96043;91321;90787;90531;89154;88659;88569;85989;82551;82241;79302;78938;76873;73419;72344;65799;61026;59749;58543;56810\n;;21 rokov;93600;95095;95349;95673;95984;92154;90750;90461;89127;88661;88616;86133;82726;82320;79334;78299;76875;73547;72349;65825;61072;59764;58575\n;;22 
rokov;92999;93551;95050;95291;95601;96113;92068;90701;90463;89130;88707;88770;86280;82820;82355;78366;78320;77030;73557;72393;65865;61087;59798\n;;23 rokov;88274;92947;93502;95014;95213;95152;96023;92030;90689;90489;89172;88871;88961;86368;82876;80907;78382;78465;77053;73599;72421;65895;61111\n;;24 rokov;83445;88257;92880;93423;94965;94433;95042;95951;91999;90680;90548;89349;89040;89049;86424;81759;80929;78459;78487;77122;73647;72493;65943\n;;25 rokov;78731;83418;88207;92858;93378;94647;94358;94984;95932;91991;90712;90680;89461;89130;89092;84175;81756;80948;78465;78506;77146;73678;72495\n;;26 rokov;76250;78725;83408;88161;92777;92123;94553;94292;94949;95965;92052;90818;90812;89521;89214;87339;84172;81765;80927;78488;78547;77158;73738\n;;27 rokov;74300;76219;78694;83346;88114;91704;92052;94495;94284;94941;95967;92196;90986;90861;89589;87476;87324;84055;81737;80962;78501;78559;77169\n;;28 rokov;71048;74293;76213;78674;83298;86938;91617;91969;94475;94297;94938;96083;92350;91056;90888;87808;87456;87131;84035;81752;80987;78484;78513\n;;29 rokov;71713;71032;74263;76181;78610;82191;86864;91574;91938;94446;94332;95062;96216;92399;91069;88266;87785;87188;87020;84009;81726;80996;78470\n;;30 rokov;74788;71692;70996;74241;76123;77077;82131;86819;91589;91933;94467;94439;95144;96282;92412;88282;88252;87522;87082;86971;83990;81680;80942\n;;31 rokov;77162;74731;71656;70967;74209;74586;77009;82079;86816;91566;91958;94558;94542;95180;96315;89607;88223;87840;87415;86993;86924;83959;81639\n;;32 rokov;79701;77153;74683;71615;70924;73352;74552;76980;82087;86795;91597;92068;94652;94589;95206;93930;89574;88031;87714;87328;86930;86847;83907\n;;33 rokov;79098;79650;77083;74634;71548;69899;73305;74524;76974;82089;86794;91698;92181;94677;94580;93099;93876;89088;87894;87653;87247;86865;86776\n;;34 rokov;76229;79060;79572;77032;74581;70647;69837;73278;74534;76957;82085;86841;91757;92197;94685;92698;93036;93440;88894;87803;87528;87140;86772\n;;35 
rokov;78893;76174;78989;79509;76951;73999;70566;69788;73276;74538;76961;82155;86906;91768;92155;92212;92651;92699;93282;88730;87681;87431;87005\n;;36 rokov;79536;78797;76080;78922;79455;76239;73941;70503;69743;73253;74562;77060;82217;86899;91777;90165;92136;92268;92524;93138;88623;87546;87317\n;;37 rokov;78580;79425;78678;75999;78857;78691;76186;73890;70445;69733;73212;74624;77163;82210;86847;90133;90106;91915;92059;92360;92967;88477;87399\n;;38 rokov;82528;78449;79321;78588;75908;77804;78586;76078;73846;70407;69714;73260;74668;77119;82196;85249;90066;89887;91738;91933;92189;92809;88348\n;;39 rokov;85154;82383;78268;79166;78447;75292;77686;78462;76007;73803;70367;69795;73291;74651;77086;80361;85142;89835;89712;91612;91827;92057;92664\n;;40 rokov;86806;84971;82196;78105;79036;77801;75166;77556;78372;75926;73744;70400;69843;73249;74610;75743;80281;85091;89625;89546;91441;91678;91891\n;;41 rokov;86441;86640;84741;81993;77936;78142;77622;75028;77452;78276;75822;73697;70405;69776;73176;73460;75660;80147;84930;89455;89410;91268;91523\n;;42 rokov;84774;86214;86405;84516;81821;77278;77931;77424;74916;77358;78186;75761;73696;70345;69703;72297;73379;75596;79980;84768;89284;89227;91087\n;;43 rokov;84022;84522;85916;86154;84291;80762;77093;77739;77283;74805;77245;78101;75701;73562;70274;68808;72190;73316;75439;79854;84643;89131;89052\n;;44 rokov;84172;83758;84235;85637;85880;83325;80522;76862;77564;77136;74636;77122;78015;75528;73410;69414;68720;72044;73152;75280;79683;84470;88946\n;;45 rokov;82669;83887;83422;83937;85343;85331;83013;80223;76644;77346;76976;74509;77032;77868;75339;71993;69276;68577;71882;72973;75116;79502;84290\n;;46 rokov;78799;82335;83528;83065;83619;84431;85032;82662;79938;76386;77127;76795;74364;76868;77674;74262;71798;69169;68362;71668;72792;74951;79314\n;;47 rokov;71698;78418;81952;83188;82702;82627;84083;84708;82336;79638;76099;76891;76585;74161;76668;76806;74048;71575;68962;68168;71452;72569;74761\n;;48 
rokov;70789;71359;78006;81530;82793;81353;82211;83680;84353;81977;79309;75812;76634;76297;73892;75727;76545;73770;71346;68752;67968;71236;72349\n;;49 rokov;67611;70400;70960;77577;81102;81928;80906;81767;83304;83953;81565;78971;75511;76336;76010;72787;75448;76265;73487;71099;68512;67721;70996\n;;50 rokov;58416;67158;69928;70490;77101;80100;81418;80436;81358;82886;83571;81193;78609;75177;75974;75085;72467;75172;75940;73193;70847;68269;67491\n;;51 rokov;53590;58026;66739;69422;70037;75973;79597;80906;79929;80918;82420;83114;80728;78181;74823;74962;74752;72087;74781;75623;72868;70500;67951\n;;52 rokov;56019;53187;57582;66270;68904;69380;75435;79040;80430;79418;80381;81977;82592;80207;77724;73918;74547;74365;71731;74404;75278;72446;70135\n;;53 rokov;52287;55555;52772;57134;65756;67985;68849;74860;78512;79844;78876;79858;81481;82040;79665;76891;73496;74153;73970;71288;74034;74863;72055\n;;54 rokov;51733;51815;55030;52295;56625;64896;67414;68297;74269;77907;79233;78337;79299;80923;81461;78904;76354;72972;73693;73464;70842;73583;74390\n;;55 rokov;51309;51246;51323;54459;51865;56011;64318;66821;67730;73645;77253;78559;77751;78687;80302;80184;78344;75835;72474;73173;72982;70368;73080\n;;56 rokov;50874;50781;50704;50778;53914;51434;55421;63695;66275;67113;72995;76584;77890;77078;78042;79378;79543;77730;75225;71883;72632;72471;69841\n;;57 rokov;47954;50286;50189;50159;50209;53426;50888;54778;63060;65603;66480;72255;75859;77139;76383;77168;78690;78881;77006;74537;71316;72055;71909\n;;58 rokov;45990;47335;49651;49593;49587;49332;52830;50360;54208;62370;64912;65748;71476;75049;76472;75730;76491;77954;78123;76332;73886;70677;71433\n;;59 rokov;44414;45364;46692;48987;48993;48934;48736;52160;49718;53583;61647;64193;64996;70645;74234;75438;74975;75708;77191;77349;75581;73173;70012\n;;60 rokov;43570;43768;44720;46012;48322;48224;48250;48059;51465;49068;52907;60846;63410;64231;69776;73358;74543;74099;74869;76333;76559;74756;72387\n;;61 
rokov;44583;42877;43059;43994;45246;47206;47514;47549;47440;50729;48411;52221;60072;62550;63382;69052;72481;73578;73183;73934;75478;75593;73837\n;;62 rokov;43662;43822;42134;42307;43279;44178;46449;46747;46847;46727;49987;47712;51481;59181;61637;62615;68110;71516;72577;72212;73008;74537;74675\n;;63 rokov;43544;42830;42919;41349;41542;42016;43421;45659;45990;46121;46027;49209;46970;50640;58241;60743;61711;67128;70495;71480;71133;72001;73525\n;;64 rokov;45540;42662;41943;42042;40556;40114;41226;42655;44844;45152;45344;45170;48374;46169;49802;57466;59717;60795;66133;69333;70390;70068;70962\n;;65 rokov;44550;44558;41757;40995;41083;39476;39250;40379;41811;43979;44293;44525;44388;47522;45370;48661;56478;58765;59818;65047;68218;69276;68962\n;;66 rokov;43796;43461;43452;40753;40009;39646;38563;38379;39531;40955;43073;43412;43700;43519;46566;44506;47794;55506;57789;58814;63905;67075;68042\n;;67 rokov;42056;42614;42312;42304;39738;38711;38674;37641;37523;38587;40010;42109;42517;42816;42608;45690;43671;46927;54444;56708;57777;62834;65871\n;;68 rokov;40680;40864;41334;41143;41157;38331;37704;37644;36673;36600;37593;39061;41129;41605;41889;41974;44727;42800;46022;53311;55549;56589;61547\n;;69 rokov;38936;39408;39535;40101;39884;39607;37220;36603;36593;35666;35628;36531;38030;40144;40588;40943;41007;43765;41885;44953;52164;54367;55378\n;;70 rokov;38226;37572;38045;38208;38779;38202;38427;36040;35447;35469;34655;34618;35462;36996;39152;39894;39943;40058;42710;40979;43890;51007;53154\n;;71 rokov;36123;36774;36139;36690;36842;36880;36926;37138;34857;34313;34279;33598;33437;34419;35881;38419;38840;38875;39039;41591;39923;42740;49798\n;;72 rokov;35522;34551;35268;34688;35218;35033;35480;35541;35795;33618;33097;33115;32471;32332;33298;35173;37350;37766;37849;37998;40491;38811;41584\n;;73 rokov;36008;33872;33059;33797;33211;33291;33604;34055;34134;34368;32291;31865;31876;31280;31154;32604;34049;36204;36668;36731;36822;39291;37699\n;;74 
rokov;33759;34227;32155;31488;32204;31360;31754;32148;32532;32651;32889;30969;30587;30656;30143;30609;31485;32841;35051;35465;35630;35580;38026\n;;75 rokov;31758;31940;32347;30459;29773;30288;29860;30178;30567;30976;31026;31397;29557;29298;29342;28995;29375;30315;31615;33796;34210;34425;34374\n;;76 rokov;25319;29840;30033;30433;28733;27840;28654;28326;28618;28958;29365;29432;29896;28206;27865;28382;27732;28148;29072;30362;32464;32896;33094\n;;77 rokov;23033;23696;27861;28148;28514;26760;26215;26909;26681;26983;27333;27734;27837;28311;26705;26885;26999;26564;26863;27744;29061;31067;31485\n;;78 rokov;10207;21429;22047;25933;26240;26273;24985;24401;25129;24966;25206;25591;26034;26242;26637;25571;25437;25714;25257;25543;26463;27664;29576\n;;79 rokov;9654;9409;19777;20319;24004;24199;24368;23280;22609;23355;23206;23437;23922;24347;24558;25274;24090;23960;24338;23804;24158;25025;26216\n;;80 rokov;9615;8845;8575;18128;18566;21807;22234;22374;21425;20789;21537;21387;21714;22219;22558;23243;23622;22588;22429;22817;22478;22796;23615\n;;81 rokov;11876;8734;8048;7773;16420;16703;19899;20256;20426;19467;19016;19666;19672;20001;20294;21213;21514;21948;21032;20811;21288;21002;21248\n;;82 rokov;15357;10679;7857;7291;6953;14613;15049;18044;18376;18401;17630;17150;17825;17895;18167;18870;19435;19722;20169;19369;19237;19666;19448\n;;83 rokov;13219;13711;9444;6950;6432;6009;13051;13454;16138;16447;16396;15743;15486;15969;16101;16668;17057;17741;18049;18371;17771;17581;18072\n;;84 rokov;11667;11614;12013;8257;6093;5506;5323;11468;11861;14152;14570;14418;13908;13722;14107;14667;14865;15346;16047;16276;16713;16098;15923\n;;85 rokov;9239;10199;10111;10534;7201;5236;4847;4626;10013;10308;12382;12685;12615;12163;11977;12700;12950;13212;13824;14296;14601;14986;14424\n;;86 rokov;8040;7901;8710;8678;9041;6056;4477;4170;3998;8549;8823;10693;10887;10928;10479;10621;11070;11308;11627;12099;12637;12855;13259\n;;87 
rokov;7139;6785;6624;7348;7313;7413;5152;3788;3560;3332;7201;7425;9117;9289;9360;9218;9021;9623;9827;10073;10531;10977;11145\n;;88 rokov;5451;5905;5595;5501;6149;5813;6105;4302;3203;2975;2755;5984;6232;7621;7710;8186;7832;7784;8185;8344;8624;9079;9455\n;;89 rokov;3948;4412;4787;4576;4439;4867;4756;5012;3528;2606;2434;2290;4903;5113;6267;6484;6779;6658;6608;6843;7036;7177;7681\n;;90 rokov;3120;3160;3535;3865;3687;3518;3940;3788;4034;2814;2103;1944;1898;3901;4089;5107;5310;5539;5506;5512;5673;5797;5963\n;;91 rokov;2300;2465;2511;2825;3040;2754;2756;3088;2985;3167;2231;1654;1526;1493;3067;3324;4075;4310;4475;4476;4523;4582;4707\n;;92 rokov;1808;1786;1948;1964;2199;2076;2157;2134;2407;2260;2464;1735;1301;1186;1164;2541;2580;3225;3477;3505;3611;3594;3643\n;;93 rokov;1340;1418;1448;1514;1563;1418;1597;1619;1638;1807;1709;1913;1333;960;904;953;1964;2008;2532;2729;2725;2843;2865\n;;94 rokov;992;1028;1139;1162;1246;947;1055;1177;1234;1238;1340;1267;1423;1022;740;764;723;1497;1545;1934;2070;2075;2230\n;;95 rokov;688;776;839;898;917;632;654;744;870;912;928;1009;962;1059;776;585;576;561;1155;1182;1461;1543;1603\n;;96 rokov;464;555;634;689;740;504;448;479;544;629;652;689;747;723;782;552;450;446;449;847;927;1095;1216\n;;97 rokov;280;383;473;561;582;326;363;330;325;394;459;484;497;572;560;476;398;338;350;330;640;724;875\n;;98 rokov;192;234;311;411;485;188;228;259;238;236;289;355;363;382;464;330;341;300;287;279;265;502;585\n;;99 rokov;132;172;200;265;379;153;123;170;195;171;167;234;262;286;312;259;239;243;245;227;231;222;407\n;;100 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;152;193;184;186;199;185;198;190\n;;100 rokov alebo viac;192;284;407;538;750;249;327;367;451;540;609;663;791;937;1090;0;0;0;0;0;0;0;0\n;;101 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;60;119;161;151;153;174;156;182\n;;102 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;36;44;99;136;133;142;149;140\n;;103 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;27;26;31;94;121;120;132;142\n;;104 
rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;14;21;16;26;92;114;113;128\n;;105 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;6;11;13;11;24;88;108;106\n;;106 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;9;5;6;11;10;23;86;105\n;;107 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;8;8;5;6;12;9;24;84\n;;108 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;4;7;6;4;6;11;11;24\n;;109 rokov;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;2;4;4;6;4;6;11;10\n;;110 rokov alebo viac;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;3;6;6;10;16;20;26;38\n\"\"\")\n\n data = pd.read_csv(csv_data, delimiter=\";\")\n data = data[data.columns[2:]].iloc[1:, :]\n index = data.iloc[:, 0].values\n data = data[data.columns[1:]]\n data.index = index\n return data.applymap(int)",
"_____no_output_____"
]
],
[
[
"# Úloha č.1\n\nNa obrázkoch vidíte krabicu s kačičkami.\n\n\n<a href=\"https://ibb.co/2nkKDrm\"><img src=\"https://i.ibb.co/2nkKDrm/IMG-20200107-202200.jpg\" alt=\"IMG-20200107-202200\" border=\"0\" /></a>\n<a href=\"https://ibb.co/vsNkvmV\"><img src=\"https://i.ibb.co/vsNkvmV/IMG-20200107-202121.jpg\" alt=\"IMG-20200107-202121\" border=\"0\" /></a>\n<a href=\"https://ibb.co/FbCgxLW\"><img src=\"https://i.ibb.co/FbCgxLW/IMG-20200107-202129.jpg\" alt=\"IMG-20200107-202129\" border=\"0\" /></a>\n<a href=\"https://ibb.co/g3KRmpg\"><img src=\"https://i.ibb.co/g3KRmpg/IMG-20200107-202140.jpg\" alt=\"IMG-20200107-202140\" border=\"0\" /></a>\n<a href=\"https://ibb.co/86xJJjZ\"><img src=\"https://i.ibb.co/86xJJjZ/IMG-20200107-202148.jpg\" alt=\"IMG-20200107-202148\" border=\"0\" /></a>\n\n1. Odhadnite čo najpresnejšie počet kačičiek v krabici.\n1. Určte interval, ktorému na 95% veríte, že sa v ňom nachádza správny výsledok na časť 1. (Ak by som vám dal podobnú úlohu 20 krát, mali by ste sa mýliť vo svojom intervale v priemere raz.) \n\n",
"_____no_output_____"
],
[
"## Riešenie",
"_____no_output_____"
],
[
"Takýto typ úlohy sa veľmi často vyskytuje v praktickom využití matematiky. Máme hodnotu, ktorú sa snažíme odhadnúť / modelovať, ale máme k dispozícii iba obmedzené množstvo informácií. V praxi to môžu byť výsledky volieb, počet predajov v našom obchode v ďalšom mesiaci, celkové náklady na výlet do Paríža, v princípe hocičo. V každom z týchto scenárov by sme odhadovanú hodnotu chceli použiť iným spôsobom.\n\nNapríklad pri výlete do Paríža je pre nás dôležité zhora ohraničiť, koľko nás tento výlet vyjde, aby sme zistili, či na to máme peniaze. Pri počte predajov spotrebného tovaru by sme mohli chcieť vedieť dolné ohraničenie predaja, aby sme nekúpili viac a vyhli sme sa pokazenému tovaru. Vo volebných prieskumoch väčšinu ľudí zaujíma konkrétne číslo a žiadne ohraničenie. \n\nV niektorých prípadoch nám stačí **bodový odhad** (časť 1), ale vo veľa prípadoch sa nám viac hodí uvažovať ohraničenia tejto hodnoty, takzvaný **intervalový odhad** (časť 2). Interval je síce širší odhad, ale vieme priamo pracovať s pravdepodobnosťou jeho presnosti. V praxi je veľmi populárny. Viac informácií o intervalovom odhade (resp. intervale spoľahlivosti) sa môžte dočítať [tu](https://en.wikipedia.org/wiki/Confidence_interval).",
"_____no_output_____"
],
[
"V úlohách takéhoto typu sa budeme stretávať s problémom spokojnosti. Nebude jasné, či máme správny výsledok a nebudeme sa o tom ani vedieť presvedčiť. V tejto úlohe ešte môžme manuálne spočítať kačičky (ak ich máme k dispozícii), ale pri ostatných vôbec nie je jasné, kedy by sme mali byť s úlohou skončení.\n\nBudeme sa preto snažiť spraviť viacero rozličných odhadov a na záver si porovnáme ich všetky výsledky.",
"_____no_output_____"
],
[
"### Odhad 1\n",
"_____no_output_____"
],
[
"Náš prvý odhad spravíme nasledovne:\n1. spočítame si všetky kačičky, ktoré sú jasne viditeľné na obrázku č.4.\n1. odhadneme pomocou obrázku 2, akú časť všetkých kačičiek sme spočítali.\n1. dopočítame priamou úmerov počet kačičiek v celej krabičke.\n\nOpísali sme teraz metodiku nášho odhadu, skúsime si ho teraz spraviť. Spravte si ho sami predtým, ako ho spravíme za vás my.\n\n",
"_____no_output_____"
],
[
"#### Počítanie kačičiek\n",
"_____no_output_____"
],
[
"<a href=\"https://ibb.co/ZWWtd1S\"><img src=\"https://i.ibb.co/MMMWn5f/kacicky-zvrchu.jpg\" alt=\"kacicky-zvrchu\" border=\"0\" /></a>\n\nNa obrázku sme zaznačili 33 kačičiek. Niektorým kačičkám je ešte vidieť hlava alebo chvost, ale tie sme teraz nepočítali.",
"_____no_output_____"
],
[
"#### Odhad zlomku",
"_____no_output_____"
],
[
"V tejto časti si budeme zaznačovať aj náš interval odhadu. Nie je totiž vôbec jasné, akú veľkú časť kačičiek sme spočítali. Pozrime sa na obrázok č. 2 a zakreslime si, pokiaľ siahajú naše vyznačené kačičky.\n\n<a href=\"https://ibb.co/tLR06tY\"><img src=\"https://i.ibb.co/GHmZDLC/kacicky-siahanie.jpg\" alt=\"kacicky-siahanie\" border=\"0\" /></a>\n\nPrvá čiara je horná hranica nášho odhadu, fialová dolná hranica a čeverná je náš bodový odhad. Pravdaže sa môžme mýliť, ale takto nám to od pohľadu vyšlo. Pomocou fialových čiar teraz dostaneme aj intervalový odhad.\n\nSpočítajme si teraz v akej časti celej výšky krabičky sa tieto čiary nachádzajú. V grafickom programe sme si pozreli počet pixelov od vrchu krabice po spodok. Napočítali sme 930. Po prvú fialovú čiaru je 230 pixelov, po červenú 330 a po poslednú 410. \n\nKačičky ale pravdepodobne budú hustejšie nakopené na spodku ako na vrchu. Aj z ostatných obrázkov môžme vidieť, že je na vrchu na niektorých miestach prázdno a kačičky moc nepresahujú výšku krabičky. Môžme teda upraviť výšku našej krabičky o niekoľko pixelov, aby sme toto zobrali v úvahu. Odpočítame teda od všetkých hodnôt 50 pixelov, čo sme si odhadli okom podľa grafického programu.",
"_____no_output_____"
],
[
"#### Odhad výsledku",
"_____no_output_____"
],
[
"V tejto časti iba dáme dokopy čísla, ktoré sme dostali. Robíme to separátne od prvých dvoch častí, aby sme napríklad nevolili čiary podľa toho, čo sa nám zdá ako dobrý výsledok.\n\nAk by sme nerátali s úpravou o prázdny priestor na vrchu krabičky, dostali by sme bodový odhad \n\n$$\\tfrac{930}{330} \\cdot 33 = 93$$ \n\nkačičiek s intervalom spoľahlivosti \n\n$$[\\tfrac{930}{410} \\cdot 33 \\approx 74.85 , \\tfrac{930}{230} \\cdot 33 \\approx 133.43].$$ \n\nSo zapracovaním korekcie za prázdny priestor dostávame bodový odhad \n\n$$\\tfrac{880}{280} \\cdot 33 \\approx 103.71$$ \n\nkačičiek s intervalom spoľahlivosti \n\n$$[\\tfrac{880}{360} \\cdot 33 \\approx 80.66, \\tfrac{880}{180} \\cdot 33 \\approx 161.33].$$",
"_____no_output_____"
],
[
"#### Zamyslenie sa nad výsledkom",
"_____no_output_____"
],
[
"Naše bodové odhady vyzerajú na prvý pohľad rozumne. Netušíme, či sú správne, ale rádovo sedia s našimi prvotnými pocitmi. Čo sa týka intervalov, spodné ohraničenia vyzerajú tiež rádovo okej, ale vrchné vyzerá byť ustrelené, hlavne v druhom prípade. Keď sa nad tým zamyslíme, po korekcii o 50 pixelov sa naša vrchná fialová čiara veľmi priblížila k vrchu krabičky a teda výpočet $\\tfrac{880}{180}$ očividne nezodpovedá nášmu reálnemu odhadu. Mali by sme prekresliť naše čiary s tým, že rátame s korekciou od začiatku. Získanie lepšieho intervalu týmto spôsobom už necháme na čitateľa.\n\nIntervaly spoľahlivosti sú často veľmi široké, ako si môžme všimnúť aj v tomto príklade. Sme si *dosť istý* tým, že skutočný počet kačičiek sa naozaj v tomto intervale bude nachádzať, takže úloha je splnená. Pre praktické využitie je najlepšie, keď sú intervaly čo najužšie. Keby naša vláda odhadla, že na Slovensku aktuálne žije medzi 100 tisíc a 100 miliónov ľudí, ťažko by sa jej robil štátny rozpočet. Preto je dôležité spraviť tak úzky interval ako to ide, ale aby sme zároveň nestratili našu silnú istotu (95%), že je interval správny. \n\nOhľadom čísla 95%: v praxi sa veľmi často používa 95%. Vyjadruje istú mieru pochybnosti, ktorá sa dá schovať do rozumne úzkeho intervalu a zároveň vyjadruje dostatočnú istotu, nad ktorou sa dá budovať. Vo väčšine prípadov nikto netuší, či sa skutočne pohybujeme okolo 95% - to je ale v poriadku. Dôležité je, že si všetci uvedomujú málopercentnú šancu na chybu a veľkopercentnú istotu správneho ohraničenia. V niektorých situáciách sa vyžaduje aj väčšia spoľahlivosť, keď riziko nesprávneho výsledku môže znamenať katastrofu. Vtedy sa môžu všetci oháňať číslom napríklad 99.9% a omnoho širšími intervalmi.\n\nPosledná poznámka je k bodovému odhadu 103.71. Dáva nám zmysel odhadnúť neceločíselné riešenie, keď je počet kačičiek určite celočíselný? Zamyslite sa a prediskutujte.",
"_____no_output_____"
],
[
"### Odhad 2\n",
"_____no_output_____"
],
[
"Aby sme dostali čo najinformatívnejší druhý odhad, mala by sa jeho metodika čo najviac líšiť od predošlého. Takýmto spôsobom zaručíme, že naše odhady budú čo najviac nezávislé a teda znížime šancu tomu, že spravíme dvakrát takú istú chybu.\n\nSpravíme teda odhad na základe objemu. Odhadneme objem krabice, objem každej kačičky a spočítame, koľko by sa ich tam zmestilo. Pravdaže, v krabičke je aj veľa vzduchu, na čo budeme musieť myslieť.\n\nMetodika teda bude vyzerať takto:\n1. Odhadneme objem krabičky\n1. Odhadneme objem kačičky\n1. Odhadneme počet kačičiek v krabičke\n\nOpäť by sme vás radi vyzvali, aby ste si tieto kroky skúsili spraviť sami.",
"_____no_output_____"
],
[
"#### Odhad veľkosti krabičky\n",
"_____no_output_____"
],
[
"Použijeme obrázok č. 4 na vypočítanie obsahu prierezu a následne obrázok č.5 na spočítanie výšky krabičky. \n\nNa obrázku č. 4 sme spočítali nasledovné rozmery:\n\n<a href=\"https://ibb.co/0Z4X9VG\"><img src=\"https://i.ibb.co/6b9ZtJW/krabicka-rozmery-vrch.jpg\" alt=\"krabicka-rozmery-vrch\" border=\"0\" /></a>\n\nCelá dĺžka (horizontálna) 3880 pixelov, šírka (vertikálna) 2300 pixelov a dĺžka medzi oblúkovými časťami 1700 pixelov. Oblúkové časti budeme považovať za kružnice. Máme teda dva poloblúky, ktoré dokopy vytvárajú kruh o priemere 2600 pixelov. Obsah tohto kruhu je teda $1150^2 \\cdot \\pi \\approx 4\\,154\\,756$ pixelov štvorcových. Obdĺžniková časť medzi polkruhami má obsah $2300 \\cdot 1700 = 3\\,910\\,000$ pixelov štvorcových. Dokopy je to $8\\,064\\,756~\\text{px}^2$.\n\nTeraz skúsime vyrátať výšku krabičky. Na obrázku č. 5 sme zrátali nasledovné rozmery:\n\n<a href=\"https://ibb.co/17bVX3F\"><img src=\"https://i.ibb.co/DzLXMN0/krabicka-rozmery-zboku.jpg\" alt=\"krabicka-rozmery-zboku\" border=\"0\" /></a>\n\nHorizontálna dĺžka 2960 pixelov a výška 1530 pixelov. Treba si dávať pozor na perspektívu a uvedomiť si, ktoré z čiar treba naozaj merať. Musíme si tiež dávať pozor na jednotky. Pixel na prvom obrázku má inú veľkosť ako pixel na druhom obrázku. Keďže budeme merať veľkosti kačičiek podľa pixelov na prvom obrázku, prevedieme si pixely z druhého obrázka na pixely prvého obrázka. Na to nám slúži dĺžka krabičky. Dĺžka bola na prvom obrázku 3880 pixelov a na druhom 2960 pixelov. Výška krabičky v jednotkách prvého obrázka bude $\\tfrac{3880}{2960} \\cdot 1530 \\approx 2005$ pixelov.\n\nCelý objem krabičky sme vyrátali ako \n$$\n8\\,064\\,756 ~\\text{px}^2 \\cdot 2005 ~\\text{px} = 16\\,169\\,835\\,780 ~\\text{px}^{3}\n$$\n\nTo je celkom dosť 3D pixelov, ktoré sa zmestia do tejto krabičky.",
"_____no_output_____"
],
[
"#### Odhad veľkosti kačičky\n\n",
"_____no_output_____"
],
[
"Na prvom obrázku sme zmerali potrebné rozmery kačičky na viacerých príkladoch a zobrali sme priemerné číslo.\n\n<a href=\"https://ibb.co/H7N1K1V\"><img src=\"https://i.ibb.co/TcKpwpk/kacicky-rozmery.jpg\" alt=\"kacicky-rozmery\" border=\"0\" /></a>\n\nDostali sme sa k číslam dĺžka 565 pixelov, šírka 470 pixelov (bez krídel, ktoré budeme ignorovať) a výška 520 pixelov. Náročná otázka ale je, ako budeme modelovať objem kačičky. Je zjavné, že nie je všetok priestor krabičky vyplnený kačičkami. Horný odhad spravíme ako kváder, teda \n\n$$\n565 ~\\text{px} \\cdot 470 ~ \\text{px} \\cdot 520~\\text{px} = 138\\,086\\,000 ~\\text{px}^3.\n$$\n\nPre tento odhad platí, že sme si celkom istý, že kačička aj so vzduchom nebude viac ako takýto kváder. \n\nČo sa týka spodného odhadu, môžme použiť ihlan rovnakej výšky, ale je celkom jasné, že samotná kačička bude mať aspoň taký objem (bez vzduchu). Takýto ihlan by mal tretinu objemu kvádra, čo nám príde ako veľmi slabý odhad. Spravíme teda spodný odhad kačičky so vzduchom ako polovicu kvádra. Okom to vyzerá, že by to naozaj mohol byť spodný odhad, v ktorom sme si celkom istý. Náš spodný odhad je teda $69\\,043\\,000 ~\\text{px}^3$. \n\nAko skutočný odhad použijeme číslo presne medzi našimi odhadmi, teda $\\tfrac{3}{4}$ kvádra. Od pohľadu nevieme spraviť lepší odhad. Dalo by sa argumentovať aj za $\\tfrac{2}{3}$. Bohužiaľ, odhadnúť presný objem kačičky aj so vzduchom, ktorý zaberá, je veľmi náročné a bude to zjavne nepresné. Naše spodné a vrchné odhady by ale mali zachytiť výsledky z minulého odhadu, ak im naozaj veríme. Bodový odhad teda bude $103\\,564\\,500 ~\\text{px}^3$.",
"_____no_output_____"
],
[
"#### Odhad výsledku",
"_____no_output_____"
],
[
"Teraz nám ostáva spraviť záverečné výpočty. Je dôležité, že ich nerobíme priebežne s určovaním objemu kačičky, pretože by sme boli ovplyvnený výsledkami a odhad by bol závislý od nášho prvého odhadu.\n\nDostávame bodový výsledok\n\n$$\n\\frac{16\\,169\\,835\\,780}{103\\,564\\,500} \\approx 156.13\n$$\n\na interval ako \n\n$$\n\\left[ \\frac{16\\,169\\,835\\,780}{138\\,086\\,000} \\approx 117.1 , \\frac{16\\,169\\,835\\,780}{69\\,043\\,000} \\approx 234.2 \\right].\n$$",
"_____no_output_____"
],
[
"#### Zamyslenie sa nad výsledkom",
"_____no_output_____"
],
[
"Výsledky nám vyšli celkom výrazne vyššie ako v predchádzajúcom prístupe. Dokonca aj spodný interval odhadu, kde sme kačičku odhadovali plným kvádrom, nám vyšiel viac ako bodový odhad v predchádzajúcom prístupe.\n\nZamyslite sa, čo sme mohli spraviť zle v týchto prístupoch. Ktorý výsledok by sme mali preferovať? Prečo? Na základe týchto dvoch odhadov, aký by sme si mali tipnúť konečný výsledok?",
"_____no_output_____"
],
[
"### Odhad 3",
"_____no_output_____"
],
[
"Keď sa dve metodiky natoľko líšia vo výsledkoch, je dobrý nápad skúsiť spraviť ešte tretí odhad. Ak ste tak ešte nespravili, navrhnite aspoň jednu ďalšiu metodiku a vypočítajte na jej základe odhad.",
"_____no_output_____"
],
[
"### Výsledok\n\n",
"_____no_output_____"
],
[
"Ostáva nám spočítať kačičky.\n\n<a href=\"https://ibb.co/tq7Cxv4\"><img src=\"https://i.ibb.co/D9xK8vt/IMG-20200107-202750.jpg\" alt=\"IMG-20200107-202750\" border=\"0\" /></a>\n\nJe ich dokopy 102.",
"_____no_output_____"
],
[
"#### Ponaučenia",
"_____no_output_____"
],
[
"Iste ste si všimli dvoch kačičkových *generálov*, ktorý sú výrazne väčší, ako všetkých 100 zvyšných kačičiek. Na základe našich obmedzených dát sme chybne predpokladali, že majú všetky kačičky rovnakú veľkosť. Bol to dobrý predpoklad na základe našich informácií, ale nikdy sme na to nemali záruku. Takto to bohužiaľ býva aj v reálnom svete. *Naše dáta nemusia byť nikdy dokonale výpovedné*.\n\nTakisto si pozornejší z vás možno všimli, že jedna z kačičiek mala na sebe zo spodu napísané číslo 103, čo napovedalo tomu, že ich môže byť viac ako 103. Vaše odhady sa na základe tejto indície možno podvedome pohybovali výhradne nad týmto číslo. O pôvode týchto čisel sme nemali žiadnu informáciu (aj keď je pravda, že kačičky boli očíslované v poradí - neboli ale všetky použité). Je dôležité vediet odfiltrovať spoľahlivé informácie od nespoľahlivých. *Nie všetky informácie sú nutne relevantné.*\n\nNiektoré naše bodové odhady boli naozaj mimo. To sa stáva. Preto sú v praxi tak populárne intervalové odhady, pretože aspoň dostávame rozumnú záruku na počet. Bodový odhad môže byť veľmi mimo. Interval môže byť síce široký, ale dáva nám celú informáciu typu: \"Výsledok je z nášho pohľadu na 95% v tomto intervale a nevieme ho zúžiť, lebo je možné, že je výsledok blízko jeho hraníc\". Hovorí to niečo o výsledku, ktorý hľadáme. Je neistý. *Neistota sa dá veľmi prakticky zachytiť intervalom*.",
"_____no_output_____"
],
[
"## Úloha č.2\n\nKoľko občanov Slovenska sa tento rok stane plnoletými? \n\nSpravte bodový odhad (časť 1 predchádzajúcej úlohy) aj 95% odhad intervalu (časť 2 predchádzajúcej úlohy).\n\nSkúste odpovedať túto otázku:\n1. bez použitia akýchkoľvek zdrojov\n1. s použitím ľubovoľných zdrojov na internete",
"_____no_output_____"
],
[
"## Riešenie",
"_____no_output_____"
],
[
"Na rozdiel od predchádzajúcej časti nemáme žiadne dáta k dispozícii, budeme si ich musieť nájsť. Zatiaľ nám nie je jasné, na základe akého modelu budeme budovať náš odhad, lebo nevieme, čo máme k dispozícií. Prvý krok je teda jasný, ideme sa prehrabať internetom.\n",
"_____no_output_____"
],
[
"### Hľadanie dát",
"_____no_output_____"
],
[
"Nachádzanie správnych a relevantných dát je výzvou. Použijeme na to určite google, otázkou ostáva, aké pojmy budeme googliť. \n\n\n\n",
"_____no_output_____"
],
[
"#### Prvý pokus\n",
"_____no_output_____"
],
[
"Na prvý pokus sme vyskúšali hľadať \"populacia slovensko podla veku\". Medzi prvými odkazmi sme našli [tento](http://obyvatelstvo.population.city/slovensko/). Tvári sa, že má aktuálne štatistiky počtu obyvateľska v rôznych vekových skupinách. Naša záujmová skupina sa bude nachádzať v kategórii 15-19. Náš prvý odhad teda môže byť počet mužov + počet žien v tejto kategórii, vydelené piatimi - lebo nás zaujíma iba jeden z piatich vekov v tejto kategórii. Náš prvý bodový odhad je teda \n\n$$\\frac{122\\,579 + 116\\,980}{5} = 47911.8.$$ \n\nNevieme ale odkiaľ presne tieto dáta sú a či sa na ne dá spoľahnúť. Na stránke sa dá prekliknuť ku zdrojom, čo nás prenesie na stránku OSN. Po rozkliknutí tabuliek *Population by age* a navolení niekoľkých základných údajov sa vieme dostať k číslu $263\\,000$ ako k súčtu mužov a žien v roku 2020 medzi 15 a 19. To nás vedie k druhému odhadu\n\n$$\\frac{263\\,000}{5} = 52600.$$\n\nStránke OSN asi môžme celkom veriť. Radi by sme dostali ale nejakú presnejšiu štatistiku, pretože v skupine 15-19 sa nemusia vyskytovať 18-ročný rovnomerne s ostatnými. Mohli by sme využiť historické dáta týchto skupín a odhadnúť pomery 15, 16, 17, 18 a 19 ročných v tejto skupine cez ich prechody medzi iné skupiny (premyslite si, vyskúšajte). Pre teraz ale skúsime nájsť relevantnejšie dáta.",
"_____no_output_____"
],
[
"#### Druhý pokus",
"_____no_output_____"
],
[
"Vyskúšali sme preformulovanie: \"Vekové rozloženie obyvateľstva slovensko\". Prvý odkaz vyzerá veľmi sľubne, prišli sme na [túto stránku](http://statdat.statistics.sk/cognosext/cgi-bin/cognos.cgi?b_action=cognosViewer&ui.action=run&ui.object=storeID%28%22i40A03AF2150C41DE8BE98D0C0C41A764%22%29&ui.name=Vekov%C3%A9%20zlo%C5%BEenie%20-%20SR%2C%20oblasti%2C%20kraje%2C%20okresy%2C%20mesto%2C%20vidiek%20%5Bom7009rr%5D&run.outputFormat=&run.prompt=true&cv.header=false&ui.backURL=%2Fcognosext%2Fcps4%2Fportlets%2Fcommon%2Fclose.html&run.outputLocale=sk). Skvelé, to je presne to, čo sme potrebovali. V ľavom stĺpci sa nám ukazuje *Stav trvale bývajúceho obyvateľstva k 30.6.(1.7.) (Osoba)* respektíve *Stav trvale bývajúceho obyvateľstva k 31.12. (Osoba)*. Nás by zaujímalo druhé z týchto čísel, keďže počet 17 ročných 31.12.2019 v roku 2019 by mohol veľmi blízko zodpovedať počtu obyvateľov nadobúdajúcich plnoletosť v roku 2020. Na spodku stránky si môžme všimnúť poznámku, že stav v strede roka sa počíta ako aritmetický priemer najbližších koncov roka a teda nemá pre nás až tak silnú výpovednú hodnotu.\n\nZdroj vyzerá byť veľmi spoľahlivý, keďže ide o štatistický úrad Slovenskej Republiky. Všetky tieto údaje by sme mali brať ako najpresnejšie odhady, aké vie Slovensko spraviť. Budeme sa na ne spoliehať.\n\nBohužiaľ, údaje o roku 2019 nie sú k dispozícií. Keďže posledná aktualizácia bola 10.4.2019, môžme predpokladať, že niekedy v apríli tohto roka sa stane nasledujúca aktualizácia. Budeme teda musieť pracovať s rok starými dátami. To nie je problém, často musíme v praxi pracovať s omnoho staršími dátami a \"predpovedať\" ich správanie do budúcnosti (predpovedaniu na základe historických dát sa vo vedeckom svete hovorí **extrapolácia**). Ak ste sa k týmto dátam počas svojho hľadania nedostali, dajte si chvíľku na predpovedanie výsledku na základe tejto tabuľky. ",
"_____no_output_____"
],
[
"### Extrapolácia",
"_____no_output_____"
],
[
"Po preklikaní niektorých nastavení sme zo stránky v predchádzajúcom odseku získali nasledovnú tabuľku:",
"_____no_output_____"
]
],
[
[
"data = get_age_data() # vyčistí a spracuje data, ktoré ste si pozreli na webe\nrelevant_data = data.iloc[1:20,:] # ukáže prvý až dvadsiaty riadok dát\nrelevant_data",
"_____no_output_____"
]
],
[
[
"Skúsime si tieto dáta aj vykresliť, aby sme v nich lepšie videli vzory. Vedieť správne vizualizovať (a vyčistiť) dáta je kľúčová schopnosť, ktorá je často medzi matematikmi podceňovaná.",
"_____no_output_____"
]
],
[
[
"import plotly.graph_objects as go\nfig = go.Figure( go.Heatmap(\n x=relevant_data.columns, \n y=relevant_data.index[::-1],\n z=relevant_data.values[::-1,:],\n colorscale=\"viridis\"\n ) )\nfig.show()",
"_____no_output_____"
]
],
[
[
"Je celkom jasne vidieť, že veľkosti skupín sa z roka na rok moc nemenia. Dobrý odhad na náš výsledok by teda bol počet 16 ročných z roku 2018, čo je 50792. Je to číslo niekde v strede medzi našimi prvými dvoma odhadmi, ktoré sme získali.\n\nKeď sme sa už zoznámili s našími dátami, je na čase si skúsiť napísať, čo sa snažíme predpovedať a ako to budeme modelovať. Naším cieľom je predpovedať počet 17 ročných v roku 2019. Náš odhad bude založený na počte 16 ročných v roku 2018 a korekcii za pridaný rok. Za tento rok mohli niektorý zo 17 ročných odísť zo Slovenska, umrieť, alebo sa dokonca prisťahovať. Túto korekciu sa budeme snažiť zachytiť na základe našich dát.\n\nPrvý model, ktorý skúsime aplikovať je nasledovný. Budeme predpokladať, že naša skupina narodených v roku 2002 nie je ničím špeciálna a ich chovanie medzi 16tym a 17tym rokom bude zodpovedať chovaniu všetkých v minulosti v tejto skupine. Vypočítame si teda priemernú hodnotu \n\n$$\n\\frac{17~\\text{ročný v roku}~(N + 1)}{16~\\text{ročný v roku}~N}.\n$$\nZ našich dát to dostaneme nasledovne:",
"_____no_output_____"
]
],
[
[
"age16 = relevant_data.loc[\"16 rokov\"].values[:-1] # všetci 16 ročný okrem posledného\nage17 = relevant_data.loc[\"17 rokov\"].values[1:] # všetci 17 ročný okrem prvého\n\npomery = np.divide(age17, age16)\npomery",
"_____no_output_____"
]
],
[
[
"Priemerný pomer teraz dostaneme vypočítaním priemeru týchto čísel:",
"_____no_output_____"
]
],
[
[
"np.mean(pomery)",
"_____no_output_____"
]
],
[
[
"Tento výsledok je kúsok prekvapivý. Keď sa pozrieme na pomery, ktoré nám vyšli, 16 z nich je pod 1, dva sú presne 1 a iba 4 sú väčšie ako 1. Tie štyri ale majú väčšiu váhu, ako všetky menšie ako 1 dokopy, takže sa žiadna chyba v matematike nestala. Vo väčšine prípadov ale počet 17 ročných je kúsok menší ako počet 16 ročných, takže ak predpovieme počet 17 ročných na základe tohto výsledného čísla, vo väčšine prípadov sa budeme mýliť.\n\nZ tohto dôvodu sa v podobných situáciách často používa namiesto aritmetického priemeru *medián*. Medián získame tak, že si zoradíme všetky hodnoty a zoberieme tú v strede (v prípade párneho počtu hodnôt priemer dvoch stredných). Skúsme teda spraviť vypočítať medián týchto pomerov.",
"_____no_output_____"
]
],
[
[
"np.median(pomery)",
"_____no_output_____"
]
],
[
[
"To je už uveriteľnejšie. Ak na základe tohto čísla predpovieme počet 17 ročných, dostaneme \n\n$$50792 \\cdot 0.9998356 \\approx 50783.65.$$\n\nTakže iba na základe predpokladu, že skupina narodená roku 2002 nie je ničím odlišná od skupín narodených v iné roky, dostali sme prvý prepracovaný odhad nášho výsledku. \n\nAk by sme chceli spraviť ešte intervalový odhad týmto modelom, mohli by sme zobrať najmenší a najväčší pomer, ktorý sme za tie roky videli a použiť tie. Dostaneme takto interval",
"_____no_output_____"
]
],
[
[
"50792 * np.min(pomery), 50792 * np.max(pomery)",
"_____no_output_____"
]
],
[
[
"#### Extrapolácia pokračovanie\n",
"_____no_output_____"
],
[
"Posledný krok je overiť, či je náš predpoklad správny - či skupina z roku 2002 je niečím špeciálna alebo nie. Môžme to overiť viacerými spôsobmi. Skúsime sa pozrieť na postupne na úbytky / prírastky medzi vekmi 0 a 1, 1 a 2, 2 a 3, 3 a 4, atď.. Ak sa vo všetkých týchto úbytkoch umiestni naša skupina 2002 ako priemerná, môžme začať lepšie veriť nášmu minulému odhadu. Spravíme to zase vizualizáciou, aj keď to teraz bude mierne komplikovanejšie.",
"_____no_output_____"
]
],
[
[
"results = {}\nfor i_vek in range(1, len(relevant_data.index)):\n for i_rok in range(1, len(relevant_data.columns)):\n pomer = relevant_data.iloc[i_vek, i_rok] / relevant_data.iloc[i_vek-1, i_rok-1]\n narodeni = int(relevant_data.columns[i_rok]) - i_vek\n if narodeni not in results:\n results[narodeni] = {'x': [], 'y': []}\n results[narodeni]['x'].append(i_vek)\n results[narodeni]['y'].append(pomer)\n\nfig = go.Figure()\nfor key in results.keys():\n fig.add_trace(go.Scatter(x=results[key]['x'], \n y=results[key]['y'],\n name=key,\n mode='lines',\n line=dict(color='rgba(255,0,0,1)', width=4) if key == 2002 else (dict(color='rgba(10,10,10,0.2)', width=2))\n ))\n \nfig.update_layout(\n title=\"Prírastok / úbytok v obyvateľstve podľa roku narodenia a veku\",\n xaxis_title=\"vek\",\n yaxis_title=\"prírastok / úbytok\",\n)\nfig.show()",
"_____no_output_____"
]
],
[
[
"Na prvý pohľad vyzerá červená čiara (pomery počtu v nasledovných vekoch) úplne štandardne, ale potom si všimneme obrovský skok v deviatom roku života skupinky 2002, rovnako ako mierne väčší spád v 13-tom roku života. Stalo sa teda niečo špeciálne v 9. roku života ročníka 2002? ",
"_____no_output_____"
],
[
"#### Extrapolácia pokračovanie II.",
"_____no_output_____"
],
[
"Môžme si všimnúť, že ročník 2002 nie je jediný, ktorý takto poskočil v niektorom bode. Keď si myšou prejdeme všetky takéto kopce, všimneme si, že ročníky robiace tieto skoky idú po sebe a sú klesajúce. Kým ročník 2002 spravil vo svojich 9 rokoch veľký skok, ročník 2001 spravil vo svojich 10 rokoch veľký skok, ročník 2000 spravil vo svojich 11 rokoch veľký skok, atď. Vyzerá to na prvý pohľad, že v roku 2011 sa stalo niečo špeciálne. Skúsme si spraviť podobný graf, kde ale neporovnávame veky ale porovnávame priamo roky. Malá zmena kódu by nám mala pomôcť.",
"_____no_output_____"
]
],
[
[
"results = {}\nfor i_vek in range(1, len(relevant_data.index)):\n for i_rok in range(1, len(relevant_data.columns)):\n pomer = relevant_data.iloc[i_vek, i_rok] / relevant_data.iloc[i_vek-1, i_rok-1]\n narodeni = int(relevant_data.columns[i_rok]) - i_vek\n if narodeni not in results:\n results[narodeni] = {'x': [], 'y': []}\n results[narodeni]['x'].append(relevant_data.columns[i_rok]) # tu je zmena\n results[narodeni]['y'].append(pomer)\n\nfig = go.Figure()\nfor key in results.keys():\n fig.add_trace(go.Scatter(x=results[key]['x'], \n y=results[key]['y'],\n name=key,\n mode='lines',\n line=dict(color='rgba(255,0,0,1)', width=4) if key == 2002 else (dict(color='rgba(10,10,10,0.2)', width=2))\n ))\nfig.update_layout(\n title=\"Prírastok / úbytok v obyvateľstve podľa roku narodenia a roku\",\n xaxis_title=\"rok\",\n yaxis_title=\"prírastok / úbytok\",\n)\nfig.show()",
"_____no_output_____"
]
],
[
[
"Teraz si môžme všimnúť, že špic v roku 2011 sa naozaj týka všetkých. Niektoré zmeny sú kladné a niektoré záporné. Ak si chvíľu budeme googliť, čo sa vtedy stalo, dostaneme sa k záveru, že v rokoch xxx1 sa robieva na Slovensku sčítanie obyvateľstva. Takže v týchto dátach vyzerajú byť v skutočnosti všetky dáta iba odhadované medzi rokmi končiacimi na 1. To nám trocha kazí dôveru v tento model, ale stále platí, že nemáme aktuálne nič lepšie.\n\nAby sme dokončili myšlienkový pochod, všimnime si, že úpadok v 13-tich rokoch je naozaj mierne výnimočný, pretože v tom roku ani v tom veku žiadna skupina nemala taký úpadok. Na tomto grafe vidíme špeciálny úpadok v prvom roku života, ale keď sa pozrieme na pôvodný graf, všimneme si, že takýto úpadok je celkom častý a teda by sme mu nemali prikladať špeciálny význam.\n\nDošli sme teda k záveru, že náš ročník 2002 je z viacerých pohľadov v princípe normálnym ročníkom, až na mierny úpadok v roku 2015. ",
"_____no_output_____"
],
[
"### Zamyslenie a námety",
"_____no_output_____"
],
[
"Zdá sa vám, že sme ešte niečo nevzali v úvahu? Ak áno, určite sa nad tým zamyslite a skúste to vyhodnotiť.\n\nUkázaný postup pravdaže nie je jediný a budeme radi, ak nám dáte vedieť o iných zaujímavých postupoch, ktoré ste použili. V apríli sa možno dozvieme správne výsledky :)\n\nAk vás táto téma a dáta zaujali, budeme radi, ak sa skúsite zamyslieť a prediskutovať aj nasledovné otázky / prístupy:\n- Čo by sa stalo, ak by sme sa snažili modelovať priamo prisťahovania, odsťahovania a úmrtia v jednotlivých vekoch, ako by sa líšili výsledky? Skúste nájsť relevantné dáta a použite ich.\n- Ako súvisí istota vášho odhadu s vekom, ktorý sa snažíte odhadnúť? Je nejaký vek, pre ktorý by ste si svojim odhadom boli najistejší?\n- Vedeli by ste podobnú analýzu zopakovať napríklad pre Ghanu?\n- Predstavte si, že ste autobazár a rozhodujete sa, či spravíte reklamnú kampaň pre čerstvo plnoletých občanov. Ako by ste odhadli, aká časť ľudí dosahujúcich plnoletosť má na účte aspoň 3000 eur, za ktoré by si mohli kúpiť auto?",
"_____no_output_____"
],
[
"# Domáca úloha\n\nSpoločnosť Anonymous International sa rozhodla, že o dva roky postaví nové obchodné centrum v Bratislave. V záujme šetrenia miesta sa spoločnosť rozhodla, že niektoré parkovacie miesta budú dĺžky 5m a niektoré budú dĺžky iba 4m.\n1. Spravte analýzu, ktorá predpovie o dva roky pomer áut v Bratislave spadajúcich do týchto dvoch kategórií.\n1. Anonymous International by chcela ísť s dobou a chceli by na tieto miesta zakomponovať aj miesta na nabíjanie elektromobilov. Koľko percent 4m, respektíve 5m áut budú o dva roky elektromobily?\n\nPoskytnite bodové aj intervalové odhady. Navyše, navrhnite a vytvorte graf, ktorý by ste odprezentovali spoločnosti Anonymous International v rámci prezentácie svojho výsledku.\n\n",
"_____no_output_____"
],
[
"## Rady k vytváraniu grafu",
"_____no_output_____"
],
[
"Spraviť dobrý a informatívny graf je náročná úloha a vyžaduje si veľa cviku. Nasledovné otázky vám môžu byť nápomocné:\n1. Akú hlavnú myšlienku / hypotézu má graf znázorňovať? *Príklad: v súčasnej dobe sa rodí na Slovensku menej detí ako v minulosti.*\n1. Aké dáta nám pomôžu prezentovať túto myšlienku? *Príklad: počet narodených detí v minulosti a v súčasnosti.*\n1. Ako tieto dáta najlepšie znázorním? *Príklad: klasickým grafom, rok na jednej osi a počet narodených detí na druhej osi*\n1. Ako toto znázornenie prevediem? *Príklad: dám do googlu \"line chart plotly python\", kliknem na prvý link a dostanem sa [sem](https://plot.ly/python/line-charts/), kde mám všetko vysvetlené*\n\nOdporúčame vám používať knižnicu *plotly* na vizualizáciu interaktívnych grafov. Príklady používania nájdete vo veľmi dobrej a podrobnej dokumentácii tejto knižnice, alebo aj priamo v tejto hodine.\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e73fd0c4bd9afe495c6fe74236d44ba2a77294a1 | 151,492 | ipynb | Jupyter Notebook | 5_ML_Classification.ipynb | jhonsonlee/basics-of-machine-learning | cb3ab1e534e5e202fdf93911bdb81ab24ba26dda | [
"MIT"
] | null | null | null | 5_ML_Classification.ipynb | jhonsonlee/basics-of-machine-learning | cb3ab1e534e5e202fdf93911bdb81ab24ba26dda | [
"MIT"
] | 1 | 2020-09-06T08:01:03.000Z | 2020-09-06T08:34:36.000Z | 5_ML_Classification.ipynb | jhonsonlee/basics-of-machine-learning | cb3ab1e534e5e202fdf93911bdb81ab24ba26dda | [
"MIT"
] | null | null | null | 151,492 | 151,492 | 0.92826 | [
[
[
"%matplotlib inline\nimport matplotlib\nimport seaborn as sns\nsns.set()\nmatplotlib.rcParams['figure.dpi'] = 144",
"/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"# Classification",
"_____no_output_____"
],
[
"In classification, we predict categorical labels. In regression, we predict quantitative/numerical labels. The critical difference is that we can't take a difference between the predicted and actual category in classification, while we can take a difference between the predicted and actual numerical values in regression. Because of these differences between regression and classification, we use different metrics to evaluate machine learning models trained for classification.\n\nAs with regression, we're trying to determine the model $f$ that can best describes the relationship\n\n$$ y_j = f(X_j). $$\n\nFor classification, $y_j$ can only take a finite set of values. If there are only two such values, we are dealing with **binary** classification. Examples of binary classification are predicting whether it will rain or not and whether someone will default on their loan. If we have more than two classes, we have a **multiclass** problem. For example, image classification is usually multiclass as we are trying to identify an image among a set of values, e.g., a person, a road sign, a car, etc.",
"_____no_output_____"
],
[
"## Accuracy\n\nA natural choice for a metric for classification is accuracy. Accuracy is equal to the number of observations you correctly classified over all observations. For example, if your model properly identified 77 out of 100 images, you have an accuracy of 77%. Accuracy is an easy metric to both understand and calculate. Mathematically, it is simply\n\n$$ \\frac{\\text{number of correct observations}}{\\text{number of observations}}.$$\n\nHowever, accuracy may not always be a good metric. Consider the case of disease detection where only 10% of the observations have the disease. A naive classifier that always predicts the majority class will achieve 90% accuracy. \nWhile the naive model correctly identifies everyone without the disease, it fails to identify any person with the disease. We need a metric that will tell us how well our model performs for a particular class.",
"_____no_output_____"
],
[
"## Precision and recall\n\nFor the example of disease detection, we are more interested in determining our model's performance with regards to the class representing having the disease. Let's call this class **positive** and not having the disease as **negative**. Particularly, we want to know what fraction of all positive predictions were correct and what fraction of positive observations did we identify. The two metrics that describe these values are precision and recall. Precision is the fraction of true positives over all positive predictions. It is a measure of how \"precise\" our model was with regards to labeling observations as positive. Recall, on the other hand, is equal to the fraction of true positives over all positive observations. It is a measure of our model's ability to \"catch\" and properly label observations that are positive.\n\nA confusion matrix is a table summarizing the performance of the model by enumerating true and false positives and the true and false negatives.\n\n| | Positive Observation | Negative Observation |\n|---------------------|:------------------------:|:-----------------------:|\n| Positive Prediction | True Positive (TP) | False Positive (FP) |\n| Negative Prediction | False Negative (FN) | True Negative (TN) |\n\nGiven the definitions used earlier, the equation for precision and recall are\n\n$$ \\text{precision} = \\frac{\\text{TP}}{TP + FP}$$\nand\n$$ \\text{recall} = \\frac{\\text{TP}}{TP + FN}. $$\n\nNote, the difference between the metrics is their denominator. In our disease detection example, if we labeled 12 observations as positive but only 8 were actually true positives, our precision is 0.667. If our data set had 10 positive observations, since we correctly identified or \"recalled\" 8 of them, our recall is 0.8. If we had used the naive model that predicts the majority class, the recall would be 0 and our precision would be undefined.",
"_____no_output_____"
],
[
"What is more important, precision or recall? The answer depends on the specifics of the problem. Having a model that prioritizes less false positives will have a higher precision and a model that aims to reduce the number of false negatives will have a higher recall. You must decide whether your model should prioritize reducing false positives or false negatives. It is often helpful to consider the cost, whether financial, societal, etc., of your model making false positives and false negatives.",
"_____no_output_____"
],
[
"**Questions**\n* For disease detection, is it better to have a higher precision or recall?\n* Does our answer change if we need to have diagnosed patients undergo invasive and risky procedures?\n* Is admissions to a top-tier university a high recall or high precision process?",
"_____no_output_____"
],
[
"We can easily calculate classification metrics using the `sklearn.metrics` module. Let's first generate the result of our fictitious disease detection model. In addition to precision and recall, there is that $F_1$ score which is the harmonic mean of precision and recall. It is a nice metric to use when we don't have a preference over precision and recall. Note, the function `metrics.classification_report` will calculate the metrics for both scenarios of what class is considered positive or negative.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom sklearn import metrics\n\n# generate our results\ny_pred = np.zeros(100, dtype=np.int32)\ny_pred[:12] = 1\ny = np.zeros(100)\ny[:8] = 1\ny[-2:] = 1\n\nprint(\"precision: {:g}\".format(metrics.precision_score(y, y_pred)))\nprint(\"recall: {:g}\".format(metrics.recall_score(y, y_pred)))\nprint(metrics.classification_report(y, y_pred))",
"precision: 0.666667\nrecall: 0.8\n precision recall f1-score support\n\n 0.0 0.98 0.96 0.97 90\n 1.0 0.67 0.80 0.73 10\n\n accuracy 0.94 100\n macro avg 0.82 0.88 0.85 100\nweighted avg 0.95 0.94 0.94 100\n\n"
]
],
[
[
"## Probabilistic Classification Models\nSome classification models do not directly predict a class for an observation but instead reports a probability. For example, it might predict that there's a 75% chance the observation is positive. For the preceding example, should we assign a positive or negative label? The natural choice is to assign the observation as positive since the predicted probability is greater than 50%. However, we don't have to stick to 50%; we can adjust our **threshold** and only classify observations as positive if our models predicts a greater than 90% probability. By increasing the threshold, we will make our model only make positive predictions when it is very certain and confident. Conversely, if we lower our threshold, our model will more liberally assign positive labels. Adjusting threshold affects the models precision and recall. \n\nAs we started to see earlier, there is tradeoff between precision and recall that becomes more apparent with probabilistic models. Let's explore and visualize the tradeoff between precision and recall. We'll generate some data ",
"_____no_output_____"
]
],
[
[
"# generate data\nnp.random.seed(0)\ny_proba = np.linspace(0, 1, 1000)\ny_pred = (y_proba > 0.5).astype(np.int32)\ny = np.random.binomial(1, y_proba)\n\nprint(\"accuracy: {}\".format(metrics.accuracy_score(y, y_pred)))",
"accuracy: 0.75\n"
],
[
"precision, recall, threshold = metrics.precision_recall_curve(y, y_proba)\nf1_score = 2*precision*recall/(precision + recall)\nthreshold = np.hstack((0, threshold))\n\nplt.plot(threshold, precision)\nplt.plot(threshold, recall)\nplt.plot(threshold, f1_score)\nplt.xlabel('threshold')\nplt.legend(['precision', 'recall', '$F_1$']);",
"_____no_output_____"
]
],
[
[
"In the above figure, we see how increasing the threshold led to higher precision but lower recall. The threshold that yielded the largest $F_1$ score was about 0.36. Any probabilistic model can achieve any arbitrary level of precision and recall by adjusting the threshold. As such, when comparing the performance of probabilistic classifiers, we need a single metric that is not dependent on threshold.",
"_____no_output_____"
],
[
"## Area under the curve\nThe precision-recall curve illustrates the tradeoff for a particular classifier. While there will always be a tradeoff between these two metrics, ideally the tradeoff should not be severe. In other words, the model should not sacrifice a large amount of precision to slightly improve recall. We can visualize the degree of the tradeoff by plotting what is known as a precision-recall curve.",
"_____no_output_____"
]
],
[
[
"plt.plot(recall, precision)\nplt.xlabel('recall')\nplt.ylabel('precision')\nplt.xlim([0, 1])\nplt.ylim([0, 1]);",
"_____no_output_____"
]
],
[
[
"We want a model that has less tradeoff between precision and recall, resulting in a curve with less of a drop with increasing recall. Geometrically, it is better to have a model with a larger area under the curve, **AUC**, of its precision-recall plot. In `scikit-learn`, the AUC can be calculated using the `metrics.auc` function. In addition to **AUC**, there is the **ROC-AUC** metric which is based on the receiver-operator curve (ROC). The ROC plots the true positive rate against the false negative rate.",
"_____no_output_____"
]
],
[
[
"print(\"precision-recall AUC: {}\".format(metrics.auc(recall, precision)))\nprint(\"receiver-operator AUC: {}\".format(metrics.roc_auc_score(y, y_proba)))",
"precision-recall AUC: 0.833677363943477\nreceiver-operator AUC: 0.834057379672299\n"
]
],
[
[
"In the example, the resulting model had similar values for AUC and ROC. In general, if your data is imbalanced (more observation of the negative class) or if you care more about false positives you should rely on AUC of the precision-recall curve. Note, the number of true negatives are not factored in calculating either precision or recall.",
"_____no_output_____"
],
[
"## Log loss\nA metric often used for optimizing probabilistic classifiers is the log loss function, sometimes referred to as cross entropy. Log loss takes into account uncertainty of your models predictions, something accuracy does not do. The equation for the log loss is\n\n$$ -\\sum_j \\left[y_j \\log(p_j) + (1-y_j)\\log(1 - p_j) \\right], $$\n\nwhere $y_j$ is the class label of an observation, either 0 or 1, and $p_j$ is the probability the observation is in class 1. The lower the log loss, the better the model.\n\nThe log loss is harder to interpret than other metrics such as accuracy. It measures not only whether the model will correctly classify an observation but rewards the model if it's confidence of a correct prediction is high. Conversely, it will severely penalize the model for being overly confident in a prediction that is wrong. For example, a model will have a lower log loss if it predicts a probability of 95% for an observation of class 1 than if it had predicted 60%. Thus, it is possible for two models to have the same accuracy yet have different log loss. Despite being slightly harder to interpret, it reveals more information of a model's performance than accuracy. The figure below displays the reduction of log loss as a model becomes more certain in its prediction.",
"_____no_output_____"
]
],
[
[
"p = np.linspace(1E-6, 1-1E-6, 1000)\ny = 1\nlog_loss = -(y*np.log(p) + (1 - y)*np.log(1 - p))\n\nplt.plot(p, log_loss)\nplt.xlabel('probability')\nplt.ylabel('log loss')\nplt.legend(['$y$ = 1']);",
"_____no_output_____"
]
],
[
[
"## Logistic regression\nThe logistic regression model is the classifier version of linear regression. It is a probabilistic model; it will predict probability values that can then be used to assign class labels. The model works by taking the output of a linear regression model and feeds it into a sigmoid or logistic function. Mathematically\n\n$$ p_j = S\\left( \\sum_{i} X_{ji}\\beta_i + \\beta_0 \\right), $$\nwhere\n$$ S(x) = \\frac{1}{1 + e^{-x}} $$",
"_____no_output_____"
],
[
"The reason for the sigmoid function is that it maps values that range from positive to negative infinity to values that only range from 0 to 1. Thus, the output of the sigmoid function can be interpreted as a probability. An example of the sigmoid function is shown below.",
"_____no_output_____"
]
],
[
[
"x = np.linspace(-10, 10, 100)\ns = 1/(1 + np.exp(-x))\n\nplt.plot(x, s)\nplt.xlabel('$x$')\nplt.ylabel('$S(x)$');",
"_____no_output_____"
]
],
[
[
"The $\\beta$ coefficients of the model are chosen to minimize the log loss. Unlike linear regression, there is no closed-form solution to the optimal coefficient. Instead, the coefficients are solved using gradient descent.\n\nLet's train a logistic regression model through `scikit-learn`. We'll first train a model and plot it's **decision boundary**. The decision boundary is a boundary (or hypersurface in larger dimensional spaces) that illustrates how the model classifies observations. A decision boundary is a nice way to visualize how the model is making predictions.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import make_blobs\nX, y = make_blobs(centers=[[1, 1], [-1, -1]], cluster_std=1.5, random_state=0)\n\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.bwr)\nplt.xlabel('$x_1$')\nplt.ylabel('$x_2$');",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression\n\nclf = LogisticRegression(solver='lbfgs')\nclf.fit(X, y)\ny_pred = clf.predict(X)\n\nprint(\"accuracy: {}\".format(metrics.accuracy_score(y, y_pred)))",
"accuracy: 0.84\n"
],
[
"X1, X2 = np.meshgrid(np.linspace(-5, 5), np.linspace(-5, 5))\ny_proba = clf.predict_proba(np.hstack((X1.reshape(-1, 1), X2.reshape(-1, 1))))[:, 1]\nplt.contourf(X1, X2, y_proba.reshape(50, 50), cmap=plt.cm.bwr, alpha=0.75, vmin=0, vmax=0.95)\nplt.colorbar()\n\nplt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='white', cmap=plt.cm.bwr)\nplt.xlabel('$x_1$')\nplt.ylabel('$x_2$');",
"_____no_output_____"
]
],
[
[
"Notice that the classifier forms a linear decision boundary; logistic regression models are referred to as linear classifiers. The model forms a linear boundary because there is a linear relationship between the features and the input of the sigmoid function.",
"_____no_output_____"
],
[
"## Multiclass classification for binary classifier\nSome classifiers can only model problems with two classes. For example, logistic regression is a binary classifier. However, there are ways to modify binary classifiers to extend them to predict multiple classes. Two common methods are the **one-vs-all** and the **one-vs-one** scheme.\n\nIn one-vs-all, you train $k$ binary classifiers, where $k$ is the number of classes. Each binary classifier represents training with observation of class $k$ and class not $k$. The probability of being in each class is calculated using an equation that normalizes the output of each classifier\n\n$$ p_k = \\frac{f_k}{\\sum_{k} f_k} , $$\n\nwhere $f_k$ is the output of classifier $k$ and $p_k$ is the probability of the observation being in class $k$. Given the normalization, the sum of $p_k$ for all values of $k$ is equal to 1.\n\nIn one-vs-one, we training classifiers of all possible pairings between the classes. For example, if we have classes A, B, C, and D we train classifiers for A vs. B, A vs. C, A vs. D, B vs. C, B vs. D, and C vs. D. If we have $k$ classes, we train $k(k-1)/2$ classifiers.\n\n** Questions**\n* If the one-vs-all scheme usually requires training less classifiers, when would it be better to deploy one-vs-one? ",
"_____no_output_____"
],
[
"*Copyright © 2019 The Data Incubator. All rights reserved.*",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e73fd92063ca9256160758b1c33051fc6e78b8d5 | 13,251 | ipynb | Jupyter Notebook | IMDB Movie Reviews Sentiment Analysis using LSTM, GRU and CNN.ipynb | sproboticworks/ml-course | dabd81bd9062a9e10f22738497b03bb47446c13b | [
"Apache-2.0"
] | null | null | null | IMDB Movie Reviews Sentiment Analysis using LSTM, GRU and CNN.ipynb | sproboticworks/ml-course | dabd81bd9062a9e10f22738497b03bb47446c13b | [
"Apache-2.0"
] | null | null | null | IMDB Movie Reviews Sentiment Analysis using LSTM, GRU and CNN.ipynb | sproboticworks/ml-course | dabd81bd9062a9e10f22738497b03bb47446c13b | [
"Apache-2.0"
] | null | null | null | 27.838235 | 304 | 0.481549 | [
[
[
"<a href=\"https://colab.research.google.com/github/sproboticworks/ml-course/blob/master/IMDB%20Movie%20Reviews%20Sentiment%20Analysis%20using%20LSTM%2C%20GRU%20and%20CNN.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Import Packages",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nimport tensorflow_datasets as tfds",
"_____no_output_____"
]
],
[
[
"# Load IMDB dataset",
"_____no_output_____"
]
],
[
[
"imdb, info = tfds.load(\"imdb_reviews\", with_info=True, as_supervised=True)",
"_____no_output_____"
],
[
"train_data, test_data = imdb['train'], imdb['test']",
"_____no_output_____"
],
[
"training_sentences = []\ntraining_labels = []\ntesting_sentences = []\ntesting_labels = []\n\nfor s,l in train_data:\n training_sentences.append(str(s.numpy()))\n training_labels.append(l.numpy())\n \nfor s,l in test_data:\n testing_sentences.append(str(s.numpy()))\n testing_labels.append(l.numpy())",
"_____no_output_____"
],
[
"training_labels_final = np.array(training_labels)\ntesting_labels_final = np.array(testing_labels)",
"_____no_output_____"
]
],
[
[
"# Tokenization",
"_____no_output_____"
]
],
[
[
"vocab_size = 10000\nembedding_dim = 16\nmax_length = 120\ntrunc_type='post'\noov_tok = \"<OOV>\"",
"_____no_output_____"
],
[
"tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\n\nword_index = tokenizer.word_index",
"_____no_output_____"
],
[
"sequences = tokenizer.texts_to_sequences(training_sentences)\npadded = pad_sequences(sequences,maxlen=max_length, truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences,maxlen=max_length)",
"_____no_output_____"
],
[
"reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])\n\nprint(decode_review(padded[0]))\nprint(training_sentences[0])",
"_____no_output_____"
]
],
[
[
"# Build LSTM Model",
"_____no_output_____"
]
],
[
[
"model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),\n tf.keras.layers.Dense(6, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"# Train Model",
"_____no_output_____"
]
],
[
[
"num_epochs = 10\nhistory = model.fit(padded, \n training_labels_final, \n epochs=num_epochs, \n validation_data=(testing_padded, testing_labels_final))",
"_____no_output_____"
]
],
[
[
"# Visualize the training graph",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\ndef plot_graphs(history, string):\n plt.plot(history.history[string])\n plt.plot(history.history['val_'+string])\n plt.xlabel(\"Epochs\")\n plt.ylabel(string)\n plt.legend(['training '+string, 'validation '+string])\n plt.show()\n\nplot_graphs(history, 'accuracy')\nplot_graphs(history, \"loss\")",
"_____no_output_____"
]
],
[
[
"# Using GRU Model",
"_____no_output_____"
]
],
[
[
"model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)),\n tf.keras.layers.Dense(6, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"# Using CNN Model",
"_____no_output_____"
]
],
[
[
"model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Conv1D(128, 5, activation='relu'),\n tf.keras.layers.GlobalAveragePooling1D(),\n tf.keras.layers.Dense(6, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"# Download Embedding files",
"_____no_output_____"
]
],
[
[
"e = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape) # shape: (vocab_size, embedding_dim)",
"_____no_output_____"
],
[
"import io\n\nout_v = io.open('vecs.tsv', 'w', encoding='utf-8')\nout_m = io.open('meta.tsv', 'w', encoding='utf-8')\nfor word_num in range(1, vocab_size):\n word = reverse_word_index[word_num]\n embeddings = weights[word_num]\n out_m.write(word + \"\\n\")\n out_v.write('\\t'.join([str(x) for x in embeddings]) + \"\\n\")\nout_v.close()\nout_m.close()",
"_____no_output_____"
],
[
"try:\n from google.colab import files\nexcept ImportError:\n pass\nelse:\n files.download('vecs.tsv')\n files.download('meta.tsv')",
"_____no_output_____"
]
],
[
[
"# Predicting Sentiment in new Reviews",
"_____no_output_____"
]
],
[
[
"# Use the model to predict a review \nfake_reviews = [\"Awesome movie\", \"It's been a long time since I watched a good movie like this\",\n \"It was very dragging and boring till first half but it picked the pace during 2nd half\",\n \"Waste of money!!\", \"Sci-Fi movie of the year\"]\n\nprint(fake_reviews) \n\n# Create the sequences\npadding_type='post'\nsample_sequences = tokenizer.texts_to_sequences(fake_reviews)\nfakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length) \n\nprint('\\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\\n') \n\nclasses = model.predict(fakes_padded)\n\n# The closer the class is to 1, the more positive the review is deemed to be\nfor x in range(len(fake_reviews)):\n print(fake_reviews[x])\n print(classes[x])\n print('\\n')\n\n# Try adding reviews of your own\n# Add some negative words (such as \"not\") to the good reviews and see what happens\n# For example:\n# they gave us free chocolate cake and did not charge us",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e73fe7ac2d98577f8d00536bedde4da3eada30b0 | 10,635 | ipynb | Jupyter Notebook | Deep Learning/Tutorials/Alexnet_in_keras.ipynb | surajch77/ML-Examples | 10f68c0a1b5367b63ec0505e74bfb40aff437125 | [
"Apache-2.0"
] | 2 | 2018-10-07T09:33:50.000Z | 2020-09-12T14:02:29.000Z | Deep Learning/Tutorials/Alexnet_in_keras.ipynb | surajch77/ML-Examples | 10f68c0a1b5367b63ec0505e74bfb40aff437125 | [
"Apache-2.0"
] | null | null | null | Deep Learning/Tutorials/Alexnet_in_keras.ipynb | surajch77/ML-Examples | 10f68c0a1b5367b63ec0505e74bfb40aff437125 | [
"Apache-2.0"
] | null | null | null | 30.299145 | 135 | 0.530418 | [
[
[
"# AlexNet in Keras ",
"_____no_output_____"
],
[
"Build a deep convolutional neural network in to classify MNIST digits",
"_____no_output_____"
],
[
"#### Set seed for reproducibility",
"_____no_output_____"
]
],
[
[
"import numpy as np\nnp.random.seed(42)",
"_____no_output_____"
]
],
[
[
"#### Load dependencies",
"_____no_output_____"
]
],
[
[
"import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.layers import Flatten, MaxPooling2D, Conv2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.activations import softmax, relu, tanh, sigmoid\nfrom keras.callbacks import TensorBoard",
"Using TensorFlow backend.\n"
]
],
[
[
"#### Load and preprocess the data",
"_____no_output_____"
]
],
[
[
"import tflearn.datasets.oxflower17 as oxflower17\nX, Y = oxflower17.load_data(one_hot=True)",
"_____no_output_____"
]
],
[
[
"#### Design Neural Network architecture",
"_____no_output_____"
]
],
[
[
"X.shape",
"_____no_output_____"
],
[
"Y.shape",
"_____no_output_____"
],
[
"Y[0]",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Conv2D(96, kernel_size=(11, 11), strides=(4, 4), activation='relu', input_shape=(224, 224, 3)))\nmodel.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\nmodel.add(BatchNormalization())\n\nmodel.add(Conv2D(256, kernel_size=(5, 5), strides=(1, 1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\nmodel.add(BatchNormalization())\n\nmodel.add(Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu'))\nmodel.add(Conv2D(384, kernel_size=(3, 3), strides=(1, 1), activation='relu'))\nmodel.add(Conv2D(384, kernel_size=(3, 3), strides=(1, 1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\nmodel.add(BatchNormalization())\n\nmodel.add(Flatten())\nmodel.add(Dense(4096, activation='tanh'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(4096, activation='tanh'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(17, activation='softmax'))",
"_____no_output_____"
],
[
"model.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_1 (Conv2D) (None, 54, 54, 96) 34944 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 26, 26, 96) 0 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 26, 26, 96) 384 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 22, 22, 256) 614656 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 10, 10, 256) 0 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 10, 10, 256) 1024 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 8, 8, 256) 590080 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 6, 6, 384) 885120 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 4, 4, 384) 1327488 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 1, 1, 384) 0 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 1, 1, 384) 1536 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 384) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 4096) 1576960 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 4096) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 4096) 16781312 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 4096) 0 \n_________________________________________________________________\ndense_3 (Dense) 
(None, 17) 69649 \n=================================================================\nTotal params: 21,883,153\nTrainable params: 21,881,681\nNon-trainable params: 1,472\n_________________________________________________________________\n"
]
],
[
[
"#### Compile the neural network",
"_____no_output_____"
]
],
[
[
"model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"#### Configure the Tensorboard",
"_____no_output_____"
]
],
[
[
"tb = TensorBoard(\"/home/suraj/Desktop/Anaconda/TensorflowLiveLessons/Tutorials/logs/alexnet\")",
"_____no_output_____"
]
],
[
[
"#### Train the model ",
"_____no_output_____"
]
],
[
[
"model.fit(X, Y, batch_size=64, epochs=10, verbose=1, validation_split=0.1, shuffle=True)",
"Train on 1224 samples, validate on 136 samples\nEpoch 1/10\n1224/1224 [==============================] - 50s 41ms/step - loss: 4.8063 - acc: 0.2018 - val_loss: 7.4674 - val_acc: 0.1985\nEpoch 2/10\n1224/1224 [==============================] - 47s 39ms/step - loss: 3.2963 - acc: 0.2794 - val_loss: 4.2938 - val_acc: 0.1029\nEpoch 3/10\n1224/1224 [==============================] - 45s 36ms/step - loss: 2.5559 - acc: 0.3391 - val_loss: 3.1172 - val_acc: 0.2500\nEpoch 4/10\n1224/1224 [==============================] - 51s 42ms/step - loss: 2.1345 - acc: 0.4077 - val_loss: 3.4244 - val_acc: 0.2279\nEpoch 5/10\n1224/1224 [==============================] - 49s 40ms/step - loss: 2.3575 - acc: 0.4036 - val_loss: 3.6819 - val_acc: 0.3235\nEpoch 6/10\n1224/1224 [==============================] - 49s 40ms/step - loss: 2.2321 - acc: 0.4060 - val_loss: 2.8063 - val_acc: 0.3235\nEpoch 7/10\n1224/1224 [==============================] - 51s 41ms/step - loss: 2.3155 - acc: 0.3938 - val_loss: 2.3873 - val_acc: 0.4191\nEpoch 8/10\n1224/1224 [==============================] - 50s 41ms/step - loss: 2.0727 - acc: 0.4461 - val_loss: 2.8679 - val_acc: 0.3162\nEpoch 9/10\n1224/1224 [==============================] - 51s 42ms/step - loss: 2.2253 - acc: 0.4404 - val_loss: 2.9421 - val_acc: 0.3971\nEpoch 10/10\n1224/1224 [==============================] - 49s 40ms/step - loss: 2.0571 - acc: 0.4665 - val_loss: 2.1597 - val_acc: 0.4559\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e73ff576d01f0a418b7436637bc3f3708efae50c | 287,993 | ipynb | Jupyter Notebook | Chapter04/Linear regression/Chapter 3 - Linear Regression.ipynb | YMandCL/Ensemble-Machine-Learning-Cookbook | fda0aa11cb80d1481f9b597f903f0ee438ac7778 | [
"MIT"
] | 42 | 2019-02-13T08:18:21.000Z | 2021-12-23T10:14:16.000Z | Chapter04/Linear regression/Chapter 3 - Linear Regression.ipynb | YMandCL/Ensemble-Machine-Learning-Cookbook | fda0aa11cb80d1481f9b597f903f0ee438ac7778 | [
"MIT"
] | 3 | 2019-05-16T17:16:38.000Z | 2020-12-03T05:09:44.000Z | Chapter04/Linear regression/Chapter 3 - Linear Regression.ipynb | YMandCL/Ensemble-Machine-Learning-Cookbook | fda0aa11cb80d1481f9b597f903f0ee438ac7778 | [
"MIT"
] | 47 | 2019-01-30T01:59:25.000Z | 2022-03-27T09:59:59.000Z | 140.759042 | 77,348 | 0.825958 | [
[
[
"# Linear Regression - to be submitted",
"_____no_output_____"
]
],
[
[
"# import required libraries\nimport os\nimport pandas as pd\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"os.chdir(\".../Chapter 3/Linear Regression\")\nos.getcwd()",
"_____no_output_____"
]
],
[
[
"#### Read data",
"_____no_output_____"
]
],
[
[
"df_housingdata = pd.read_csv(\"Final_HousePrices.csv\")",
"_____no_output_____"
],
[
"df_housingdata.head(5)",
"_____no_output_____"
]
],
[
[
"#### We start by identifying our numeric and categorical variables.",
"_____no_output_____"
]
],
[
[
"df_housingdata.dtypes",
"_____no_output_____"
],
[
"df_housingdata.corr(method='pearson')",
"_____no_output_____"
]
],
[
[
"#### Besides the correlation between the variables, we'd also like to study the correlation between the predictor variables and the response variable.",
"_____no_output_____"
]
],
[
[
"correlation = df_housingdata.corr(method='pearson')\n\n# Our response variable \"SalePrice\" is in the last. We remove correlation with itself.\ncorrelation_response = correlation.iloc[-1][:-1]\n\n# variables sorted in descending manner\ncorrelation_response.sort_values(ascending=False)",
"_____no_output_____"
]
],
[
[
"#### To sort correlations by absolute values",
"_____no_output_____"
]
],
[
[
"correlation_response[abs(correlation_response).argsort()[::-1]]",
"_____no_output_____"
]
],
[
[
"#### Correlation",
"_____no_output_____"
]
],
[
[
"# Generate a mask for the upper triangle\n# np.zeros_like - Returns an array of zeros with the same shape and type as per given array\n# In this case we pass the correlation matrix\n# we create a variable \"mask\" which is a 14 X 14 numpy array\nmask = np.zeros_like(correlation, dtype=np.bool)\n\n# We create a tuple with triu_indices_from() by passing the \"mask\" array\n# k is used to offset diagonal\n# with k=0, we offset all diagnoals\n# If we put k=13, means we offset 14-13=1 diagonal \n\n# triu_indices_from() Return the indices for the upper-triangle of arr.\nmask[np.triu_indices_from(mask, k=0)] = True\n\n# Setting the plot size\nfig, axis = plt.subplots(figsize=(11, 11))\n\n# cbar_kws={\"shrink\": 0.5} is shrinking the legend color bar\nsns.heatmap(correlation, mask=mask, cmap=\"YlGnBu\", vmax=.3, center=0,\n square=True, linewidths=.1, cbar_kws={\"shrink\": 0.5})",
"_____no_output_____"
]
],
[
[
"#### See distribution of the target variable",
"_____no_output_____"
]
],
[
[
"# Setting the plot size\nfig, axis = plt.subplots(figsize=(7, 7))\n\n# We use kde=True to plot the gaussian kernel density estimate\nsns.distplot(df_housingdata['SalePrice'], bins=50, kde=True)",
"_____no_output_____"
]
],
[
[
        "#### We can also use JointGrid() from our seaborn package to plot a combination of plots",
"_____no_output_____"
]
],
[
[
"from scipy import stats\ng = sns.JointGrid(df_housingdata['GarageArea'], df_housingdata['SalePrice'])\ng = g.plot(sns.regplot, sns.distplot)\ng = g.annotate(stats.pearsonr)",
"_____no_output_____"
]
],
[
[
"#### Let us now scale our numeric variables",
"_____no_output_____"
]
],
[
[
"# create a variable to hold the names of the data types viz int16, in32 and so on\nnum_cols = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n\n# Filter out variables with numeric data types\ndf_numcols_only = df_housingdata.select_dtypes(include=num_cols)",
"_____no_output_____"
],
[
"# Importing MinMaxScaler and initializing it\nfrom sklearn.preprocessing import MinMaxScaler\nmin_max=MinMaxScaler()\n\n# Scaling down the numeric variables\n# We exclude SalePrice using iloc() on df_numcols_only data frame\ndf_housingdata_numcols=pd.DataFrame(min_max.fit_transform(df_numcols_only.iloc[:,0:36]), \\\n columns=df_numcols_only.iloc[:,0:36].columns.tolist())",
"_____no_output_____"
]
],
[
[
"#### Perform one-hot encoding on our categorical variables",
"_____no_output_____"
]
],
[
[
"# We exclude all numeric columns\ndf_housingdata_catcol = df_housingdata.select_dtypes(exclude=num_cols)\n\n# Steps to one-hot encoding:\n# We iterate through each categorical column name\n# Create encoded variables for each categorical columns\n# Concatenate the encoded variables to the data frame\n# Remove the original categorical variable\nfor col in df_housingdata_catcol.columns.values:\n one_hot_enoded_variables = pd.get_dummies(df_housingdata_catcol[col],prefix=col)\n df_housingdata_catcol = pd.concat([df_housingdata_catcol,one_hot_enoded_variables],axis=1)\n df_housingdata_catcol.drop([col],axis=1, inplace=True)",
"_____no_output_____"
],
[
"df_housedata = pd.concat([df_housingdata_numcols, df_housingdata_catcol], axis=1)",
"_____no_output_____"
],
[
"# Concatenate SalePrice to the final data frame\ndf_housedata_final = pd.concat([df_housedata, df_numcols_only.iloc[:,36]], axis=1)",
"_____no_output_____"
],
[
"df_housedata_final.shape",
"_____no_output_____"
],
[
"# Create feature and response variable set\n# We create train & Test sample from our dataset\nfrom sklearn.model_selection import train_test_split\n\n# create feature & response variables\nX = df_housedata_final.iloc[:,0:302]\nY = df_housedata_final['SalePrice']\n\n# Create train & test sets\nX_train, X_test, Y_train, Y_test = \\\ntrain_test_split(X, Y, test_size=0.30, random_state=1)",
"_____no_output_____"
]
],
[
[
"### Linear model fitted by minimizing a regularized empirical loss with SGD",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom sklearn.linear_model import SGDRegressor\n\nlin_model = SGDRegressor()\n\n# We fit our model with train data\nlin_model.fit(X_train, Y_train)\n\n# We use predict() to predict our values\nlin_model_predictions = lin_model.predict(X_test)\n\n# We check the coefficient of determination with score()\nprint(lin_model.score(X_test, Y_test))\n\n# We can also check the coefficient of determination with r2_score() from sklearn.Metrics\nfrom sklearn.metrics import r2_score\nprint(r2_score(Y_test, lin_model_predictions))",
"0.805760693173\n0.805760693173\n"
],
[
"from sklearn.metrics import mean_squared_error\nmse = mean_squared_error(lin_model_predictions, Y_test)\nrmse = np.sqrt(mse)\nprint(rmse)",
"37241.4751428\n"
],
[
"# We can plot the actuals and the predicted values \nplt.figure(figsize=(8, 8))\nplt.scatter(Y_test, lin_model_predictions)\nplt.xlabel('Actual Median value of house prices ($1000s)')\nplt.ylabel('Predicted Median value of house prices ($1000s)')\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"#### We change the hyper-parameters and compare the results",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom sklearn.linear_model import SGDRegressor\n\nlin_model = SGDRegressor(alpha=0.0000001, max_iter=2000)\n\n# We fit our model with train data\nlin_model.fit(X_train, Y_train)\n\n# We use predict() to predict our values\nlin_model_predictions = lin_model.predict(X_test)\n\n# We check the coefficient of determination with score()\nprint(lin_model.score(X_test, Y_test))\n\n# We can also check the coefficient of determination with r2_score() from sklearn.Metrics\nfrom sklearn.metrics import r2_score\nprint(r2_score(Y_test, lin_model_predictions))",
"0.867391743008\n0.867391743008\n"
],
[
"from sklearn.metrics import mean_squared_error\nmse = mean_squared_error(lin_model_predictions, Y_test)\nrmse = np.sqrt(mse)\nprint(rmse)",
"30771.1408906\n"
],
[
"# We can plot the actuals and the predicted values \nplt.figure(figsize=(8, 8))\nplt.scatter(Y_test, lin_model_predictions)\n#plt.plot([0, 50], [0, 50], '--r')\nplt.xlabel('Actual Median value of owner-occupied homes ($1000s)')\nplt.ylabel('Predicted Median value of owner-occupied homes ($1000s)')\nplt.tight_layout()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7400c7ff742be6263adb947dac937edb9c6de98 | 18,402 | ipynb | Jupyter Notebook | toturial/201801_Nvidia Training Data/案例分享-Jupyter with TensorFlow 展示解說程式/Industrial Defect Inspection with image segmentation - AI tech sharing.ipynb | TW-NCHC/TWGC | 9e4262221d5cdeb16eeaf0a12a4feab123293f8d | [
"MIT"
] | 13 | 2018-01-30T03:47:55.000Z | 2019-04-27T03:39:28.000Z | toturial/201801_Nvidia Training Data/案例分享-Jupyter with TensorFlow 展示解說程式/Industrial Defect Inspection with image segmentation - AI tech sharing.ipynb | TW-NCHC/TWGC | 9e4262221d5cdeb16eeaf0a12a4feab123293f8d | [
"MIT"
] | 3 | 2018-02-01T09:22:53.000Z | 2018-04-18T12:51:37.000Z | toturial/201801_Nvidia Training Data/案例分享-Jupyter with TensorFlow 展示解說程式/Industrial Defect Inspection with image segmentation - AI tech sharing.ipynb | TW-NCHC/TWGC | 9e4262221d5cdeb16eeaf0a12a4feab123293f8d | [
"MIT"
] | 3 | 2019-07-26T03:58:21.000Z | 2021-01-20T02:21:31.000Z | 25.91831 | 576 | 0.554885 | [
[
[
"# Industrial Defect Inspection with image segmentation",
"_____no_output_____"
],
[
"In order to satisfy customers' needs, companies have to guarantee the quality of their products, which can often be achieved only by inspection of the finished product. Automatic visual defect detection has the potential to reduce the cost of quality assurance significantly.",
"_____no_output_____"
],
[
"## Data description",
"_____no_output_____"
],
[
"[`29th Annual Symposium of the German Association for Pattern Recognition, Weakly Supervised Learning for Industrial Optical Inspection, 2007.`](http://resources.mpi-inf.mpg.de/conferences/dagm/2007/prizes.html)",
"_____no_output_____"
],
[
"This dataset is artificially generated, but similar to real world problems. It consists of multiple data sets, each consisting of 1000 images showing the background texture without defects, and of 150 images with one labeled defect each on the background texture. The images in a single data set are very similar, but each data set is generated by a different texture model and defect model.\n\nNot all deviations from the texture are necessarily defects. The algorithm will need to use the weak labels provided during the training phase to learn the properties that characterize a defect.\n\nBelow are sample images from 6 data sets. In these examples, defects are weakly labeled by a surrounding ellipse, shown in red. ",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\n%matplotlib inline\nImage('./userdata/images/WeaklySpervisedLearningforIndustrialOpticalInspection.jpg')",
"_____no_output_____"
]
],
[
[
"### labeling data",
"_____no_output_____"
],
[
        "Each defect inside an image is bounded by an ellipse. The ellipse parameters are provided in a separate .txt file with the format shown below. ",
"_____no_output_____"
],
[
"[filename] \\t \\n\n[semi-major axis] \\t [semi-minor axis] \\t [rotation angle] \\t\n[x-position of the centre of the ellipsoid] \\t [y-position of the centre of the ellipsoid] \\n\n[filename] \\t ... ",
"_____no_output_____"
]
],
[
[
"!cat './dataset/public_defects/Class1_def/labels.txt'",
"_____no_output_____"
]
],
[
[
"## Data Preprocessing/Exploration/Inspection",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"from coslib import plot_ellipse_seg_test\nplot_ellipse_seg_test('./dataset/public_defects/Class1_def/1.png')",
"_____no_output_____"
],
[
"plot_ellipse_seg_test('./dataset/public_defects/Class2_def/1.png')",
"_____no_output_____"
],
[
"plot_ellipse_seg_test('./dataset/public_defects/Class3_def/1.png')",
"_____no_output_____"
],
[
"plot_ellipse_seg_test('./dataset/public_defects/Class4_def/3.png')",
"_____no_output_____"
],
[
"plot_ellipse_seg_test('./dataset/public_defects/Class5_def/1.png')",
"_____no_output_____"
],
[
"plot_ellipse_seg_test('./dataset/public_defects/Class6_def/50.png')",
"_____no_output_____"
],
[
"from coslib import load_images_masks",
"_____no_output_____"
],
[
"X, y = load_images_masks('./dataset/public_defects/Class1_def/', img_type='png', img_format='gray', resize=(512, 512), ellipse=True)",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"y.shape",
"_____no_output_____"
],
[
"plt.imshow(X[0,:,:,0], cmap='gray')",
"_____no_output_____"
],
[
"plt.imshow(y[0,:,:,0], cmap='gray')",
"_____no_output_____"
],
[
"import sklearn",
"_____no_output_____"
],
[
"sklearn.__version__",
"_____no_output_____"
],
[
"from sklearn.cross_validation import train_test_split",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)",
"_____no_output_____"
],
[
"X_train.shape",
"_____no_output_____"
],
[
"X_test.shape",
"_____no_output_____"
]
],
[
[
"## Unet - Fully Convolutional Neuralnetwork",
"_____no_output_____"
],
[
"The u-net is convolutional network architecture for fast and precise segmentation of images. Up to now it has outperformed the prior best method (a sliding-window convolutional network) on the ISBI challenge for segmentation of neuronal structures in electron microscopic stacks. It has won the Grand Challenge for Computer-Automated Detection of Caries in Bitewing Radiography at ISBI 2015, and it has won the Cell Tracking Challenge at ISBI 2015 on the two most challenging transmitted light microscopy categories (Phase contrast and DIC microscopy) by a large margin.",
"_____no_output_____"
]
],
[
[
"Image('./userdata/images/Unet-model.jpg')",
"_____no_output_____"
],
[
"img_rows = 512\nimg_cols = 512",
"_____no_output_____"
],
[
"from keras.models import Model\nfrom keras.layers import Input, merge, Conv2D, MaxPooling2D, UpSampling2D,Lambda, Conv2DTranspose, concatenate\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as K\nfrom scipy.ndimage.measurements import label\nimport time",
"_____no_output_____"
],
[
"### Defining a small Unet\n### Smaller Unet defined so it fits in memory\n\ndef get_small_unet():\n inputs = Input((img_rows, img_cols,1))\n inputs_norm = Lambda(lambda x: x/127.5 - 1.)\n conv1 = Conv2D(8, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(8, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D(16, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(16, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D(32, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([Conv2DTranspose(64, kernel_size=(2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv6)\n\n up7 = concatenate([Conv2DTranspose(32, kernel_size=(2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(32, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([Conv2DTranspose(16, kernel_size=(2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(16, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(16, (3, 3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([Conv2DTranspose(8, kernel_size=(2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)\n 
conv9 = Conv2D(8, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(8, (3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=inputs, outputs=conv10)\n\n \n return model",
"_____no_output_____"
],
[
"model = get_small_unet()",
"_____no_output_____"
],
[
"### IOU or dice coeff calculation\ndef IOU_calc(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n \n return 2*(intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\n\ndef IOU_calc_loss(y_true, y_pred):\n return -IOU_calc(y_true, y_pred)",
"_____no_output_____"
],
[
"smooth = 1.\nmodel.compile(optimizer=Adam(lr=1e-4), loss=IOU_calc_loss, metrics=[IOU_calc])",
"_____no_output_____"
],
[
"history = model.fit(X_train, y_train, batch_size=10, epochs=50, verbose=1, validation_split=0.1)",
"_____no_output_____"
]
],
[
[
"## Learning curves",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(20, 5))\nplt.plot(model.history.history['loss'], label='Train loss')\nplt.plot(model.history.history['val_loss'], label='Val loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()",
"_____no_output_____"
],
[
"plt.figure(figsize=(20, 5))\nplt.plot(model.history.history['IOU_calc'], label='Train IOU')\nplt.plot(model.history.history['val_IOU_calc'], label='Val IOU')\nplt.xlabel('Epochs')\nplt.ylabel('IOU')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"## Predict on testing data",
"_____no_output_____"
]
],
[
[
"predict = model.predict(X_test)",
"_____no_output_____"
],
[
"import numpy as np\nimport cv2\ndef predict_evaluation(pred, image, label):\n '''\n '''\n # transform gray image to rgb\n img = np.array(image, np.uint8)\n rgb_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n # scale pred and mask's pixel range to 0~255\n im_label = np.array(255*label, dtype=np.uint8)\n im_pred = np.array(255*pred, dtype=np.uint8)\n \n # transform both of them to rgb\n rgb_label = cv2.cvtColor(im_label, cv2.COLOR_GRAY2RGB)\n rgb_pred = cv2.cvtColor(im_pred, cv2.COLOR_GRAY2RGB)\n \n rgb_label[:,:,1:3] = 0*rgb_label[:,:,1:2]\n rgb_pred[:,:,0] = 0*rgb_pred[:,:,0]\n rgb_pred[:,:,2] = 0*rgb_pred[:,:,2]\n \n img_pred = cv2.addWeighted(rgb_img, 1, rgb_pred, 0.3, 0)\n img_label = cv2.addWeighted(rgb_img, 1, rgb_label, 0.3, 0)\n \n plt.figure(figsize=(10, 10))\n \n plt.subplot(1,3,1)\n plt.imshow(rgb_img)\n plt.title('Original image')\n plt.axis('off')\n plt.subplot(1,3,2)\n plt.imshow(img_pred)\n plt.title('Prediction')\n plt.axis('off')\n plt.subplot(1,3,3)\n plt.imshow(img_label)\n plt.title('Ground truth')\n plt.axis('off')",
"_____no_output_____"
],
[
"predict_evaluation(predict[0,:,:,0], X_test[0,:,:,0], y_test[0,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[1,:,:,0], X_test[1,:,:,0], y_test[1,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[2,:,:,0], X_test[2,:,:,0], y_test[2,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[3,:,:,0], X_test[3,:,:,0], y_test[3,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[4,:,:,0], X_test[4,:,:,0], y_test[4,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[5,:,:,0], X_test[5,:,:,0], y_test[5,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[6,:,:,0], X_test[6,:,:,0], y_test[6,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[7,:,:,0], X_test[7,:,:,0], y_test[7,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[8,:,:,0], X_test[8,:,:,0], y_test[8,:,:,0])",
"_____no_output_____"
],
[
"predict_evaluation(predict[9,:,:,0], X_test[9,:,:,0], y_test[9,:,:,0])",
"_____no_output_____"
]
],
[
[
"## Save model for later use",
"_____no_output_____"
]
],
[
[
"model_json_string = model.to_json()",
"_____no_output_____"
],
[
"with open('./userdata/model.json', 'w') as f:\n f.write(model_json_string)",
"_____no_output_____"
],
[
"model.save_weights('./userdata/model.h5')",
"_____no_output_____"
],
[
"!ls ./userdata/",
"_____no_output_____"
],
[
"from coslib import convert_keras_to_pb",
"_____no_output_____"
],
[
"convert_keras_to_pb('./userdata/', 'conv2d_19/Sigmoid')",
"_____no_output_____"
],
[
"!ls ./userdata/",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e740120064eba1a43f2ad593dfdcdcca935724fa | 30,374 | ipynb | Jupyter Notebook | examples/SyntheticExample.ipynb | tazitoo/global-attribution-mapping | 3b4f3e5e5f7a613742aff7f1d7af2e945abbbc1e | [
"Apache-2.0"
] | 19 | 2019-02-06T20:09:10.000Z | 2021-09-10T13:39:31.000Z | examples/SyntheticExample.ipynb | tazitoo/global-attribution-mapping | 3b4f3e5e5f7a613742aff7f1d7af2e945abbbc1e | [
"Apache-2.0"
] | 34 | 2019-06-27T16:00:43.000Z | 2022-03-15T01:15:24.000Z | examples/SyntheticExample.ipynb | tazitoo/global-attribution-mapping | 3b4f3e5e5f7a613742aff7f1d7af2e945abbbc1e | [
"Apache-2.0"
] | 21 | 2019-02-07T21:05:17.000Z | 2022-02-11T03:50:53.000Z | 114.18797 | 6,436 | 0.89254 | [
[
[
"import sys\nsys.path.insert(0, '../')\n\nimport numpy as np\nimport pandas as pd\n\nfrom gam import gam\n",
"_____no_output_____"
],
[
"# Balanced Class Dataset\n\nlime_attributions_balanced_path = 'data/lime-synthetic-balanced.csv'",
"_____no_output_____"
],
[
"g = gam.GAM(attributions_path = lime_attributions_balanced_path)\ng.generate()",
"Max Iterations: 1000\nStarting Iteration: 1\nStarting Iteration: 2\n"
],
[
"g.plot(num_features=2)",
"_____no_output_____"
],
[
"g.subpopulation_sizes",
"_____no_output_____"
],
[
"g.explanations",
"_____no_output_____"
],
[
"# Unbalanced Class Dataset\n\nlocal_attribution_path = 'data/lime-synthetic-75-class1.csv'",
"_____no_output_____"
],
[
"g = gam.GAM(attributions_path = local_attribution_path)\ng.generate()",
"Max Iterations: 1000\nStarting Iteration: 1\n"
],
[
"g.plot(num_features=2)",
"_____no_output_____"
],
[
"g.subpopulation_sizes",
"_____no_output_____"
],
[
"g.explanations",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74012c4f52e315a7f1d8e45bd95bbe4f66df934 | 250,553 | ipynb | Jupyter Notebook | 5_Validate_3D_CNN_xVal_wb_mwp1_CAT12_MNI_ADNI3.ipynb | kashikoiga13/InteractiveVis | 84f82ccfa0f9562d92d5cd00b00fee480314d714 | [
"MIT"
] | null | null | null | 5_Validate_3D_CNN_xVal_wb_mwp1_CAT12_MNI_ADNI3.ipynb | kashikoiga13/InteractiveVis | 84f82ccfa0f9562d92d5cd00b00fee480314d714 | [
"MIT"
] | null | null | null | 5_Validate_3D_CNN_xVal_wb_mwp1_CAT12_MNI_ADNI3.ipynb | kashikoiga13/InteractiveVis | 84f82ccfa0f9562d92d5cd00b00fee480314d714 | [
"MIT"
] | null | null | null | 368.460294 | 23,904 | 0.928015 | [
[
[
"# Import data from Excel sheet\nimport pandas as pd\ndf = pd.read_excel('ADNI combined.xlsx', sheet_name='sample')\n#print(df)\nsid = df['RID']\ngrp = df['Group at scan date (1=CN, 2=EMCI, 3=LMCI, 4=AD, 5=SMC)']\nage = df['Age at scan']\nsex = df['Sex (1=female)']\ntiv = df['TIV_CAT12']\nfield = df['MRI_Field_Strength']\namybin = df['SUMMARYSUVR_WHOLECEREBNORM_1.11CUTOFF']\ngrpbin = (grp > 1) # 1=CN, ...",
"_____no_output_____"
],
[
"# Scan for nifti file names\nimport glob\ndataADNI3 = sorted(glob.glob('mwp1_MNI/ADNI3/*.nii.gz'))\ndataFiles = dataADNI3\nnumfiles = len(dataFiles)\nprint('Found ', str(numfiles), ' nifti files')",
"Found 575 nifti files\n"
],
[
"# Match covariate information\nimport re\nimport numpy as np\nfrom pandas import DataFrame\nfrom keras.utils import to_categorical\ndebug = False\ncov_idx = [-1] * numfiles # list; array: np.full((numfiles, 1), -1, dtype=int)\nprint('Matching covariates for loaded files ...')\nfor i,id in enumerate(sid):\n p = [j for j,x in enumerate(dataFiles) if re.search('_%04d_' % id, x)] # extract ID numbers from filename, translate to Excel row index\n if len(p)==0:\n if debug: print('Did not find %04d' % id) # did not find Excel sheet subject ID in loaded file selection\n else:\n if debug: print('Found %04d in %s: %s' % (id, p[0], dataFiles[p[0]]))\n cov_idx[p[0]] = i # store Excel index i for data file index p[0]\nprint('Checking for scans not found in Excel sheet: ', sum(x<0 for x in cov_idx))\n\nlabels = pd.DataFrame({'Group':grpbin}).iloc[cov_idx, :]\nlabels = to_categorical(np.asarray(labels)) # use grps to access original labels\ngrps = pd.DataFrame({'Group':grp, 'RID':sid}).iloc[cov_idx, :]",
"Using TensorFlow backend.\n"
],
[
"# Load residualized data from disk\nimport h5py\nhf = h5py.File('residuals_ADNI3_wb_mwp1_CAT12_MNI.hdf5', 'r')\nhf.keys # read keys\nimages = np.array(hf.get('images'))\nhf.close()\nprint(images.shape)",
"(575, 100, 100, 120, 1)\n"
],
[
"# specify version of tensorflow\n#%tensorflow_version 1.x # <- use this for Google colab\nimport tensorflow as tf\n# downgrade to specific version\n#!pip install tensorflow-gpu==1.15\n#import tensorflow as tf\nprint(tf.__version__)\n\n# disable tensorflow deprecation warnings\nimport logging\nlogging.getLogger('tensorflow').disabled=True",
"1.15.4\n"
],
[
"# helper function to obtain performance result values\ndef get_values(conf_matrix):\n assert conf_matrix.shape==(2,2)\n tn, fp, fn, tp = conf_matrix.ravel()\n sen = tp / (tp+fn)\n spec = tn / (fp+tn)\n ppv = tp / (tp+fp)\n npv = tn / (tn+fn)\n f1 = 2 * ((ppv * sen) / (ppv + sen))\n bacc = (spec + sen) / 2\n return bacc, sen, spec, ppv, npv, f1",
"_____no_output_____"
],
[
"# validation\nimport numpy as np\nfrom sklearn.metrics import roc_curve, auc\nfrom matplotlib import pyplot as plt\n%matplotlib inline\nimport keras\nfrom keras import models\nimport tensorflow as tf\nfrom sklearn.metrics import confusion_matrix\n\nacc_AD, acc_MCI, auc_AD, auc_MCI = [], [], [], []\nbacc_AD, bacc_MCI = [], []\nsen_AD, sen_MCI, spec_AD, spec_MCI = [], [], [], []\nppv_AD, ppv_MCI, npv_AD, npv_MCI = [], [], [], []\nf1_AD, f1_MCI = [], []\n\nnum_kfold = 10 # number of cross-validation loops equal to number of models\nbatch_size = 20\n\nfor k in range(num_kfold):\n print('validating model model_checkpoints/resmodel_wb_cv%d.best.hdf5' % (k+1))\n mymodel = models.load_model('model_checkpoints/resmodel_wb_cv%d.best.hdf5' % (k+1))\n \n # calculate area under the curve\n # AUC as optimization function during training: https://stackoverflow.com/questions/41032551/how-to-compute-receiving-operating-characteristic-roc-and-auc-in-keras\n pred = mymodel.predict(images, batch_size=batch_size)\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n acc = dict()\n for i in range(2): # classes dummy vector: 0 - CN, 1 - MCI/AD\n fpr[i], tpr[i], _ = roc_curve(labels[:, i], pred[:,i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n # Plot the ROC curve\n plt.figure()\n plt.plot(fpr[1], tpr[1], color='darkorange', label='ROC curve (area = %0.2f)' % roc_auc[1])\n plt.plot([0, 1], [0, 1], color='navy', linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()\n \n # redo AUC for binary comparison: AD vs. HC and MCI vs. 
HC\n for i in [3,4]:\n grpi = np.equal(grps.Group.to_numpy(dtype=np.int), np.ones((grps.shape[0],), dtype=np.int)*i)\n grp1 = np.equal(grps.Group.to_numpy(dtype=np.int), np.ones((grps.shape[0],), dtype=np.int))\n grpidx = np.logical_or(grpi, grp1)\n fpr[i], tpr[i], _ = roc_curve(labels[grpidx, 1], pred[grpidx, 1])\n roc_auc[i] = auc(fpr[i], tpr[i])\n acc[i] = np.mean((labels[grpidx, 1] == np.round(pred[grpidx, 1])).astype(int))*100\n\n print('AUC for MCI vs. CN = %0.3f' % roc_auc[3])\n print('AUC for AD vs. CN = %0.3f' % roc_auc[4])\n print('Acc for MCI vs. CN = %0.1f' % acc[3])\n print('Acc for AD vs. CN = %0.1f' % acc[4])\n auc_AD.append(roc_auc[4])\n auc_MCI.append(roc_auc[3])\n acc_AD.append(acc[4])\n acc_MCI.append(acc[3])\n \n print('confusion matrix')\n confmat = confusion_matrix(grps.Group, np.round(pred[:, 1]))\n bacc, sen, spec, ppv, npv, f1 = get_values(confmat[(1,2),0:2]) # MCI\n bacc_MCI.append(bacc); sen_MCI.append(sen); spec_MCI.append(spec); ppv_MCI.append(ppv); npv_MCI.append(npv); f1_MCI.append(f1)\n bacc, sen, spec, ppv, npv, f1 = get_values(confmat[(1,3),0:2]) # AD\n bacc_AD.append(bacc); sen_AD.append(sen); spec_AD.append(spec); ppv_AD.append(ppv); npv_AD.append(npv); f1_AD.append(f1)\n print(confmat[1:4,0:2])",
"validating model model_checkpoints/resmodel_wb_cv1.best.hdf5\n"
],
[
"# print model performance summary\nfrom statistics import mean,stdev\n\nprint('Mean AUC for MCI vs. CN = %0.3f +/- %0.3f' % (mean(auc_MCI), stdev(auc_MCI)))\nprint('Mean AUC for AD vs. CN = %0.3f +/- %0.3f' % (mean(auc_AD), stdev(auc_AD)))\nprint('Mean Acc for MCI vs. CN = %0.3f +/- %0.3f' % (mean(acc_MCI), stdev(acc_MCI)))\nprint('Mean Acc for AD vs. CN = %0.3f +/- %0.3f' % (mean(acc_AD), stdev(acc_AD)))\nprint('Mean Bacc for MCI vs. CN = %0.3f +/- %0.3f' % (mean(bacc_MCI), stdev(bacc_MCI)))\nprint('Mean Bacc for AD vs. CN = %0.3f +/- %0.3f' % (mean(bacc_AD), stdev(bacc_AD)))\nprint('Mean Sen for MCI vs. CN = %0.3f +/- %0.3f' % (mean(sen_MCI), stdev(sen_MCI)))\nprint('Mean Sen for AD vs. CN = %0.3f +/- %0.3f' % (mean(sen_AD), stdev(sen_AD)))\nprint('Mean Spec for MCI vs. CN = %0.3f +/- %0.3f' % (mean(spec_MCI), stdev(spec_MCI)))\nprint('Mean Spec for AD vs. CN = %0.3f +/- %0.3f' % (mean(spec_AD), stdev(spec_AD)))\nprint('Mean PPV for MCI vs. CN = %0.3f +/- %0.3f' % (mean(ppv_MCI), stdev(ppv_MCI)))\nprint('Mean PPV for AD vs. CN = %0.3f +/- %0.3f' % (mean(ppv_AD), stdev(ppv_AD)))\nprint('Mean NPV for MCI vs. CN = %0.3f +/- %0.3f' % (mean(npv_MCI), stdev(npv_MCI)))\nprint('Mean NPV for AD vs. CN = %0.3f +/- %0.3f' % (mean(npv_AD), stdev(npv_AD)))\nprint('Mean F1 for MCI vs. CN = %0.3f +/- %0.3f' % (mean(f1_MCI), stdev(f1_MCI)))\nprint('Mean F1 for AD vs. CN = %0.3f +/- %0.3f' % (mean(f1_AD), stdev(f1_AD)))",
"Mean AUC for MCI vs. CN = 0.677 +/- 0.020\nMean AUC for AD vs. CN = 0.899 +/- 0.013\nMean Acc for MCI vs. CN = 67.349 +/- 2.547\nMean Acc for AD vs. CN = 78.840 +/- 6.310\nMean Bacc for MCI vs. CN = 0.636 +/- 0.015\nMean Bacc for AD vs. CN = 0.817 +/- 0.029\nMean Sen for MCI vs. CN = 0.496 +/- 0.082\nMean Sen for AD vs. CN = 0.858 +/- 0.036\nMean Spec for MCI vs. CN = 0.775 +/- 0.080\nMean Spec for AD vs. CN = 0.775 +/- 0.080\nMean PPV for MCI vs. CN = 0.570 +/- 0.060\nMean PPV for AD vs. CN = 0.438 +/- 0.088\nMean NPV for MCI vs. CN = 0.730 +/- 0.015\nMean NPV for AD vs. CN = 0.967 +/- 0.006\nMean F1 for MCI vs. CN = 0.523 +/- 0.031\nMean F1 for AD vs. CN = 0.573 +/- 0.068\n"
],
[
"results = pd.DataFrame({'AUC_MCI':auc_MCI, 'Acc_MCI':acc_MCI, 'Bacc_MCI':bacc_MCI, 'f1_MCI':f1_MCI,\n 'sen_MCI':sen_MCI, 'spec_MCI':spec_MCI, 'ppv_MCI':ppv_MCI, 'npv_MCI':npv_MCI,\n 'AUC_AD':auc_AD, 'Acc_AD':acc_AD, 'Bacc_AD':bacc_AD, 'f1_AD':f1_AD,\n 'sen_AD':sen_AD, 'spec_AD':spec_AD, 'ppv_AD':ppv_AD, 'npv_AD':npv_AD})\nprint(results)\nresults.to_csv('results_xval_ADNI3_checkpoints.csv')",
" AUC_MCI Acc_MCI Bacc_MCI f1_MCI sen_MCI spec_MCI ppv_MCI \\\n0 0.697574 70.370370 0.669967 0.573034 0.545455 0.794479 0.603550 \n1 0.652054 67.056530 0.630212 0.515759 0.481283 0.779141 0.555556 \n2 0.679210 67.641326 0.642794 0.538889 0.518717 0.766871 0.560694 \n3 0.658550 67.641326 0.626833 0.500000 0.443850 0.809816 0.572414 \n4 0.672238 64.912281 0.613341 0.500000 0.481283 0.745399 0.520231 \n5 0.711853 71.150097 0.639620 0.486111 0.374332 0.904908 0.693069 \n6 0.699747 64.522417 0.641055 0.562500 0.625668 0.656442 0.510917 \n7 0.667334 69.785575 0.634584 0.491803 0.401070 0.868098 0.635593 \n8 0.661592 63.547758 0.629966 0.549398 0.609626 0.650307 0.500000 \n9 0.673862 66.861598 0.628679 0.514286 0.481283 0.776074 0.552147 \n\n npv_MCI AUC_AD Acc_AD Bacc_AD f1_AD sen_AD spec_AD \\\n0 0.752907 0.917796 80.927835 0.840788 0.597826 0.887097 0.794479 \n1 0.723647 0.876262 79.123711 0.816990 0.566845 0.854839 0.779141 \n2 0.735294 0.886924 77.835052 0.802790 0.547368 0.838710 0.766871 \n3 0.717391 0.900950 81.958763 0.840392 0.606742 0.870968 0.809816 \n4 0.714706 0.908074 76.546392 0.808183 0.542714 0.870968 0.745399 \n5 0.716019 0.893083 88.917526 0.855680 0.699301 0.806452 0.904908 \n6 0.753521 0.899342 69.072165 0.763705 0.473684 0.870968 0.656442 \n7 0.716456 0.914556 85.567010 0.829210 0.636364 0.790323 0.868098 \n8 0.743860 0.886874 69.072165 0.776766 0.482759 0.903226 0.650307 \n9 0.722857 0.905897 79.381443 0.831585 0.578947 0.887097 0.776074 \n\n ppv_AD npv_AD \n0 0.450820 0.973684 \n1 0.424000 0.965779 \n2 0.406250 0.961538 \n3 0.465517 0.970588 \n4 0.394161 0.968127 \n5 0.617284 0.960912 \n6 0.325301 0.963964 \n7 0.532609 0.956081 \n8 0.329412 0.972477 \n9 0.429688 0.973077 \n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e740214d7b2be090d4d0f43b3ccbda90cd4979aa | 24,112 | ipynb | Jupyter Notebook | structural-probes/notebooks/Transfer Tense Separation.ipynb | ethanachi/structural-probes | 058dc4316a554813883eb9242d32ede56ceed571 | [
"Apache-2.0"
] | null | null | null | structural-probes/notebooks/Transfer Tense Separation.ipynb | ethanachi/structural-probes | 058dc4316a554813883eb9242d32ede56ceed571 | [
"Apache-2.0"
] | null | null | null | structural-probes/notebooks/Transfer Tense Separation.ipynb | ethanachi/structural-probes | 058dc4316a554813883eb9242d32ede56ceed571 | [
"Apache-2.0"
] | null | null | null | 162.918919 | 11,072 | 0.908469 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"representations = np.load('/u/scr/ethanchi/embeddings/fr-threeway/representations.npy')\nprojection_vector = np.load('/u/scr/ethanchi/embeddings/larger/representations-vector.npy')\n\nverbs = representations[:, 2]\n",
"_____no_output_____"
],
[
"dim = representations.shape[-1]\nprint(projection_vector.shape, verbs.shape)",
"(768,) (1176, 768)\n"
],
[
"plt.figure(figsize=(5, 5))\n\nplt.scatter(verbs[::3] @ projection_vector, np.zeros(verbs[::3].shape[0]), label=\"present\", s=0.1)\nplt.scatter(verbs[2::3] @ projection_vector, np.zeros(verbs[2::3].shape[0]), label=\"future\", s=0.1)\n\nplt.scatter(verbs[1::3] @ projection_vector, np.zeros(verbs[1::3].shape[0]), label=\"past\", s=0.1)\n\nplt.legend()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(5, 5))\n\nplt.scatter(verbs[::2] @ projection_vector, np.zeros(verbs[::2].shape[0]), label=\"present\", s=0.01)\nplt.scatter(verbs[1::2] @ projection_vector, np.zeros(verbs[1::2].shape[0]), label=\"past\", s=0.01)\n\nplt.show()",
"_____no_output_____"
],
[
"print(np.max(verbs[1::2] @ projection_vector))",
"0.4734189\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7402ff9de877a664926f9563510b13bae8f5ada | 84,706 | ipynb | Jupyter Notebook | setbench/setbench/microbench_experiments/tutorial/tutorial.ipynb | cmuparlay/flock | afdcfe55cdd7507c2a19a6e0b30f3115e183cd58 | [
"MIT"
] | 19 | 2022-01-29T02:44:30.000Z | 2022-03-29T15:52:51.000Z | setbench/setbench/microbench_experiments/tutorial/tutorial.ipynb | cmuparlay/flock | afdcfe55cdd7507c2a19a6e0b30f3115e183cd58 | [
"MIT"
] | null | null | null | setbench/setbench/microbench_experiments/tutorial/tutorial.ipynb | cmuparlay/flock | afdcfe55cdd7507c2a19a6e0b30f3115e183cd58 | [
"MIT"
] | 1 | 2022-02-22T05:58:11.000Z | 2022-02-22T05:58:11.000Z | 46.262152 | 558 | 0.603098 | [
[
[
"# Data framework: the basic paradigm\n\nuser implements one function `define_experiment`\n\nthen runs `../../tools/data_framework/run_experiment.py`\n\nit runs potentially many experimental trials (over all defined configurations), captures output, builds a sqlite database, queries it, produces plots, and produces html pages to display plots...\n\nthe data framework also provides lots of tools to do querying, plot generation and analysis in jupyter notebooks (see `instructions_data.ipynb`).\n\nnone of this is specific to setbench! easy to apply to other code bases, as well. (data_framework is self contained--no dependencies on setbench.)\n\n### The following tutorial fully explains the derivation of several non-trivial `define_experiment()` functions.",
"_____no_output_____"
],
[
"# Run the following code cell before any others\n\nIt does basic initialization for this notebook.",
"_____no_output_____"
]
],
[
[
"import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nprint(\"Initialized.\")",
"_____no_output_____"
]
],
[
[
"# The 'hello world' of `run_experiment.sh`\n\ndefining a trivial experiment that compiles and runs a single command once and saves the output.\n\nwe do `run_in_jupyter` and pass `define_experiment`. could alternatively save `define_experiment` in a python file and run the equivalent `run_experiments.sh` command (described in comments)...",
"_____no_output_____"
]
],
[
[
"from _basic_functions import *\ndef define_experiment(exp_dict, args):\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench') ## working dir for compiling\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin') ## working dir for running\n set_cmd_compile (exp_dict, 'make brown_ext_abtree_lf.debra')\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./brown_ext_abtree_lf.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-cr')\n# if the define_experiment() function above were saved in a file myexp.py,\n# then the run_in_jupyter line above is equivalent to running shell command:\n# ../../tools/data_framework/run_experiment.py myexp.py -cr\n#\n# NOTE: -c causes COMPILATION to occur, and -r causes experiments to be RUN",
"_____no_output_____"
]
],
[
[
"# Try the same thing from the command line!\n\n- create a file called `myexp.py` in this directory.\n- start it with `from _basic_functions import *`\n- copy the `define_experiment` function above into `myexp.py`\n- run `../../tools/data_framework/run_experiment.py myexp.py -cr` in the shell (starting from this directory)\n\nif you get an error along the lines of:\n\n`NameError: name 'set_dir_compile' is not defined`\n\nthen you probably forgot to start the file with `from _basic_functions import *`, which is needed in any file where you define a `define_experiment` function for use with `run_experiment.py`.",
"_____no_output_____"
],
[
"# (Re)running results without compiling\n\nyou can rerun experiments without compiling by omitting `-c`",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench') ## working dir for compiling\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin') ## working dir for running\n set_cmd_compile (exp_dict, 'make brown_ext_abtree_lf.debra')\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./brown_ext_abtree_lf.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-r')\n# equiv cmd: [...]/run_experiment.py myexp.py -r",
"_____no_output_____"
]
],
[
[
"# Data files (captured stdout/err)\n\nevery time the data_framework runs your \"run command\" (provided by `set_cmd_run`), the output is automatically saved in a `data file`.\n\nthis is the output of that one run we executed.",
"_____no_output_____"
]
],
[
[
"print(shell_to_str('cat data/data000001.txt'))",
"_____no_output_____"
]
],
[
[
"# Running with varying `run param`eters\n\nof course running one command isn't very interesting... you could do that yourself.\n\ninstead, we want to run the command many times, with different arguments. to this end, we allow the user to specify `run param`s.\n\nthe idea is as follows:\n- call `add_run_param` to make the data framework aware of parameters that you want your experiments to be run with.\n- your program will be run once for each set of values in the CROSS PRODUCT of all parameters.\n- (i.e., we will run your program with every combination of parameters)\n\n### Replacement strings / tokens\n\nyou can use any of the run params you define to dynamically replace `{_tokens_like_this}` in the run command. for example, we include `{DS_TYPENAME}` in our run command, and it will be replaced by the current value of `{DS_TYPENAME}`. (that's right, we can run different commands based on the current value of `DS_TYPENAME`.)\n \nyou can also get the paths to key directories by using:\n- `{__dir_compile}`\n- `{__dir_run}`\n- `{__dir_data}`\n\nthe following replacement token is also defined for you:\n- `{__step}` the number of runs done so far, padded to six digits with leading zeros\n\n*note:* we now need to compile ALL of the binaries we want to *run*. so, we just change our make command to compile everything...\n\n",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6') ## -j specifies how many threads to compile with\n\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-cr')",
"_____no_output_____"
]
],
[
[
"# Extracting data fields from captured stdout/err\n\nNOW we're going to EXTRACT data automatically from the generated data file(s). To do this, we must include the argument `-d` which stands for `database creation`.\n\nnote 3 data files were produced this time: one for each value of `DS_TYPENAME`. let's put those data files to use by specifying that we want to *extract* some text from each data file.\n\nin particular, let's extract a line of the form \"`DS_TYPENAME=...`\" and a line of the form \"`total_throughput=...`\" from each data file. (you can find such lines in the data file above if you like.)\n\nextracted data is stored in a sqlite database `data/output_database.sqlite` in a table called `data`. (each field name passed to `add_data_field` becomes a **column** in `data`.)\n\nto specify a column to be extracted, we call `add_data_field()`. we do this for `total_throughput`, but note that we do *not* have to do this for `DS_TYPENAME`, as it was already added as a `run param`.\n\nwhenever you add a data field, you should choose a column type `coltype` from:\n- `'TEXT'`\n- `'INTEGER'`\n- `'REAL'`\n\nthe `default` if you do not specify is `'TEXT'`. note, however, that allowing the default `'TEXT'` option for a `numeric` field can cause problems when it is time to produce **graphs/plots**!",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER')\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rd')",
"_____no_output_____"
]
],
[
[
"# Querying the database\n\nNote that we can simply **access** the last database we created, *WITHOUT rerunning* any experiments, by omitting all command line args in our `run_in_jupyter` call.\n\nAlso note that you can accomplish the same thing from the **command line** by running `../../tools/data_framework/run_experiment.py myexp.py` with `cmdline_args` omitted. However, since you can't pass your `define_experiments` function as a command line argument, you have to save it in a `.py` file and pass the name `myexp.py` of that file as the first argument to `run_experiment.py`.\n\nTo query the database, we can use function `select_to_dataframe(sql_string)` with a suitable `sql_string`. There are many other powerful functions included for querying and plotting data, but those are covered in `microbench_experiments/example/instructions_data.ipynb`. In **this** notebook we are focusing on the design of the `define_experiment` function.\n\n## Extra columns\n\nNote that the resulting query shows numerous extra columns such as `__hostname`, `__step` and `__cmd_run`, that we did *not* add ourselves. These are added *automatically* by the data framework.",
"_____no_output_____"
]
],
[
[
"import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='')\ndf = select_to_dataframe('select * from data')\ndf\n\n# run_in_jupyter call above has equivalent command:\n# [...]/run_experiment.py myexp.py\n",
"_____no_output_____"
]
],
[
[
"# Suppressing logging output in `run_in_jupyter`\n\nIf you want to call `run_in_jupyter` as above *without* seeing the `logging data` that was copied to stdout, you can disable the log output by calling `disable_tee_stdout()`. Note that logs will still be collected, but the output will **only** go to the log file `output_log.txt`.",
"_____no_output_____"
]
],
[
[
"import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\ndisable_tee_stdout()\nrun_in_jupyter(define_experiment, cmdline_args='')\ndf = select_to_dataframe('select * from data')\nenable_tee_stdout() ## remember to enable, or you won't get output where you DO expect it...\ndf\n",
"_____no_output_____"
]
],
[
[
"# Running multiple trials\n\nif you want to perform repeated trials of each experimental configuration, add a run_param called \"`__trials`\", and specify a list of trial numbers (as below).\n\n(the run_param doesn't *need* to be called `__trials` exactly, but if it is called `__trials` exactly,\nthen extra sanity checks will be performed to verify, for example, that each data point in a graphical plot\nrepresents the average of precisely as many experimental runs as there are entries in the `__trials` list.)\n",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2, 3])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER')\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rd')",
"_____no_output_____"
]
],
[
[
"## Querying the data (to see the multiple trials)",
"_____no_output_____"
]
],
[
[
"select_to_dataframe('select * from data')",
"_____no_output_____"
]
],
[
[
"# Extractors: mining data from arbitrary text\n\nby default, when you call `add_data_field(exp_dict, 'XYZ')`, a field `'XYZ'` will be fetched from each data file using extractor `grep_line()`, which greps (searches) for a line of the form `'XYZ={arbitrary string}\\n'`\n\n*if a field you want to extract is not stored that way in the output data*, then you can specify a custom `extractor` function, as we do in our example with `get_maxres()` below, to extract the max resident size from the 6th space-separated column of the output of the linux \"time\" command.\n\nalso note: each field added with `add_data_field` becomes a replacement token (e.g., `{DS_TYPENAME}`) that can be references in any plot titles, axis titles, field lists, etc. (which we will see more on below).\n\nthe following special fields are also defined for you (and added to the `data` table):\n- `{__step}` the number of runs done so far, padded to six digits with leading zeros\n- `{__cmd_run}` your cmd_run string with any tokens replaced appropriately for this run\n- `{__file_data}` the output filename for the current run's data\n- `{__path_data}` the relative path to the output file for the current run's data\n- `{__hostname}` the result of running the hostname command on the machine\n- `{__id}` a unique row ID\n\nnote: in the following, `defaults` are `validator=is_nonempty` and `extractor=grep_line`.",
"_____no_output_____"
],
[
"## Text output we are *trying* to extract max resident size from\n\nA line of the form:\n\n`960.43user 50.70system 0:06.14elapsed 16449%CPU (0avgtext+0avgdata 3034764maxresident)k`\n\nFrom this, we would like to extract `3034764`, then convert from KB to MB...",
"_____no_output_____"
],
[
"## Extractor that accomplishes this\n\n`input`: an `extractor` function takes, as its arguments: the same `exp_dict` argument as `define_experiment()`, a `file_name` to load data from, and a `field_name` to extract.\n\n`processing`: it should fetch the appropriate contents for that field, from the given `file_name` and return them.\n\n`output`: return type can be a `string`, `int` or `float`.\n\n(in cases like this, where we're writing a custom `extractor` to fetch a specific field, the `field_name` argument ends up being irrelevant.)\n\nyou are free to read the contents of the file, and process the data you see however you like, to come up with the desired return value.\n\nin our case, we will use the `shell_to_str()` utility function provided by the data framework to run a sequence of `bash` shell commands to extract the desired string from the file, then cast it to a `float` and convert it from kilobytes to megabytes.\n\n## (you could just as easily do this with pure python code. the choice is yours.)",
"_____no_output_____"
]
],
[
[
"def get_maxres(exp_dict, file_name, field_name):\n ## manually parse the maximum resident size from the output of `time` and add it to the data file\n maxres_kb_str = shell_to_str('grep \"maxres\" {} | cut -d\" \" -f6 | cut -d\"m\" -f1'.format(file_name))\n return float(maxres_kb_str) / 1000",
"_____no_output_____"
]
],
[
[
"## **Using** this extractor in `define_experiment`\n\nwe actually use this extractor by adding a data field and specifying it:\n\n`add_data_field (exp_dict, 'maxresident_mb', extractor=get_maxres)`",
"_____no_output_____"
]
],
[
[
"def get_maxres(exp_dict, file_name, field_name):\n ## manually parse the maximum resident size from the output of `time` and add it to the data file\n maxres_kb_str = shell_to_str('grep \"maxres\" {} | cut -d\" \" -f6 | cut -d\"m\" -f1'.format(file_name))\n return float(maxres_kb_str) / 1000\n\ndef define_experiment(exp_dict, args):\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2, 3])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER')\n add_data_field (exp_dict, 'maxresident_mb', coltype='REAL', extractor=get_maxres)\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rd')",
"_____no_output_____"
]
],
[
[
"## Viewing the resulting data\n\nnote the `maxresident_mb` column -- highlighted for emphasis using Pandas DataFrame `style.applymap()`.",
"_____no_output_____"
]
],
[
[
"df = select_to_dataframe('select * from data')\n\ndf.style.applymap(lambda s: 'background-color: #b63f3f', subset=pd.IndexSlice[:, ['maxresident_mb']])",
"_____no_output_____"
]
],
[
[
"# Validators: *checking* extracted data\n\nsuppose you want to run some basic *sanity checks* on fields you pull from data files.\n\na `validator` function is a great way of having the data framework perform a basic check on values as they are extracted from data files.\n\npre-existing `validator` functions:\n- `is_positive`\n- `is_nonempty`\n- `is_equal(to_value)`\n\nfor example, suppose we want to verify that `total_throughput` and `maxresident_mb` are both **positive** numbers. to do this, we specify `validator=is_positive` for each, below.\n\nnote: you can write your own `validator` by mimicking the ones in `../../tools/data_framework/_basic_functions.py`. (see `is_positive` and `is_equal`.)",
"_____no_output_____"
]
],
[
[
"def get_maxres(exp_dict, file_name, field_name):\n ## manually parse the maximum resident size from the output of `time` and add it to the data file\n maxres_kb_str = shell_to_str('grep \"maxres\" {} | cut -d\" \" -f6 | cut -d\"m\" -f1'.format(file_name))\n return float(maxres_kb_str) / 1000\n\ndef define_experiment(exp_dict, args):\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2, 3])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n add_data_field (exp_dict, 'maxresident_mb', coltype='REAL', extractor=get_maxres, validator=is_positive)\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rd')",
"_____no_output_____"
]
],
[
[
"# What happens when a field *fails* validation?\n\nwe trigger a validation failure by specifying an obviously incorrect validator `is_equal('hello')`",
"_____no_output_____"
]
],
[
[
"def get_maxres(exp_dict, file_name, field_name):\n ## manually parse the maximum resident size from the output of `time` and add it to the data file\n maxres_kb_str = shell_to_str('grep \"maxres\" {} | cut -d\" \" -f6 | cut -d\"m\" -f1'.format(file_name))\n return float(maxres_kb_str) / 1000\n\ndef define_experiment(exp_dict, args):\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2, 3])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork 1 -nprefill 1 -insdel 5 5 -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_equal('hello'))\n add_data_field (exp_dict, 'maxresident_mb', coltype='REAL', extractor=get_maxres, validator=is_positive)\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rd', error_exit_code=0)",
"_____no_output_____"
]
],
[
[
"# Plotting results (for data with <ins>3 dimensions</ins>)\n\nOne of the main reasons I created the data framework was to make it stupid-easy to produce lots of graphs/plots.\n\nThe main tool for doing this is the `add_plot_set` function.\n\n`add_plot_set()` can be used to cause a SET of plots to be rendered as images in the data directory.\n\nthe precise SET of plots is defined by the fields included in `varying_cols_list` keyword argument.\n (the data framework will iterate over all distinct combinations of values in `varying_cols_list`,\n and will render a plot for each.)\n in the example below, we do *not* pass any `varying_cols_list` argument, so only a single plot is produced.\n\n(we will see where `varying_cols_list` is useful, and how it is used, in some of the later examples...)\n\nNote: a plot's title and filename can only use replacement `{tokens}` that correspond\n to fields THAT ARE INCLUDED in `varying_cols_list[]`.\n (this is because only those tokens are well defined and unique PER PLOT)\n\n### Note: any plots you define are *not actually rendered* unless you add command line argument `-p`",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## tools library for plotting\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel 5 5 -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n\n add_plot_set(\n exp_dict\n , name='throughput.png'\n , title='Throughput vs data structure'\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis='total_throughput'\n , plot_type='bars', plot_cmd_args = '--legend-include'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rdp')",
"_____no_output_____"
]
],
[
[
"## Let's view the data and plot produced by the previous cell\n\n(You have to run the previous cell before running the next one.)",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\ndisplay(Image('data/throughput.png'))\ndisplay(select_to_dataframe('select * from data'))",
"_____no_output_____"
]
],
[
[
"# Plotting data with a custom function\n\nIf you want full control over how your data is plotted, you can specify your own function as the `plot_type` argument.\n\nYour custom function will be called with keyword arguments:\n- `filename` -- the output filename for the plot image\n- `column_filters` -- the *current* values of all fields in `varying_cols_list` (if any)\n- `data` -- a Pandas DataFrame containing the (filtered) data for this plot\n- `series_name` -- name of the column containing `series` in `data` (`''` if no series)\n- `x_name` -- name of the column containing `x-values` in `data`\n- `y_name` -- name of the column containing `y-values` in `data`\n- `exp_dict` -- same as `exp_dict` passed to `define_experiment`\n\nTo *better understand* what data is passed to a custom function, let's create a custom function that just prints its arguments.",
"_____no_output_____"
]
],
[
[
"def my_plot_func(filename, column_filters, data, series_name, x_name, y_name, exp_dict=None):\n print('## filename: {}'.format(filename))\n print('## filters: {}'.format(column_filters))\n print('## data:')\n print(data)\n\ndef define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel 5 5 -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n\n add_plot_set(\n exp_dict\n , name='throughput.png'\n , title='Throughput vs data structure'\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis='total_throughput'\n , plot_type=my_plot_func\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\n\ndisable_tee_stdout() ## disable regular log printing so we ONLY see OUR prints below\nrun_in_jupyter(define_experiment, cmdline_args='-dp')\nenable_tee_stdout()",
"_____no_output_____"
]
],
[
[
"# For example, we can plot this data *manually* using `Pandas`\n\nSince we have `TWO trials` per combination of `DS_TYPENAME` and `TOTAL_THREADS`, we need to aggregate our data somehow before plotting. We can use `pandas` `pivot_table()` function to compute the `mean` of the trials for each data point.\n\nOnce we have a pivot table, we can call `pandas` `plot()` to render it, then use `savefig()` to save it to the provided `filename`.\n\nOf course, you can write your own such functions, and make them arbitrarily complex/customized...",
"_____no_output_____"
]
],
[
[
"import pandas\nimport matplotlib as mpl\n\ndef my_plot_func(filename, column_filters, data, series_name, x_name, y_name, exp_dict=None):\n table = pandas.pivot_table(data, index=x_name, columns=series_name, values=y_name, aggfunc='mean')\n table.plot(kind='line')\n mpl.pyplot.savefig(filename)\n print('## SAVED FIGURE {}'.format(filename))\n\ndef define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel 5 5 -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n\n add_plot_set(\n exp_dict\n , name='throughput.png'\n , title='Throughput vs data structure'\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis='total_throughput'\n , plot_type=my_plot_func\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\ndisable_tee_stdout()\nrun_in_jupyter(define_experiment, cmdline_args='-dp')\nenable_tee_stdout()",
"_____no_output_____"
]
],
[
[
"## Viewing the generated figure",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\ndisplay(Image('data/throughput.png'))",
"_____no_output_____"
]
],
[
[
"# Producing *many* plots (for data with <ins>5 dimensions</ins>)\n\nthe real power of `add_plot_set` only starts to show once you want to plot *many* plots at once.\n\nso, let's add a couple of dimensions to our data:\n- key range (`MAXKEY` in the data file)\n- update rate (`INS_DEL_FRAC` in the data file)\n\nand use them to produce **multiple plots** (one for each combination of values of these dimensions). we do this by specifying `varying_cols_list` in `add_plot_set`.\n\nwe can also customize the plot file`name`s and `title`s with these parameters.\n\n# Showing these plots in a table in an HTML page\n\nwe also generate an HTML page to show off these grids in a table by invoking `add_page_set`.\n\nHTML page construction only occurs if you specify command line argument `-w` (which stands for `website creation`) to `run_experiment.py`. so, we add this to `run_in_jupyter`.\n\nnote: you can also customize the `index.html` starting page (which is blank by default) by providing your own `HTML body` string to the function `set_content_index_html(exp_dict, content_html_string)`.",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## path to tools library\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make bin_dir={__dir_run} -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', [\"0.0 0.0\", \"5.0 5.0\"])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n\n add_plot_set(\n exp_dict\n , name='throughput-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , title='{INS_DEL_FRAC} {MAXKEY}k: throughput'\n , varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis='total_throughput'\n , plot_type='bars'\n )\n\n ## render one legend for all plots (since the legend is the same for all).\n ## if legend varies from plot to plot, you might enable legends for all plots,\n ## or write a custom plotting command that determines what to do, given your data\n add_plot_set(exp_dict, name='throughput-legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')\n\n ## we place the above legend at the bottom of *each* table by providing \"legend_file\"\n add_page_set(\n exp_dict\n , image_files='throughput-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , name='throughput'\n , column_field='INS_DEL_FRAC'\n , row_field='MAXKEY'\n , legend_file='throughput-legend.png'\n )\n\nimport sys ; 
sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rdpw')",
"_____no_output_____"
]
],
[
[
"## Let's view the plots produced by the previous cell\n\nnote you can click on the plots to \"drill down\" into the data.",
"_____no_output_____"
]
],
[
[
"show_html('data/throughput.html')",
"_____no_output_____"
]
],
[
[
"# How about 4 dimensions?\n\nWe just saw how to plot 3- and 5-dimensional data...\n\nLet's remove the `MAXKEY` column / data dimension to reduce the dimensionality of the data to 4.\n\nWith only one column in the `varying_cols_list` and NO `row_field` specified in `add_page_set`, there will only be one row of plots. (So a strip of plots instead of a grid.)",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## path to tools library\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make bin_dir={__dir_run} -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'INS_DEL_FRAC', [\"0.0 0.0\", \"5.0 5.0\"])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k 200000 -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n\n add_plot_set(\n exp_dict\n , name='throughput-{INS_DEL_FRAC}.png'\n , title='{INS_DEL_FRAC}: throughput'\n , varying_cols_list=['INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis='total_throughput'\n , plot_type='bars'\n )\n\n ## render one legend for all plots (since the legend is the same for all).\n ## if legend varies from plot to plot, you might enable legends for all plots,\n ## or write a custom plotting command that determines what to do, given your data\n add_plot_set(exp_dict, name='throughput-legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')\n\n ## we place the above legend at the bottom of *each* table by providing \"legend_file\"\n add_page_set(\n exp_dict\n , image_files='throughput-{INS_DEL_FRAC}.png'\n , name='throughput'\n , column_field='INS_DEL_FRAC'\n , legend_file='throughput-legend.png'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, 
cmdline_args='-rdpw')",
"_____no_output_____"
]
],
[
[
"## Let's view the plots produced by the previous cell",
"_____no_output_____"
]
],
[
[
"show_html('data/throughput.html')",
"_____no_output_____"
]
],
[
[
"# Plots and HTML for data with <ins>6 dimensions</ins>\n\nnote that we could have added more than 2 dimensions of data (resulting in data with 6+ dimensions), listing potentially many fields in `varying_cols_list`, and this simply would have resulted in *more plots*.\n\nnote that if we had **one** more dimension of data (6 dimensions in total), it could be listed in the keyword argument `table_field`, and **multiple** HTML tables would be rendered in a single HTML page (one for each value of this column).",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## path to tools library\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make bin_dir={__dir_run} -j6')\n\n add_run_param (exp_dict, '__trials', [1])\n add_run_param (exp_dict, 'TOTAL_THREADS', [2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', ['0.0 0.0', '5.0 5.0'])\n ## unlike the above four fields,\n ## the run command does NOT produce a line of the form 'malloc=[...]'.\n ## so, run_experiment.py will APPEND a line of this form to the datafile!\n add_run_param (exp_dict, 'malloc', ['jemalloc', 'mimalloc'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/lib{malloc}.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n add_data_field (exp_dict, 'malloc', validator=is_run_param('malloc'))\n\n add_plot_set(\n exp_dict\n , name='throughput-{malloc}-{INS_DEL_FRAC}-{MAXKEY}.png'\n , title='{malloc} {INS_DEL_FRAC} {MAXKEY}'\n , varying_cols_list=['malloc', 'MAXKEY', 'INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis='total_throughput'\n , plot_type='bars'\n )\n\n ## render one legend for all plots (since the legend is the same for all).\n ## if legend varies from plot to plot, you might enable legends for all plots,\n ## or write a custom plotting command that determines what to do, given your data\n add_plot_set(exp_dict, name='throughput-legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')\n\n ## note: 
choice of column / row / table field determines how the HTML page looks -- up to you!\n add_page_set(\n exp_dict\n , image_files='throughput-{malloc}-{INS_DEL_FRAC}-{MAXKEY}.png'\n , name='throughput'\n , column_field='INS_DEL_FRAC'\n , row_field='MAXKEY'\n , table_field='malloc'\n , legend_file='throughput-legend.png'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rdpw')",
"_____no_output_____"
]
],
[
[
"## Let's view the data, plots and HTML we produced",
"_____no_output_____"
]
],
[
[
"show_html('data/throughput.html')\ndisplay(select_to_dataframe('select * from data'))",
"_____no_output_____"
]
],
[
[
"# Plots and HTML for data with <ins>7+ dimensions</ins>\n\nif we had MORE than one extra dimension of data (7+ dimensions in total), we could list additional fields in the keyword argument `page_field_list`, which would cause additional HTML pages to be rendered (one for each combination of values for fields in `page_field_list`), and linked together by an `index.html`. (note that the `name` keyword argument of `add_page_set` must also be modified to reference these fields, in order for multiple HTML files to be created---you must specify what sort of naming convention you'd like the framework to use.)",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools') ## path to tools library\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make bin_dir={__dir_run} -j6')\n\n add_run_param (exp_dict, '__trials', [1])\n add_run_param (exp_dict, 'TOTAL_THREADS', [2, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', ['0.0 0.0', '5.0 5.0'])\n ## unlike the above four fields,\n ## the run command does NOT produce a line of the form 'malloc=[...]'.\n ## so, run_experiment.py will APPEND a line of this form to the datafile!\n add_run_param (exp_dict, 'malloc', ['jemalloc', 'mimalloc'])\n ## ditto for reclaimer\n add_run_param (exp_dict, 'numactl', ['', 'numactl --interleave=all'])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/lib{malloc}.so {numactl} time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n\n add_plot_set(\n exp_dict\n , name='throughput-{malloc}-{numactl}-{INS_DEL_FRAC}-{MAXKEY}.png'\n , title='{INS_DEL_FRAC} {MAXKEY}'\n , varying_cols_list=['malloc', 'numactl', 'MAXKEY', 'INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis='total_throughput'\n , plot_type='bars'\n )\n\n ## render one legend for all plots (since the legend is the same for all).\n ## if legend varies from plot to plot, you might enable legends for all plots,\n ## or write a custom plotting command that determines what to do, given your data\n add_plot_set(exp_dict, name='throughput-legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 
3')\n\n ## we place the above legend at the bottom of *each* table by providing \"legend_file\"\n add_page_set(\n exp_dict\n , image_files='throughput-{malloc}-{numactl}-{INS_DEL_FRAC}-{MAXKEY}.png'\n , name='throughput'\n , column_field='numactl'\n , row_field='malloc'\n , table_field='MAXKEY'\n , page_field_list=['INS_DEL_FRAC']\n , legend_file='throughput-legend.png'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rdpw')",
"_____no_output_____"
]
],
[
[
"## Let's view the data, plots and HTML we produced",
"_____no_output_____"
]
],
[
[
"show_html('data/index.html')\ndisplay(select_to_dataframe('select * from data'))",
"_____no_output_____"
]
],
[
[
"# It's easy to plot *many* value fields vs your `run_params`\n\nLet's go back to our 5-dimensional data example to demonstrate how to easily produce plots from *many different value fields* (not just `total_throughput`).\n\n### First let's run a quick shell command to check what kinds of fields exist in our data\n\n(This command uses `grep` with a simple `regex` to look for lines of the form \"XYZ=*number*\")",
"_____no_output_____"
]
],
[
[
"shell_to_list('grep -E \"^[^ =]+=[0-9.]+$\" data/data000001.txt', sep='\\n')",
"_____no_output_____"
]
],
[
[
"## Let's focus on the following fields from that list:\n\n- `tree_stats_numNodes`\n- `tree_stats_height`\n- `tree_stats_avgKeyDepth`\n- `global_epoch_counter`\n- `PAPI_L2_TCM`\n- `PAPI_L3_TCM`\n- `PAPI_TOT_CYC`\n- `PAPI_TOT_INS`\n- `total_throughput`",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', [\"0.0 0.0\", \"5.0 5.0\"])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')\n add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')\n add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')\n\n ## render one legend for all plots (since the legend is the same for all).\n ## if legend varies from plot to plot, you might enable legends for all plots,\n ## or write a custom plotting command that determines what to do, given your data\n add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')\n\n ## render a plot_set for EVERY numeric data field extracted above\n for field in get_numeric_data_fields(exp_dict):\n add_plot_set(\n 
exp_dict\n , name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , title='{INS_DEL_FRAC} {MAXKEY}k: '+field\n , varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis=field\n , plot_type='bars'\n )\n\n ## and also add a page_set for each data field.\n ## we place the above legend at the bottom of *each* table by providing \"legend_file\"\n add_page_set(\n exp_dict\n , image_files=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , name=field\n , column_field='INS_DEL_FRAC'\n , row_field='MAXKEY'\n , legend_file='legend.png'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rdpw')",
"_____no_output_____"
]
],
[
[
"## Viewing the results",
"_____no_output_____"
]
],
[
[
"import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nshow_html('data/index.html')",
"_____no_output_____"
]
],
[
[
"# Rendering *many data fields* on a *single* HTML page\n\nin the previous example, we build one page for each data field extracted. however, you might want, for example, to build a single page with many data fields, each appearing as a *row* of plots.\n\nif you take a moment to think about *how* you would accomplish this using `add_page_set`, it's not obvious that you even *can*... you can specify *one field* as the `row_field`, but in this case we want to show *many different fields, one per row*.",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', [\"0.0 0.0\", \"5.0 5.0\"])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')\n add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')\n add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')\n\n ## render one legend for all plots\n add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')\n\n ## render plots\n value_fields = get_numeric_data_fields(exp_dict)\n for field in value_fields:\n add_plot_set(\n exp_dict\n , name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , title='{INS_DEL_FRAC} {MAXKEY}k: '+field\n , varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis=field\n , 
plot_type='bars'\n )\n\n ## and also add a page_set to show all plots\n add_page_set(\n exp_dict\n , image_files='{row_field}-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , name='comparison'\n , column_field='INS_DEL_FRAC'\n , row_field=value_fields\n , table_field='MAXKEY'\n , legend_file='legend.png'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-dpw')",
"_____no_output_____"
]
],
[
[
"## Viewing the results",
"_____no_output_____"
]
],
[
[
"import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nshow_html('data/index.html')",
"_____no_output_____"
]
],
[
[
"# Separating `tables` into different `pages`\n\nif you prefer, you can eliminate the `table_field` argument to `add_page_set` and instead use `page_field_list`. this produces a slightly different effect.",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', [\"0.0 0.0\", \"5.0 5.0\"])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')\n add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')\n add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')\n\n ## render one legend for all plots\n add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')\n\n ## render plots\n value_fields = get_numeric_data_fields(exp_dict)\n for field in value_fields:\n add_plot_set(\n exp_dict\n , name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , title='{INS_DEL_FRAC} {MAXKEY}k: '+field\n , varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis=field\n , 
plot_type='bars'\n )\n\n ## and also add a page_set to show all plots\n add_page_set(\n exp_dict\n , image_files='{row_field}-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , name='comparison'\n , column_field='INS_DEL_FRAC'\n , row_field=value_fields\n , page_field_list=['MAXKEY']\n , legend_file='legend.png'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-dpw')",
"_____no_output_____"
]
],
[
[
"## Viewing the results",
"_____no_output_____"
]
],
[
[
"import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nshow_html('data/index.html')",
"_____no_output_____"
]
],
[
[
"# Defining a `--testing` mode\n## Briefly running each configuration *BEFORE* doing a full run\n\ni often find it useful to have a `testing` mode (enabled with argument `--testing`), that runs for less time, but still explores all (important) configurations of run parameters, to make sure nothing simple will fail when i run for many hours. (fail-fast is good!)\n\nto this end, a variable called `args.testing` is accessible in `define_experiment`, and if it's `True`, then the user has passed `--testing` as a command line arg.\n\nthe correct response to this is to limit the set of configurations somehow, perhaps by reducing the number of thread counts, and/or reducing the length of time to execute in each trial, and/or limiting runs to a single trial, and/or eliminating data structure prefilling (or anything else that you find appropriate).\n\nfor example, let's add a simple `--testing` mode to the previous code cell.\n\nnote the `if args.testing:` block, as well as the `--testing` argument passed to `run_in_jupyter` *in addition to* the previous `-dpw` arguments. (we also reinstate the `-r` argument, since we want to actually run our testing mode.)\n\nobserve that this new `--testing` mode takes around 20 seconds to run, compared to several minutes without specifying `--testing`. (this time difference becomes much more drastic if you would normally run more trials, thread counts, or for longer than 1 second. :)) \n\ni make it a habit to run in `--testing` mode and take a quick peek at the results before running my full experiments.",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', [\"0.0 0.0\", \"5.0 5.0\"])\n\n millis_to_run = 1000\n\n ## defined a reduced set of configurations for testing mode\n if args.testing:\n add_run_param (exp_dict, '__trials', [1])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 8])\n millis_to_run = 100\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t ' + str(millis_to_run))\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')\n add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')\n add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')\n\n ## render one legend for all plots\n add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')\n\n ## render plots\n value_fields = get_numeric_data_fields(exp_dict)\n for field in value_fields:\n 
add_plot_set(\n exp_dict\n , name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , title='{INS_DEL_FRAC} {MAXKEY}k: '+field\n , varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis=field\n , plot_type='bars'\n )\n\n ## and also add a page_set to show all plots\n add_page_set(\n exp_dict\n , image_files='{row_field}-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , name='comparison'\n , column_field='INS_DEL_FRAC'\n , row_field=value_fields\n , page_field_list=['MAXKEY']\n , legend_file='legend.png'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='--testing -rdpw')",
"_____no_output_____"
]
],
[
[
"## Viewing the `--testing` mode results",
"_____no_output_____"
]
],
[
[
"import sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nshow_html('data/index.html')",
"_____no_output_____"
]
],
[
[
"# Custom output filename patterns\n\nin the experiments above, we have always used the default filename for output files: `dataXXXXXX.txt`.\n\nif you want a different file naming scheme, it's easy to specify a pattern for this using `set_file_data(exp_dict, pattern)`.\n\nlet's see an example of this, where we include the current values of several `run_param`s in the outfile file pattern.\n\n(you can also set the output directory with `set_dir_data(exp_dict, path)`.)",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', [\"0.0 0.0\", \"5.0 5.0\"])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')\n\n set_file_data (exp_dict, 'my_data_n{TOTAL_THREADS}_k{MAXKEY}_insdel{INS_DEL_FRAC}_{DS_TYPENAME}.txt')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='--testing -rdpw')",
"_____no_output_____"
]
],
[
[
"# Automatic best-effort sanity checks\n\nthe data framework does its best to identify some basic mistakes that are common when running repeated experiments over a large configuration space. we describe some of them here, and show how they work.\n\nfor example, observe that the following `define_experiment` function attempts to plot `TOTAL_THREADS` on the x-axis, `total_throughput` on the y-axis, with `DS_TYPENAME` as the series, but completely ignores `MAXKEY` in the `add_plot_set` call.\n\nthis is a mistake, as this would result in `averaging` unrelated data points with two *different* values of `MAXKEY`.\n\nrun the following code cell to see the detailed error message that results in this situation. it attempts to be as helpful as possible in helping you diagnose the cause. in this case it essentially identifies and highlights the problematic column (`MAXKEY`) *for you*, and suggests a fix (adding it to the `varying_cols_list` argument when calling `add_plot_set`).\n\nof course, just because something plots successfully doesn't mean you haven't made a mistake... but we do our best to catch a variety of simple mistakes. (or at least assert and fail-fast when *some* sensible assumptions are violated.)",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel 0.5 0.5 -k {MAXKEY} -t 100')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n\n add_plot_set(\n exp_dict\n , name='throughput.png'\n , title='throughput'\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis='total_throughput'\n , plot_type='bars'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rdp', error_exit_code=0)",
"_____no_output_____"
]
],
[
[
"# Automatic archival features\n## (data zip, git commit hash fetch, git diff file archive)\n\nactivated with command line arg: `-z` (which stands for `zip creation`)\n\nthe data framework offers a powerful convenience for archiving your experiments: it can automatically ZIP *as little data as is needed* to guarantee you won't lose the ability to return to this exact code/data state (file/directory structure).\n\nhow does it do this?\n\nwell, assuming you are working a git repository, and are committing changes as you go, the repository's current `commit hash` presumably gives you a way to get *pretty close* to your current file/directory structure.\n\nbut of course, it will be missing any changes you've made since your last commit! this includes all of the data you've just generated, as well as any tentative code changes you've made (perhaps experimental changes you're currently testing).\n\nhappily, we can *extract* the list of files you've changed *since your last commit* directly with a `git` command: `git status -s | awk '{if ($1 != \"D\") print $2}' | grep -v \"/$\"`\n\nso, we do this, and then once we have this list of files, we selectively add *them* to a ZIP file along with the data directory we just produced, as well as the file `output_log.txt`.\n\ncrucially, any files that are ignored by `git` (because they are covered by a pattern in your `.gitignore` file) will *NOT* be added to the ZIP file. this means you can automatically exclude files easily that you wouldn't want in your repo anyway. (normally the `data` folder produced by your experiments would probably fall into that category, but we add it manually. if you want to add more files manually, see the function `do_finish` in `run_experiment.py`.)\n\nthis whole process should make it easier to achieve a *much* smaller file size for an archive that you *can* reconstruct to reproduce experiments. 
this smaller file size *should* make it feasible to archive *every* set of experiments you run by default, along with enough information to understand exactly what was run, later. (and, you should only occasionally have to clean up your archives.) \n\nthis can help you eliminate one of the questions we all *hate* asking: `what on earth did we run to get these results?`\n\nto help you reconstruct your current file/directory state later, we dump all relevant information about the `current commit`, including the `commit hash` to `output_log.txt` before we add it to the ZIP. you can find this information about the commit by looking for `'git status:'` or `'commit hash='` in `output_log.txt`.\n\nfor example, the following code causes text along the following lines to be archived as part of `output_log.txt`:\n\n ## ## Fetching git status and any uncommitted changes for archival purposes\n ## \n ## commit_hash=05ec0e2184bd8c7a30e22457483cbeeadd0c2461\n ## git_status:\n ## On branch data_framework\n ## Your branch is up to date with 'origin/data_framework'.\n ## \n ## Changes not staged for commit:\n ## (use \"git add <file>...\" to update what will be committed)\n ## (use \"git checkout -- <file>...\" to discard changes in working directory)\n ## (commit or discard the untracked or modified content in submodules)\n ## \n ## \tmodified: .vscode/settings.json\n ## \tmodified: microbench_experiments/tutorial/tutorial.ipynb\n ## \tmodified: microbench_experiments/tutorial/tutorial_extra.ipynb\n ## \tmodified: tools (new commits, modified content)\n ## \n ## no changes added to commit (use \"git add\" and/or \"git commit -a\")\n ## \n ## diff_files=['.vscode/settings.json', 'microbench_experiments/tutorial/tutorial.ipynb', 'microbench_experiments/tutorial/tutorial_extra.ipynb', 'tools']\n\n## on my system, the following code produces an archive smaller than `3MB`, which offers complete reproducibility (and even includes 37 generated plots), despite the entire contents of 
setbench reaching `140MB`!\n",
"_____no_output_____"
]
],
[
[
"def define_experiment(exp_dict, args):\n set_dir_tools (exp_dict, os.getcwd() + '/../../tools')\n set_dir_compile (exp_dict, os.getcwd() + '/../../microbench')\n set_dir_run (exp_dict, os.getcwd() + '/../../microbench/bin')\n set_cmd_compile (exp_dict, 'make -j6')\n\n add_run_param (exp_dict, '__trials', [1, 2])\n add_run_param (exp_dict, 'TOTAL_THREADS', [1, 2, 4, 8])\n add_run_param (exp_dict, 'DS_TYPENAME', ['brown_ext_ist_lf', 'brown_ext_abtree_lf', 'bronson_pext_bst_occ'])\n add_run_param (exp_dict, 'MAXKEY', [20000, 200000])\n add_run_param (exp_dict, 'INS_DEL_FRAC', [\"0.0 0.0\", \"5.0 5.0\"])\n\n set_cmd_run (exp_dict, 'LD_PRELOAD=../../lib/libjemalloc.so numactl --interleave=all time ./{DS_TYPENAME}.debra -nwork {TOTAL_THREADS} -nprefill {TOTAL_THREADS} -insdel {INS_DEL_FRAC} -k {MAXKEY} -t 1000')\n\n add_data_field (exp_dict, 'total_throughput', coltype='INTEGER', validator=is_positive)\n add_data_field (exp_dict, 'tree_stats_numNodes', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_height', coltype='INTEGER')\n add_data_field (exp_dict, 'tree_stats_avgKeyDepth', coltype='REAL')\n add_data_field (exp_dict, 'global_epoch_counter', coltype='INTEGER')\n add_data_field (exp_dict, 'PAPI_L2_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_L3_TCM', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_CYC', coltype='REAL')\n add_data_field (exp_dict, 'PAPI_TOT_INS', coltype='REAL')\n\n ## render one legend for all plots\n add_plot_set(exp_dict, name='legend.png', series='DS_TYPENAME', x_axis='TOTAL_THREADS', y_axis='total_throughput', plot_type='bars', plot_cmd_args='--legend-only --legend-columns 3')\n\n ## render plots\n value_fields = get_numeric_data_fields(exp_dict)\n for field in value_fields:\n add_plot_set(\n exp_dict\n , name=field+'-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , title='{INS_DEL_FRAC} {MAXKEY}k: '+field\n , varying_cols_list=['MAXKEY', 'INS_DEL_FRAC']\n , series='DS_TYPENAME'\n , x_axis='TOTAL_THREADS'\n , y_axis=field\n , 
plot_type='bars'\n )\n\n ## and also add a page_set to show all plots\n add_page_set(\n exp_dict\n , image_files='{row_field}-{INS_DEL_FRAC}-{MAXKEY}k.png'\n , name='comparison'\n , column_field='INS_DEL_FRAC'\n , row_field=value_fields\n , table_field='MAXKEY'\n , legend_file='legend.png'\n )\n\nimport sys ; sys.path.append('../../tools/data_framework') ; from run_experiment import *\nrun_in_jupyter(define_experiment, cmdline_args='-rdpwz')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e740311ef86f218ef289a408bdc4d2d24a56293a | 56,793 | ipynb | Jupyter Notebook | site/en-snapshot/neural_structured_learning/tutorials/graph_keras_lstm_imdb.ipynb | ilyaspiridonov/docs-l10n | a061a44e40d25028d0a4458094e48ab717d3565c | [
"Apache-2.0"
] | 1 | 2021-09-23T09:56:29.000Z | 2021-09-23T09:56:29.000Z | site/en-snapshot/neural_structured_learning/tutorials/graph_keras_lstm_imdb.ipynb | ilyaspiridonov/docs-l10n | a061a44e40d25028d0a4458094e48ab717d3565c | [
"Apache-2.0"
] | null | null | null | site/en-snapshot/neural_structured_learning/tutorials/graph_keras_lstm_imdb.ipynb | ilyaspiridonov/docs-l10n | a061a44e40d25028d0a4458094e48ab717d3565c | [
"Apache-2.0"
] | 1 | 2020-06-23T07:43:49.000Z | 2020-06-23T07:43:49.000Z | 36.382447 | 429 | 0.540049 | [
[
[
"##### Copyright 2019 Google LLC",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Graph regularization for sentiment classification using synthesized graphs\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/neural_structured_learning/tutorials/graph_keras_lstm_imdb\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/neural-structured-learning/blob/master/g3doc/tutorials/graph_keras_lstm_imdb.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/neural-structured-learning/blob/master/g3doc/tutorials/graph_keras_lstm_imdb.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"## Overview",
"_____no_output_____"
],
[
"This notebook classifies movie reviews as *positive* or *negative* using the\ntext of the review. This is an example of *binary* classification, an important\nand widely applicable kind of machine learning problem.\n\nWe will demonstrate the use of graph regularization in this notebook by building\na graph from the given input. The general recipe for building a\ngraph-regularized model using the Neural Structured Learning (NSL) framework\nwhen the input does not contain an explicit graph is as follows:\n\n1. Create embeddings for each text sample in the input. This can be done using\n pre-trained models such as [word2vec](https://arxiv.org/pdf/1310.4546.pdf),\n [Swivel](https://arxiv.org/abs/1602.02215),\n [BERT](https://arxiv.org/abs/1810.04805) etc.\n2. Build a graph based on these embeddings by using a similarity metric such as\n the 'L2' distance, 'cosine' distance, etc. Nodes in the graph correspond to\n samples and edges in the graph correspond to similarity between pairs of\n samples.\n3. Generate training data from the above synthesized graph and sample features.\n The resulting training data will contain neighbor features in addition to\n the original node features.\n4. Create a neural network as a base model using the Keras sequential,\n functional, or subclass API.\n5. Wrap the base model with the GraphRegularization wrapper class, which is\n provided by the NSL framework, to create a new graph Keras model. This new\n model will include a graph regularization loss as the regularization term in\n its training objective.\n6. Train and evaluate the graph Keras model.\n\n**Note**: We expect that it would take readers about 1 hour to go through this\ntutorial.",
"_____no_output_____"
],
[
"## Requirements\n\n1. Install the Neural Structured Learning package.\n2. Install tensorflow-hub.",
"_____no_output_____"
]
],
[
[
"!pip install --quiet neural-structured-learning\n!pip install --quiet tensorflow-hub",
"_____no_output_____"
]
],
[
[
"## Dependencies and imports",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport neural_structured_learning as nsl\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\n# Resets notebook state\ntf.keras.backend.clear_session()\n\nprint(\"Version: \", tf.__version__)\nprint(\"Eager mode: \", tf.executing_eagerly())\nprint(\"Hub version: \", hub.__version__)\nprint(\n \"GPU is\",\n \"available\" if tf.config.list_physical_devices(\"GPU\") else \"NOT AVAILABLE\")",
"_____no_output_____"
]
],
[
[
"## IMDB dataset\n\nThe\n[IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb)\ncontains the text of 50,000 movie reviews from the\n[Internet Movie Database](https://www.imdb.com/). These are split into 25,000\nreviews for training and 25,000 reviews for testing. The training and testing\nsets are *balanced*, meaning they contain an equal number of positive and\nnegative reviews.\n\nIn this tutorial, we will use a preprocessed version of the IMDB dataset.",
"_____no_output_____"
],
[
"### Download preprocessed IMDB dataset\n\nThe IMDB dataset comes packaged with TensorFlow. It has already been\npreprocessed such that the reviews (sequences of words) have been converted to\nsequences of integers, where each integer represents a specific word in a\ndictionary.\n\nThe following code downloads the IMDB dataset (or uses a cached copy if it has\nalready been downloaded):",
"_____no_output_____"
]
],
[
[
"imdb = tf.keras.datasets.imdb\n(pp_train_data, pp_train_labels), (pp_test_data, pp_test_labels) = (\n imdb.load_data(num_words=10000))",
"_____no_output_____"
]
],
[
[
"The argument `num_words=10000` keeps the top 10,000 most frequently occurring words in the training data. The rare words are discarded to keep the size of the vocabulary manageable.",
"_____no_output_____"
],
[
"### Explore the data\n\nLet's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review. Each label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review.",
"_____no_output_____"
]
],
[
[
"print('Training entries: {}, labels: {}'.format(\n len(pp_train_data), len(pp_train_labels)))\ntraining_samples_count = len(pp_train_data)",
"_____no_output_____"
]
],
[
[
"The text of reviews have been converted to integers, where each integer represents a specific word in a dictionary. Here's what the first review looks like:",
"_____no_output_____"
]
],
[
[
"print(pp_train_data[0])",
"_____no_output_____"
]
],
[
[
"Movie reviews may be different lengths. The below code shows the number of words in the first and second reviews. Since inputs to a neural network must be the same length, we'll need to resolve this later.",
"_____no_output_____"
]
],
[
[
"len(pp_train_data[0]), len(pp_train_data[1])",
"_____no_output_____"
]
],
[
[
"### Convert the integers back to words\n\nIt may be useful to know how to convert integers back to the corresponding text.\nHere, we'll create a helper function to query a dictionary object that contains\nthe integer to string mapping:",
"_____no_output_____"
]
],
[
[
"def build_reverse_word_index():\n # A dictionary mapping words to an integer index\n word_index = imdb.get_word_index()\n\n # The first indices are reserved\n word_index = {k: (v + 3) for k, v in word_index.items()}\n word_index['<PAD>'] = 0\n word_index['<START>'] = 1\n word_index['<UNK>'] = 2 # unknown\n word_index['<UNUSED>'] = 3\n return dict((value, key) for (key, value) in word_index.items())\n\nreverse_word_index = build_reverse_word_index()\n\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])",
"_____no_output_____"
]
],
[
[
"Now we can use the `decode_review` function to display the text for the first review:",
"_____no_output_____"
]
],
[
[
"decode_review(pp_train_data[0])",
"_____no_output_____"
]
],
[
[
"## Graph construction\n\nGraph construction involves creating embeddings for text samples and then using\na similarity function to compare the embeddings.\n\nBefore proceeding further, we first create a directory to store artifacts\ncreated by this tutorial.",
"_____no_output_____"
]
],
[
[
"!mkdir -p /tmp/imdb",
"_____no_output_____"
]
],
[
[
"### Create sample embeddings",
"_____no_output_____"
],
[
"We will use pretrained Swivel embeddings to create embeddings in the\n`tf.train.Example` format for each sample in the input. We will store the\nresulting embeddings in the `TFRecord` format along with an additional feature\nthat represents the ID of each sample. This is important and will allow us match\nsample embeddings with corresponding nodes in the graph later.",
"_____no_output_____"
]
],
[
[
"pretrained_embedding = 'https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1'\n\nhub_layer = hub.KerasLayer(\n pretrained_embedding, input_shape=[], dtype=tf.string, trainable=True)",
"_____no_output_____"
],
[
"def _int64_feature(value):\n \"\"\"Returns int64 tf.train.Feature.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.tolist()))\n\n\ndef _bytes_feature(value):\n \"\"\"Returns bytes tf.train.Feature.\"\"\"\n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[value.encode('utf-8')]))\n\n\ndef _float_feature(value):\n \"\"\"Returns float tf.train.Feature.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=value.tolist()))\n\n\ndef create_embedding_example(word_vector, record_id):\n \"\"\"Create tf.Example containing the sample's embedding and its ID.\"\"\"\n\n text = decode_review(word_vector)\n\n # Shape = [batch_size,].\n sentence_embedding = hub_layer(tf.reshape(text, shape=[-1,]))\n\n # Flatten the sentence embedding back to 1-D.\n sentence_embedding = tf.reshape(sentence_embedding, shape=[-1])\n\n features = {\n 'id': _bytes_feature(str(record_id)),\n 'embedding': _float_feature(sentence_embedding.numpy())\n }\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n\ndef create_embeddings(word_vectors, output_path, starting_record_id):\n record_id = int(starting_record_id)\n with tf.io.TFRecordWriter(output_path) as writer:\n for word_vector in word_vectors:\n example = create_embedding_example(word_vector, record_id)\n record_id = record_id + 1\n writer.write(example.SerializeToString())\n return record_id\n\n\n# Persist TF.Example features containing embeddings for training data in\n# TFRecord format.\ncreate_embeddings(pp_train_data, '/tmp/imdb/embeddings.tfr', 0)",
"_____no_output_____"
]
],
[
[
"### Build a graph\n\nNow that we have the sample embeddings, we will use them to build a similarity\ngraph, i.e, nodes in this graph will correspond to samples and edges in this\ngraph will correspond to similarity between pairs of nodes.\n\nNeural Structured Learning provides a graph building library to build a graph\nbased on sample embeddings. It uses **cosine similarity** as the similarity\nmeasure to compare embeddings and build edges between them. It also allows us to\nspecify a similarity threshold, which can be used to discard dissimilar edges\nfrom the final graph. In this example, using 0.99 as the similarity threshold,\nwe end up with a graph that has 445,327 bi-directional edges.",
"_____no_output_____"
]
],
[
[
"nsl.tools.build_graph(['/tmp/imdb/embeddings.tfr'],\n '/tmp/imdb/graph_99.tsv',\n similarity_threshold=0.99)",
"_____no_output_____"
]
],
[
[
"**Note:** Graph quality and by extension, embedding quality, are very important\nfor graph regularization. While we have used Swivel embeddings in this notebook,\nusing BERT embeddings for instance, will likely capture review semantics more\naccurately. We encourage users to use embeddings of their choice and as\nappropriate to their needs.",
"_____no_output_____"
],
[
"## Sample features\n\nWe create sample features for our problem using the `tf.train.Example` format\nand persist them in the `TFRecord` format. Each sample will include the\nfollowing three features:\n\n1. **id**: The node ID of the sample.\n2. **words**: An int64 list containing word IDs.\n3. **label**: A singleton int64 identifying the target class of the review.",
"_____no_output_____"
]
],
[
[
"def create_example(word_vector, label, record_id):\n \"\"\"Create tf.Example containing the sample's word vector, label, and ID.\"\"\"\n features = {\n 'id': _bytes_feature(str(record_id)),\n 'words': _int64_feature(np.asarray(word_vector)),\n 'label': _int64_feature(np.asarray([label])),\n }\n return tf.train.Example(features=tf.train.Features(feature=features))\n\ndef create_records(word_vectors, labels, record_path, starting_record_id):\n record_id = int(starting_record_id)\n with tf.io.TFRecordWriter(record_path) as writer:\n for word_vector, label in zip(word_vectors, labels):\n example = create_example(word_vector, label, record_id)\n record_id = record_id + 1\n writer.write(example.SerializeToString())\n return record_id\n\n# Persist TF.Example features (word vectors and labels) for training and test\n# data in TFRecord format.\nnext_record_id = create_records(pp_train_data, pp_train_labels,\n '/tmp/imdb/train_data.tfr', 0)\ncreate_records(pp_test_data, pp_test_labels, '/tmp/imdb/test_data.tfr',\n next_record_id)",
"_____no_output_____"
]
],
[
[
"## Augment training data with graph neighbors\n\nSince we have the sample features and the synthesized graph, we can generate the\naugmented training data for Neural Structured Learning. The NSL framework\nprovides a library to combine the graph and the sample features to produce\nthe final training data for graph regularization. The resulting training data\nwill include original sample features as well as features of their corresponding\nneighbors.\n\nIn this tutorial, we consider undirected edges and use a maximum of 3 neighbors\nper sample to augment training data with graph neighbors.",
"_____no_output_____"
]
],
[
[
"nsl.tools.pack_nbrs(\n '/tmp/imdb/train_data.tfr',\n '',\n '/tmp/imdb/graph_99.tsv',\n '/tmp/imdb/nsl_train_data.tfr',\n add_undirected_edges=True,\n max_nbrs=3)",
"_____no_output_____"
]
],
[
[
"## Base model\n\nWe are now ready to build a base model without graph regularization. In order to\nbuild this model, we can either use embeddings that were used in building the\ngraph, or we can learn new embeddings jointly along with the classification\ntask. For the purpose of this notebook, we will do the latter.",
"_____no_output_____"
],
[
"### Global variables",
"_____no_output_____"
]
],
[
[
"NBR_FEATURE_PREFIX = 'NL_nbr_'\nNBR_WEIGHT_SUFFIX = '_weight'",
"_____no_output_____"
]
],
[
[
"### Hyperparameters\n\nWe will use an instance of `HParams` to inclue various hyperparameters and\nconstants used for training and evaluation. We briefly describe each of them\nbelow:\n\n- **num_classes**: There are 2 classes -- *positive* and *negative*.\n\n- **max_seq_length**: This is the maximum number of words considered from each\n movie review in this example.\n\n- **vocab_size**: This is the size of the vocabulary considered for this\n example.\n\n- **distance_type**: This is the distance metric used to regularize the sample\n with its neighbors.\n\n- **graph_regularization_multiplier**: This controls the relative weight of\n the graph regularization term in the overall loss function.\n\n- **num_neighbors**: The number of neighbors used for graph regularization.\n This value has to be less than or equal to the `max_nbrs` argument used\n above when invoking `nsl.tools.pack_nbrs`.\n\n- **num_fc_units**: The number of units in the fully connected layer of the\n neural network.\n\n- **train_epochs**: The number of training epochs.\n\n- **batch_size**: Batch size used for training and evaluation.\n\n- **eval_steps**: The number of batches to process before deeming evaluation\n is complete. If set to `None`, all instances in the test set are evaluated.",
"_____no_output_____"
]
],
[
[
"class HParams(object):\n \"\"\"Hyperparameters used for training.\"\"\"\n def __init__(self):\n ### dataset parameters\n self.num_classes = 2\n self.max_seq_length = 256\n self.vocab_size = 10000\n ### neural graph learning parameters\n self.distance_type = nsl.configs.DistanceType.L2\n self.graph_regularization_multiplier = 0.1\n self.num_neighbors = 2\n ### model architecture\n self.num_embedding_dims = 16\n self.num_lstm_dims = 64\n self.num_fc_units = 64\n ### training parameters\n self.train_epochs = 10\n self.batch_size = 128\n ### eval parameters\n self.eval_steps = None # All instances in the test set are evaluated.\n\nHPARAMS = HParams()",
"_____no_output_____"
]
],
[
[
"### Prepare the data\n\nThe reviews—the arrays of integers—must be converted to tensors before being fed\ninto the neural network. This conversion can be done a couple of ways:\n\n* Convert the arrays into vectors of `0`s and `1`s indicating word occurrence,\n similar to a one-hot encoding. For example, the sequence `[3, 5]` would become a `10000`-dimensional vector that is all zeros except for indices `3` and `5`, which are ones. Then, make this the first layer in our network—a `Dense` layer—that can handle floating point vector data. This approach is memory intensive, though, requiring a `num_words * num_reviews` size matrix.\n\n* Alternatively, we can pad the arrays so they all have the same length, then\n create an integer tensor of shape `max_length * num_reviews`. We can use an\n embedding layer capable of handling this shape as the first layer in our\n network.\n\nIn this tutorial, we will use the second approach.\n\nSince the movie reviews must be the same length, we will use the `pad_sequence`\nfunction defined below to standardize the lengths.",
"_____no_output_____"
]
],
[
[
"def make_dataset(file_path, training=False):\n \"\"\"Creates a `tf.data.TFRecordDataset`.\n\n Args:\n file_path: Name of the file in the `.tfrecord` format containing\n `tf.train.Example` objects.\n training: Boolean indicating if we are in training mode.\n\n Returns:\n An instance of `tf.data.TFRecordDataset` containing the `tf.train.Example`\n objects.\n \"\"\"\n\n def pad_sequence(sequence, max_seq_length):\n \"\"\"Pads the input sequence (a `tf.SparseTensor`) to `max_seq_length`.\"\"\"\n pad_size = tf.maximum([0], max_seq_length - tf.shape(sequence)[0])\n padded = tf.concat(\n [sequence.values,\n tf.fill((pad_size), tf.cast(0, sequence.dtype))],\n axis=0)\n # The input sequence may be larger than max_seq_length. Truncate down if\n # necessary.\n return tf.slice(padded, [0], [max_seq_length])\n\n def parse_example(example_proto):\n \"\"\"Extracts relevant fields from the `example_proto`.\n\n Args:\n example_proto: An instance of `tf.train.Example`.\n\n Returns:\n A pair whose first value is a dictionary containing relevant features\n and whose second value contains the ground truth labels.\n \"\"\"\n # The 'words' feature is a variable length word ID vector.\n feature_spec = {\n 'words': tf.io.VarLenFeature(tf.int64),\n 'label': tf.io.FixedLenFeature((), tf.int64, default_value=-1),\n }\n # We also extract corresponding neighbor features in a similar manner to\n # the features above during training.\n if training:\n for i in range(HPARAMS.num_neighbors):\n nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'words')\n nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, i,\n NBR_WEIGHT_SUFFIX)\n feature_spec[nbr_feature_key] = tf.io.VarLenFeature(tf.int64)\n\n # We assign a default value of 0.0 for the neighbor weight so that\n # graph regularization is done on samples based on their exact number\n # of neighbors. 
In other words, non-existent neighbors are discounted.\n feature_spec[nbr_weight_key] = tf.io.FixedLenFeature(\n [1], tf.float32, default_value=tf.constant([0.0]))\n\n features = tf.io.parse_single_example(example_proto, feature_spec)\n\n # Since the 'words' feature is a variable length word vector, we pad it to a\n # constant maximum length based on HPARAMS.max_seq_length\n features['words'] = pad_sequence(features['words'], HPARAMS.max_seq_length)\n if training:\n for i in range(HPARAMS.num_neighbors):\n nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'words')\n features[nbr_feature_key] = pad_sequence(features[nbr_feature_key],\n HPARAMS.max_seq_length)\n\n labels = features.pop('label')\n return features, labels\n\n dataset = tf.data.TFRecordDataset([file_path])\n if training:\n dataset = dataset.shuffle(10000)\n dataset = dataset.map(parse_example)\n dataset = dataset.batch(HPARAMS.batch_size)\n return dataset\n\n\ntrain_dataset = make_dataset('/tmp/imdb/nsl_train_data.tfr', True)\ntest_dataset = make_dataset('/tmp/imdb/test_data.tfr')",
"_____no_output_____"
]
],
[
[
"### Build the model\n\nA neural network is created by stacking layers—this requires two main architectural decisions:\n\n* How many layers to use in the model?\n* How many *hidden units* to use for each layer?\n\nIn this example, the input data consists of an array of word-indices. The labels to predict are either 0 or 1.\n\nWe will use a bi-directional LSTM as our base model in this tutorial.",
"_____no_output_____"
]
],
[
[
"# This function exists as an alternative to the bi-LSTM model used in this\n# notebook.\ndef make_feed_forward_model():\n \"\"\"Builds a simple 2 layer feed forward neural network.\"\"\"\n inputs = tf.keras.Input(\n shape=(HPARAMS.max_seq_length,), dtype='int64', name='words')\n embedding_layer = tf.keras.layers.Embedding(HPARAMS.vocab_size, 16)(inputs)\n pooling_layer = tf.keras.layers.GlobalAveragePooling1D()(embedding_layer)\n dense_layer = tf.keras.layers.Dense(16, activation='relu')(pooling_layer)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(dense_layer)\n return tf.keras.Model(inputs=inputs, outputs=outputs)\n\n\ndef make_bilstm_model():\n \"\"\"Builds a bi-directional LSTM model.\"\"\"\n inputs = tf.keras.Input(\n shape=(HPARAMS.max_seq_length,), dtype='int64', name='words')\n embedding_layer = tf.keras.layers.Embedding(HPARAMS.vocab_size,\n HPARAMS.num_embedding_dims)(\n inputs)\n lstm_layer = tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(HPARAMS.num_lstm_dims))(\n embedding_layer)\n dense_layer = tf.keras.layers.Dense(\n HPARAMS.num_fc_units, activation='relu')(\n lstm_layer)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(dense_layer)\n return tf.keras.Model(inputs=inputs, outputs=outputs)\n\n\n# Feel free to use an architecture of your choice.\nmodel = make_bilstm_model()\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"The layers are effectively stacked sequentially to build the classifier:\n\n1. The first layer is an `Input` layer which takes the integer-encoded\n vocabulary.\n2. The next layer is an `Embedding` layer, which takes the integer-encoded\n vocabulary and looks up the embedding vector for each word-index. These\n vectors are learned as the model trains. The vectors add a dimension to the\n output array. The resulting dimensions are: `(batch, sequence, embedding)`.\n3. Next, a bidirectional LSTM layer returns a fixed-length output vector for\n each example.\n4. This fixed-length output vector is piped through a fully-connected (`Dense`)\n layer with 64 hidden units.\n5. The last layer is densely connected with a single output node. Using the\n `sigmoid` activation function, this value is a float between 0 and 1,\n representing a probability, or confidence level.",
"_____no_output_____"
],
[
"### Hidden units\n\nThe above model has two intermediate or \"hidden\" layers, between the input and\noutput, and excluding the `Embedding` layer. The number of outputs (units,\nnodes, or neurons) is the dimension of the representational space for the layer.\nIn other words, the amount of freedom the network is allowed when learning an\ninternal representation.\n\nIf a model has more hidden units (a higher-dimensional representation space),\nand/or more layers, then the network can learn more complex representations.\nHowever, it makes the network more computationally expensive and may lead to\nlearning unwanted patterns—patterns that improve performance on training data\nbut not on the test data. This is called *overfitting*.",
"_____no_output_____"
],
[
"### Loss function and optimizer\n\nA model needs a loss function and an optimizer for training. Since this is a\nbinary classification problem and the model outputs a probability (a single-unit\nlayer with a sigmoid activation), we'll use the `binary_crossentropy` loss\nfunction.",
"_____no_output_____"
]
],
[
[
"model.compile(\n optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"### Create a validation set\n\nWhen training, we want to check the accuracy of the model on data it hasn't seen\nbefore. Create a *validation set* by setting apart a fraction of the original\ntraining data. (Why not use the testing set now? Our goal is to develop and tune\nour model using only the training data, then use the test data just once to\nevaluate our accuracy).\n\nIn this tutorial, we take roughly 10% of the initial training samples (10% of 25000) as labeled data for training and the remaining as validation data. Since the initial train/test split was 50/50 (25000 samples each), the effective train/validation/test split we now have is 5/45/50.\n\nNote that 'train_dataset' has already been batched and shuffled. ",
"_____no_output_____"
]
],
[
[
"validation_fraction = 0.9\nvalidation_size = int(validation_fraction *\n int(training_samples_count / HPARAMS.batch_size))\nprint(validation_size)\nvalidation_dataset = train_dataset.take(validation_size)\ntrain_dataset = train_dataset.skip(validation_size)",
"_____no_output_____"
]
],
[
[
"### Train the model\n\nTrain the model in mini-batches. While training, monitor the model's loss and accuracy on the validation set:",
"_____no_output_____"
]
],
[
[
"history = model.fit(\n train_dataset,\n validation_data=validation_dataset,\n epochs=HPARAMS.train_epochs,\n verbose=1)",
"_____no_output_____"
]
],
[
[
"### Evaluate the model\n\nNow, let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy.",
"_____no_output_____"
]
],
[
[
"results = model.evaluate(test_dataset, steps=HPARAMS.eval_steps)\nprint(results)",
"_____no_output_____"
]
],
[
[
"### Create a graph of accuracy/loss over time\n\n`model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:",
"_____no_output_____"
]
],
[
[
"history_dict = history.history\nhistory_dict.keys()",
"_____no_output_____"
]
],
[
[
"There are four entries: one for each monitored metric during training and validation. We can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy:",
"_____no_output_____"
]
],
[
[
"acc = history_dict['accuracy']\nval_acc = history_dict['val_accuracy']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\n# \"-r^\" is for solid red line with triangle markers.\nplt.plot(epochs, loss, '-r^', label='Training loss')\n# \"-b0\" is for solid blue line with circle markers.\nplt.plot(epochs, val_loss, '-bo', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend(loc='best')\n\nplt.show()",
"_____no_output_____"
],
[
"plt.clf() # clear figure\n\nplt.plot(epochs, acc, '-r^', label='Training acc')\nplt.plot(epochs, val_acc, '-bo', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend(loc='best')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Notice the training loss *decreases* with each epoch and the training accuracy\n*increases* with each epoch. This is expected when using a gradient descent\noptimization—it should minimize the desired quantity on every iteration.",
"_____no_output_____"
],
[
"## Graph regularization\n\nWe are now ready to try graph regularization using the base model that we built\nabove. We will use the `GraphRegularization` wrapper class provided by the\nNeural Structured Learning framework to wrap the base (bi-LSTM) model to include\ngraph regularization. The rest of the steps for training and evaluating the\ngraph-regularized model are similar to that of the base model.",
"_____no_output_____"
],
[
"### Create graph-regularized model",
"_____no_output_____"
],
[
"To assess the incremental benefit of graph regularization, we will create a new\nbase model instance. This is because `model` has already been trained for a few\niterations, and reusing this trained model to create a graph-regularized model\nwill not be a fair comparison for `model`.",
"_____no_output_____"
]
],
[
[
"# Build a new base LSTM model.\nbase_reg_model = make_bilstm_model()",
"_____no_output_____"
],
[
"# Wrap the base model with graph regularization.\ngraph_reg_config = nsl.configs.make_graph_reg_config(\n max_neighbors=HPARAMS.num_neighbors,\n multiplier=HPARAMS.graph_regularization_multiplier,\n distance_type=HPARAMS.distance_type,\n sum_over_axis=-1)\ngraph_reg_model = nsl.keras.GraphRegularization(base_reg_model,\n graph_reg_config)\ngraph_reg_model.compile(\n optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"### Train the model",
"_____no_output_____"
]
],
[
[
"graph_reg_history = graph_reg_model.fit(\n train_dataset,\n validation_data=validation_dataset,\n epochs=HPARAMS.train_epochs,\n verbose=1)",
"_____no_output_____"
]
],
[
[
"### Evaluate the model",
"_____no_output_____"
]
],
[
[
"graph_reg_results = graph_reg_model.evaluate(test_dataset, steps=HPARAMS.eval_steps)\nprint(graph_reg_results)",
"_____no_output_____"
]
],
[
[
"### Create a graph of accuracy/loss over time",
"_____no_output_____"
]
],
[
[
"graph_reg_history_dict = graph_reg_history.history\ngraph_reg_history_dict.keys()",
"_____no_output_____"
]
],
[
[
"There are five entries in total in the dictionary: training loss, training\naccuracy, training graph loss, validation loss, and validation accuracy. We can\nplot them all together for comparison. Note that the graph loss is only computed\nduring training.",
"_____no_output_____"
]
],
[
[
"acc = graph_reg_history_dict['accuracy']\nval_acc = graph_reg_history_dict['val_accuracy']\nloss = graph_reg_history_dict['loss']\ngraph_loss = graph_reg_history_dict['graph_loss']\nval_loss = graph_reg_history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\nplt.clf() # clear figure\n\n# \"-r^\" is for solid red line with triangle markers.\nplt.plot(epochs, loss, '-r^', label='Training loss')\n# \"-gD\" is for solid green line with diamond markers.\nplt.plot(epochs, graph_loss, '-gD', label='Training graph loss')\n# \"-b0\" is for solid blue line with circle markers.\nplt.plot(epochs, val_loss, '-bo', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend(loc='best')\n\nplt.show()",
"_____no_output_____"
],
[
"plt.clf() # clear figure\n\nplt.plot(epochs, acc, '-r^', label='Training acc')\nplt.plot(epochs, val_acc, '-bo', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend(loc='best')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## The power of semi-supervised learning\n\nSemi-supervised learning and more specifically, graph regularization in the\ncontext of this tutorial, can be really powerful when the amount of training\ndata is small. The lack of training data is compensated by leveraging similarity\namong the training samples, which is not possible in traditional supervised\nlearning.\n\nWe define ***supervision ratio*** as the ratio of training samples to the total\nnumber of samples which includes training, validation, and test samples. In this\nnotebook, we have used a supervision ratio of 0.05 (i.e, 5% of the labeled data)\nfor training both the base model as well as the graph-regularized model. We\nillustrate the impact of the supervision ratio on model accuracy in the cell\nbelow.",
"_____no_output_____"
]
],
[
[
"# Accuracy values for both the Bi-LSTM model and the feed forward NN model have\n# been precomputed for the following supervision ratios.\n\nsupervision_ratios = [0.3, 0.15, 0.05, 0.03, 0.02, 0.01, 0.005]\n\nmodel_tags = ['Bi-LSTM model', 'Feed Forward NN model']\nbase_model_accs = [[84, 84, 83, 80, 65, 52, 50], [87, 86, 76, 74, 67, 52, 51]]\ngraph_reg_model_accs = [[84, 84, 83, 83, 65, 63, 50],\n [87, 86, 80, 75, 67, 52, 50]]\n\nplt.clf() # clear figure\n\nfig, axes = plt.subplots(1, 2)\nfig.set_size_inches((12, 5))\n\nfor ax, model_tag, base_model_acc, graph_reg_model_acc in zip(\n axes, model_tags, base_model_accs, graph_reg_model_accs):\n\n # \"-r^\" is for solid red line with triangle markers.\n ax.plot(base_model_acc, '-r^', label='Base model')\n # \"-gD\" is for solid green line with diamond markers.\n ax.plot(graph_reg_model_acc, '-gD', label='Graph-regularized model')\n ax.set_title(model_tag)\n ax.set_xlabel('Supervision ratio')\n ax.set_ylabel('Accuracy(%)')\n ax.set_ylim((25, 100))\n ax.set_xticks(range(len(supervision_ratios)))\n ax.set_xticklabels(supervision_ratios)\n ax.legend(loc='best')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"It can be observed that as the superivision ratio decreases, model accuracy also\ndecreases. This is true for both the base model and for the graph-regularized\nmodel, regardless of the model architecture used. However, notice that the\ngraph-regularized model performs better than the base model for both the\narchitectures. In particular, for the Bi-LSTM model, when the supervision ratio\nis 0.01, the accuracy of the graph-regularized model is **~20%** higher than\nthat of the base model. This is primarily because of semi-supervised learning\nfor the graph-regularized model, where structural similarity among training\nsamples is used in addition to the training samples themselves.",
"_____no_output_____"
],
[
"## Conclusion\n\nWe have demonstrated the use of graph regularization using the Neural Structured\nLearning (NSL) framework even when the input does not contain an explicit graph.\nWe considered the task of sentiment classification of IMDB movie reviews for\nwhich we synthesized a similarity graph based on review embeddings. We encourage\nusers to experiment further by varying hyperparameters, the amount of\nsupervision, and by using different model architectures.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e74036e8a1a3ec05e4473d56419905ead68926e9 | 112,661 | ipynb | Jupyter Notebook | Module5/.ipynb_checkpoints/Module5 - Lab6-checkpoint.ipynb | 3point14thon/DAT210x-master | 22f24aa0ea462db1fcaeaa672c2fef103b986eae | [
"MIT"
] | null | null | null | Module5/.ipynb_checkpoints/Module5 - Lab6-checkpoint.ipynb | 3point14thon/DAT210x-master | 22f24aa0ea462db1fcaeaa672c2fef103b986eae | [
"MIT"
] | null | null | null | Module5/.ipynb_checkpoints/Module5 - Lab6-checkpoint.ipynb | 3point14thon/DAT210x-master | 22f24aa0ea462db1fcaeaa672c2fef103b986eae | [
"MIT"
] | null | null | null | 251.475446 | 97,626 | 0.903294 | [
[
[
"# DAT210x - Programming with Python for DS",
"_____no_output_____"
],
[
"## Module5- Lab6",
"_____no_output_____"
]
],
[
[
"import random, math\nimport pandas as pd\nimport numpy as np\nimport scipy.io\nfrom sklearn.model_selection import train_test_split\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn import manifold \nfrom sklearn.neighbors import KNeighborsClassifier\n\nplt.style.use('ggplot') # Look Pretty\n\n\n# Leave this alone until indicated:\nTest_PCA = False",
"_____no_output_____"
]
],
[
[
"### A Convenience Function",
"_____no_output_____"
],
[
"This method is for your visualization convenience only. You aren't expected to know how to put this together yourself, although you should be able to follow the code by now:",
"_____no_output_____"
]
],
[
[
"def Plot2DBoundary(DTrain, LTrain, DTest, LTest):\n # The dots are training samples (img not drawn), and the pics are testing samples (images drawn)\n # Play around with the K values. This is very controlled dataset so it should be able to get perfect classification on testing entries\n # Play with the K for isomap, play with the K for neighbors. \n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title('Transformed Boundary, Image Space -> 2D')\n\n padding = 0.1 # Zoom out\n resolution = 1 # Don't get too detailed; smaller values (finer rez) will take longer to compute\n colors = ['blue','green','orange','red']\n\n\n # ------\n\n # Calculate the boundaries of the mesh grid. The mesh grid is\n # a standard grid (think graph paper), where each point will be\n # sent to the classifier (KNeighbors) to predict what class it\n # belongs to. This is why KNeighbors has to be trained against\n # 2D data, so we can produce this countour. Once we have the \n # label for each point on the grid, we can color it appropriately\n # and plot it.\n x_min, x_max = DTrain[:, 0].min(), DTrain[:, 0].max()\n y_min, y_max = DTrain[:, 1].min(), DTrain[:, 1].max()\n x_range = x_max - x_min\n y_range = y_max - y_min\n x_min -= x_range * padding\n y_min -= y_range * padding\n x_max += x_range * padding\n y_max += y_range * padding\n\n # Using the boundaries, actually make the 2D Grid Matrix:\n xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),\n np.arange(y_min, y_max, resolution))\n\n # What class does the classifier say about each spot on the chart?\n # The values stored in the matrix are the predictions of the model\n # at said location:\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n # Plot the mesh grid as a filled contour plot:\n plt.contourf(xx, yy, Z, cmap=plt.cm.terrain, z=-100)\n\n\n # ------\n\n # When plotting the testing images, used to validate if the algorithm\n # is functioning correctly, size them as 5% of the overall chart 
size\n x_size = x_range * 0.05\n y_size = y_range * 0.05\n\n # First, plot the images in your TEST dataset\n img_num = 0\n for index in LTest.index:\n # DTest is a regular NDArray, so you'll iterate over that 1 at a time.\n x0, y0 = DTest[img_num,0]-x_size/2., DTest[img_num,1]-y_size/2.\n x1, y1 = DTest[img_num,0]+x_size/2., DTest[img_num,1]+y_size/2.\n\n # DTest = our images isomap-transformed into 2D. But we still want\n # to plot the original image, so we look to the original, untouched\n # dataset (at index) to get the pixels:\n img = df.iloc[index,:].reshape(num_pixels, num_pixels)\n ax.imshow(img,\n aspect='auto',\n cmap=plt.cm.gray,\n interpolation='nearest',\n zorder=100000,\n extent=(x0, x1, y0, y1),\n alpha=0.8)\n img_num += 1\n\n\n # Plot your TRAINING points as well... as points rather than as images\n for label in range(len(np.unique(LTrain))):\n indices = np.where(LTrain == label)\n ax.scatter(DTrain[indices, 0], DTrain[indices, 1], c=colors[label], alpha=0.8, marker='o')\n\n # Plot\n plt.show() ",
"_____no_output_____"
]
],
[
[
"### The Assignment",
"_____no_output_____"
],
[
"Use the same code from Module4/assignment4.ipynb to load up the `face_data.mat` file into a dataframe called `df`. Be sure to calculate the `num_pixels` value, and to rotate the images to being right-side-up instead of sideways. This was demonstrated in the [Lab Assignment 4](https://github.com/authman/DAT210x/blob/master/Module4/assignment4.ipynb) code.",
"_____no_output_____"
]
],
[
[
"mat = scipy.io.loadmat('Datasets/face_data.mat')\ndf = pd.DataFrame(mat['images']).T\nnum_images, num_pixels = df.shape\nnum_pixels = int(math.sqrt(num_pixels))\n\n# Rotate the pictures, so we don't have to crane our necks:\nfor i in range(num_images):\n df.loc[i,:] = df.loc[i,:].values.reshape(num_pixels, num_pixels).T.reshape(-1)",
"_____no_output_____"
]
],
[
[
"Load up your face_labels dataset. It only has a single column, and you're only interested in that single column. You will have to slice the column out so that you have access to it as a \"Series\" rather than as a \"Dataframe\". This was discussed in the the \"Slicin'\" lecture of the \"Manipulating Data\" reading on the course website. Use an appropriate indexer to take care of that. Be sure to print out the labels and compare what you see to the raw `face_labels.csv` so you know you loaded it correctly.",
"_____no_output_____"
]
],
[
[
"y = pd.read_csv('Datasets/face_labels.csv',header=None)\ny = y.iloc[:,0]",
"_____no_output_____"
]
],
[
[
"Do `train_test_split`. Use the same code as on the EdX platform in the reading material, but set the random_state=7 for reproducibility, and the test_size to 0.15 (150%). Your labels are actually passed in as a series (instead of as an NDArray) so that you can access their underlying indices later on. This is necessary so you can find your samples in the original dataframe. The convenience methods we've written for you that handle drawing expect this, so that they can plot your testing data as images rather than as points:",
"_____no_output_____"
]
],
[
[
"x_train, x_test, y_train, y_test = train_test_split(df, y, test_size=.2, random_state=7)",
"_____no_output_____"
]
],
[
[
"### Dimensionality Reduction",
"_____no_output_____"
]
],
[
[
"if Test_PCA:\n # INFO: PCA is used *before* KNeighbors to simplify your high dimensionality\n # image samples down to just 2 principal components! A lot of information\n # (variance) is lost during the process, as I'm sure you can imagine. But\n # you have to drop the dimension down to two, otherwise you wouldn't be able\n # to visualize a 2D decision surface / boundary. In the wild, you'd probably\n # leave in a lot more dimensions, which is better for higher accuracy, but\n # worse for visualizing the decision boundary;\n #\n # Your model should only be trained (fit) against the training data (data_train)\n # Once you've done this, you need use the model to transform both data_train\n # and data_test from their original high-D image feature space, down to 2D\n\n\n # TODO: Implement PCA here. ONLY train against your training data, but\n # transform both your training + test data, storing the results back into\n # data_train, and data_test.\n \n model = PCA(n_components=2)\n model.fit(x_train)\n x_train = model.transform(x_train)\n x_test = model.transform(x_test)\n\nelse:\n # INFO: Isomap is used *before* KNeighbors to simplify your high dimensionality\n # image samples down to just 2 components! A lot of information has been is\n # lost during the process, as I'm sure you can imagine. But if you have\n # non-linear data that can be represented on a 2D manifold, you probably will\n # be left with a far superior dataset to use for classification. Plus by\n # having the images in 2D space, you can plot them as well as visualize a 2D\n # decision surface / boundary. 
In the wild, you'd probably leave in a lot more\n # dimensions, which is better for higher accuracy, but worse for visualizing the\n # decision boundary;\n \n # Your model should only be trained (fit) against the training data (data_train)\n # Once you've done this, you need use the model to transform both data_train\n # and data_test from their original high-D image feature space, down to 2D\n\n \n # TODO: Implement Isomap here. ONLY train against your training data, but\n # transform both your training + test data, storing the results back into\n # data_train, and data_test.\n \n iso = manifold.Isomap(n_neighbors=5, n_components=2)\n iso.fit(x_train)\n x_train = iso.transform(x_train)\n x_test = iso.transform(x_test)",
"_____no_output_____"
]
],
[
[
"Implement `KNeighborsClassifier` here. You can use any K value from 1 through 20, so play around with it and attempt to get good accuracy. Fit the classifier against your training data and labels.",
"_____no_output_____"
]
],
[
[
"model = KNeighborsClassifier(n_neighbors=5)\nmodel.fit(x_train, y_train)",
"_____no_output_____"
]
],
[
[
"Calculate and display the accuracy of the testing set (data_test and label_test):",
"_____no_output_____"
]
],
[
[
"score = model.score(x_test,y_test)\nprint(score)",
"0.964285714286\n"
]
],
[
[
"Let's chart the combined decision boundary, the training data as 2D plots, and the testing data as small images so we can visually validate performance:",
"_____no_output_____"
]
],
[
[
"Plot2DBoundary(x_train, y_train, x_test, y_test)",
"C:\\Users\\Chris\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:64: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead\n"
]
],
[
[
"After submitting your answers, experiment with using using PCA instead of ISOMap. Are the results what you expected? Also try tinkering around with the test/train split percentage from 10-20%. Notice anything?",
"_____no_output_____"
]
],
[
[
"# .. your code changes above ..",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74061079dc2b2f75edb4d02716cb0b1439f2c73 | 287,848 | ipynb | Jupyter Notebook | 04 - Training a classifier/Working with CIFAR10 dataset.ipynb | victor-iyi/pytorch-examples | 74ad62e385054c9581c611b6ebae164838599317 | [
"MIT"
] | 1 | 2021-11-02T01:00:33.000Z | 2021-11-02T01:00:33.000Z | 04 - Training a classifier/Working with CIFAR10 dataset.ipynb | victor-iyi/pytorch-examples | 74ad62e385054c9581c611b6ebae164838599317 | [
"MIT"
] | null | null | null | 04 - Training a classifier/Working with CIFAR10 dataset.ipynb | victor-iyi/pytorch-examples | 74ad62e385054c9581c611b6ebae164838599317 | [
"MIT"
] | null | null | null | 420.830409 | 94,988 | 0.936063 | [
[
[
"# Working With The CIFAR10 Dataset",
"_____no_output_____"
],
[
"This is it. You've seen how to define a simple convolutional neural network, compute loss w.r.t. the graph Variables, and make gradient updates manually and with `torch.nn.optim` package.\nNow you might be thinking:\n\n### What about the data?\n\nGenerally, when you have to deal with image, text, audio or video data, you can use standard python packages that load data into a numpy array. Then you can convert this array into a `torch.*Tensor`.\n\n- For images, packages such as `Pillow`, `OpenCV` are useful.\n- For audio, packages such as `scipy` and `librosa`.\n- For text, either raw Python or Cython based loading, or `NLTK` and `SpaCy` are useful.\n\nSpecifically for [Computer vision](), the creators of pytorch have generously created a package called `torchvision`, that has data loaders for common datasets such as Imagenet, CIFAR10, MNIST, etc. and data transformers for images, viz., `torchvision.datasets` and `torch.utils.data.DataLoader`. This provides a huge convinence from writing boiler plate code.\n\nWe will use the **CIFAR10 dataset**. It has the classes: *‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’, ‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’*. The images in CIFAR-10 are of size `3x32x32`, i.e. 3-channel color images of `32x32` pixels in size.\n\n\n\n### Training an image classifier\n\nWe will do the following steps in order:\n\n1. Load and normalizing the CIFAR10 training and test datasets using `torchvision`.\n2. Define a Convolution Neural Network.\n3. Define a loss function.\n4. Train the network on the training data.\n5. Test the network on the test data.\n\n#### 1. Loading and normalizing CIFAR10\nUsing `torchvision`, it’s extremely easy to load CIFAR10.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\n# file manipulation\nimport os.path\n\n# arrays and visualization\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# pytorch imports\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n# Special package provided by pytorch\nimport torchvision\nimport torchvision.transforms as transforms",
"_____no_output_____"
]
],
[
[
"Let's define some *Hyperparameters* we're gonna need later on.",
"_____no_output_____"
]
],
[
[
"## Hyperparameters.\n\n\n# image channel 3=RGB, 1=Grayscale\nimg_channels = 3\n\n# Class labels.\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\nnum_classes = len(classes)\n\n# Data directory.\ndata_dir = '../datasets' # Dataset directory.\ndownload = True # Download dataset iff not already downloaded.\nnormalize = True # Maybe normalize training data.\n\n# Training parameters\nbatch_size = 16 # Mini-batch size.\nlr = 1e-2 # Optimizer's learning rate.\nepochs = 5 # Number of full passes over entire dataset.",
"_____no_output_____"
]
],
[
[
"The output of the `torchvision` dataset are PILImage images of range [0, 1]. We transform them to Tensors of normalized range [-1, 1].\n\nDefine the data directory, i.e. where the data should be downloaded to. With the use of `os.path` module.\n\n**NOTE:** `data_dir` could be modified to fit your use.",
"_____no_output_____"
]
],
[
[
"# Should normalize images or not.\n# Normalization helps convergence.\nif normalize:\n # Transform rule: Convert to Tensor, Normalize images in range -1 to 1.\n transform = transforms.Compose([transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), \n (0.5, 0.5, 0.5))])\nelse:\n # Transform rule: Convert to Tensor without normalizing image\n transform = transforms.Compose([transforms.ToTensor()])\n\n# Download the training set and apply the transform rule to each.\ntrainset = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=download, transform=transform)\n# Load the training set into mini-batches and shuffle them\ntrainset = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)\n\n# Download the testing set and apply the transform rule to each.\ntestset = torchvision.datasets.CIFAR10(root=data_dir, train=False, download=download, transform=transform)\n# Load the testing set into mini-batches and shuffle them as well.\ntestset = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=2)",
"Files already downloaded and verified\nFiles already downloaded and verified\n"
],
[
"# Helper function to plot images and labels\ndef imshow(images, labels, pred=None, smooth=True):\n images = images / 2 + 0.5 if normalize else images\n # Create figure with sub-plots.\n fig, axes = plt.subplots(4, 4)\n\n # Adjust vertical spacing if we need to print ensemble and best-net.\n wspace, hspace = 0.2, 0.8 if pred is not None else 0.4\n fig.subplots_adjust(hspace=hspace, wspace=wspace)\n\n for i, ax in enumerate(axes.flat):\n # Interpolation type.\n smooth = 'spline16' if smooth else 'nearest'\n\n # Plot image.\n ax.imshow(np.transpose(images[i], (1, 2, 0)), interpolation=smooth)\n \n # Name of the true class.\n labels_name = classes[labels[i]]\n\n # Show true and predicted classes.\n if pred is None:\n xlabel = f'True: {labels_name}'\n else:\n # Name of the predicted class.\n pred_name = classes[pred[i]]\n \n xlabel = f'True: {labels_name}\\nPred: {pred_name}'\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n \n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n \n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n\n# Visualization function to visualize dataset.\ndef visualize(data, smooth=False):\n # Iterate over the data.\n data_iter = iter(data)\n \n # Unpack images and labels.\n images, labels = data_iter.next()\n \n # Free up memory\n del data_iter\n \n # Call to helper function for plotting images.\n imshow(images, labels=labels, smooth=smooth)\n\n\n\n# Let's visualize some training set.\nvisualize(trainset)",
"_____no_output_____"
]
],
[
[
"### 2. Define a Convolution Neural Network\n\nIt's time to define our neural network. You've already seen how to define a simple convolutional neural network in the last section. But this time, instead of a single color channel, we have 3-color channels, because the CIFAR10 dataset contains colored images.",
"_____no_output_____"
]
],
[
[
"class Network(nn.Module):\n \n def __init__(self, **kwargs):\n super(Network, self).__init__()\n \n # Hyper-parameters\n self._img_channels = kwargs.get('img_channels')\n self._num_classes = kwargs.get('num_classes')\n \n # 2 convolutional & 3 fully connected layers\n self.conv1 = nn.Conv2d(self._img_channels, 16, 2)\n self.conv2 = nn.Conv2d(16, 32, 2)\n flatten_size = self.conv2.out_channels * 7 * 7\n self.fc1 = nn.Linear(flatten_size, 128)\n self.fc2 = nn.Linear(128, 64)\n self.fc3 = nn.Linear(64, self._num_classes)\n \n def forward(self, x):\n # Convolutional layers\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2(x), 2))\n # Flatten layer\n x = x.view(-1, self._flatten(x))\n # Fully connected layers\n x = F.relu(self.fc1(x)) # relu + linear\n x = F.dropout(x, p=0.2) # 20% dropout\n x = F.relu(self.fc2(x)) # relu + linear\n # Output layer\n x = self.fc3(x) # linear\n return x\n \n def _flatten(self, x):\n size = x.size()[1:] # input shape excluding batch dim.\n return torch.Tensor(size).numel()",
"_____no_output_____"
],
[
"# Instantiate the network and pass in our parameters.\nnet = Network(img_channels=img_channels, num_classes=len(classes))",
"_____no_output_____"
]
],
[
[
"### 3. Define a Loss function and optimizer\n\nLet’s use a Classification Cross-Entropy loss and Adam optimizer.",
"_____no_output_____"
]
],
[
[
"# Loss function criterion\nloss_func = nn.CrossEntropyLoss()\n# Adam optimizer\noptimizer = optim.Adam(net.parameters(), lr=lr)",
"_____no_output_____"
]
],
[
[
"### 4. Train the Network\n\nThis is when things start to get interesting. We simply have to loop over our data iterator, and feed the inputs to the network and optimize it.",
"_____no_output_____"
]
],
[
[
"# Loop over the data multiple times.\nfor epoch in range(epochs):\n\n # Loop through the training dataset (batch by batch).\n for i, data in enumerate(trainset):\n \n # Get the inputs and labels.\n inputs, labels = data\n \n # Wrap them in Variable (explained in section 2).\n inputs, labels = Variable(inputs), Variable(labels)\n \n # Zero the optimizer gradient buffer\n # to prevent gradient accumulation.\n optimizer.zero_grad()\n \n # Forward and backward propagation.\n outputs = net(inputs)\n loss = loss_func(outputs, labels)\n loss.backward()\n \n # Update learnable parameters w.r.t the loss.\n optimizer.step()\n \n # Print statistics.\n print(f'\\rEpoch: {epoch+1:,}\\tIter: {i+1:,}\\tLoss: {loss.data[0]:.4f}', end='')\n\n # Line break.\n print()\n\n\nprint('\\nFinished training!')",
"Epoch: 1\tIter: 3,125\tLoss: 1.7392\nEpoch: 2\tIter: 3,125\tLoss: 1.8330\nEpoch: 3\tIter: 3,125\tLoss: 2.0041\nEpoch: 4\tIter: 3,125\tLoss: 1.7069\nEpoch: 5\tIter: 3,125\tLoss: 1.5338\n\nFinished training!\n"
]
],
[
[
"### 5. Test the network on the test data\n\nWe have trained the network for 5 epochs (passes over the training data). Let's check if the network has learnt anything.\n\nHow we check this is by comparing the ground-truth labels over the one the network predicted. We'll keep track of the ones predicted correctly by creating a list, and appending to the list if the prediction was the same as the ground-truth.\n\nAlright, that been said, let's familarize ourselves with the data one more time by plotting a few from the `testset`.",
"_____no_output_____"
]
],
[
[
"# Look at some test data.\nvisualize(testset)",
"_____no_output_____"
]
],
[
[
"Okay, now let's make some predictions with our network.",
"_____no_output_____"
]
],
[
[
"# Let's make some predictions on the testset.\ntest_iter = iter(testset)\nimages, labels = test_iter.next()\n\n# Convert images to `autograd.Variable` \n# before passing through the network.\noutput = net(Variable(images))",
"_____no_output_____"
]
],
[
[
"The outputs are \"energies\" for the 10 classes. *Higher the energy for a class, the more the network thinks that the image is of the particular class*. So, let’s get the index of the highest energy:",
"_____no_output_____"
]
],
[
[
"# torch.max returns a tuple: (value, index)\n# Take the argmax of the predicted output.\n_, predictions = torch.max(output.data, dim=1)\n\n# Visualize the predictions.\nimshow(images, labels=labels, pred=predictions, smooth=True)",
"_____no_output_____"
]
],
[
[
"Maybe not so bad, huh? Let's see how the result performs on the entire dataset.",
"_____no_output_____"
]
],
[
[
"# Keep track of correct prediction and total.\ncorrect, total = 0, 0\n\n# Looping through the testset.\nfor data in testset:\n \n # Unpack the images and labels\n # from each mini-batch.\n images, labels = data\n \n # Pass image through the network\n # to make predictions.\n outputs = net(Variable(images))\n \n # Pretrieve the index with maximum score.\n _, pred = torch.max(outputs.data, dim=1)\n \n # Get the batch size and add on to the total.\n total += labels.size(0)\n \n # Count the number of correct predictions.\n # pred == outputs means where the predictions\n # equals to the ground-truth.\n correct += (pred == labels).sum()\n\n\n# Print the accuracy on the testset.\nprint('Accuracy on testset = {:.2%}'.format(correct/total))",
"Accuracy on testset = 46.46%\n"
]
],
[
[
"Well, that's slightly better than random guessing, which is 10% accuracy (randomly picking a class out of 10 classes). Seems like the network learnt something.\n\nHmmm, what are the classes that performed well, and the classes that did not perform well:",
"_____no_output_____"
]
],
[
[
"# Each index in the `correct_class` stores\n# the correct classification for that class;\n# while `total_class` stores the total number\n# of times we go through the class.\ncorrect_class = torch.zeros(10)\ntotal_class = torch.zeros(10)\n\n# Loop through all dataset\n# one batch at a time.\nfor data in testset:\n # Get the current batch images and labels\n images, labels = data\n \n # Pass the images through the network\n outputs = net(Variable(images))\n \n # Take the index of the maximum scores\n # returned by the network.\n _, pred = torch.max(outputs.data, dim=1)\n \n # Where the pred equals the labels will\n # return 1; and 0 otherwise.\n correct = (pred == labels).squeeze()\n \n # Loop through the batch labels\n for i, label in enumerate(labels):\n # Add on the correct predictions\n # and total for the current label.\n correct_class[label] += correct[i]\n total_class[label] += 1\n\n\n# Calculate accuracy and sort in descending order\naccuracy = correct_class / total_class\naccuracy, _ = torch.sort(accuracy, descending=True)\n\nfor i, acc in enumerate(accuracy):\n print(f'Accuracy of {classes[i]} \\t = {acc:.2%}')",
"Accuracy of plane \t = 73.20%\nAccuracy of car \t = 62.40%\nAccuracy of bird \t = 61.30%\nAccuracy of cat \t = 58.40%\nAccuracy of deer \t = 49.80%\nAccuracy of dog \t = 47.10%\nAccuracy of frog \t = 35.60%\nAccuracy of horse \t = 31.50%\nAccuracy of ship \t = 23.50%\nAccuracy of truck \t = 21.80%\n"
]
],
[
[
"Okay, so what next?\n\nHow do we run these neural networks on the GPU?\n\n### Training on GPU\n\nJust like how you transfer a Tensor on to the GPU, you transfer the neural net onto the GPU. This will recursively go over all modules and convert their parameters and buffers to CUDA tensors:\n\n```python\nnet.cuda()\n```\nRemember that you will have to send the inputs and targets at every step to the GPU too:\n\n```python\ninputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())\n```\n\nWhy don't I notice **MASSIVE** speedup compared to CPU? Because your network is really small.\n\n**Exercise:** Try increasing the width of your network (argument 2 of the first `nn.Conv2d`, and argument 1 of the second `nn.Conv2d` – they need to be the same number), see what kind of speedup you get.\n\n### Goals achieved:\n\n- Understanding PyTorch’s Tensor library and neural networks at a high level.\n- Train a small neural network to classify images\n\n### Training on multiple GPUs\n\nIf you want to see even more MASSIVE speedup using all of your GPUs, please check out Optional: \n[Data Parallelism](../).\n\n\n### Where do I go next?\n\n- Train neural nets to play video games\n- Train a state-of-the-art ResNet network on imagenet\n- Train a face generator using Generative Adversarial Networks\n- Train a word-level language model using Recurrent LSTM networks\n- [More examples](https://github.com/pytorch/examples)\n- [More tutorials](https://github.com/pytorch/tutorials)\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7406392934a38b7f6be190d7c6a5937a2faf678 | 233,352 | ipynb | Jupyter Notebook | bike rental prediction/Binesh Kumar's First Neural Network.ipynb | TechieBIN/graphdef | 03f4790e98a3c2e45d9161db42de5e3afd1fcda6 | [
"MIT"
] | null | null | null | bike rental prediction/Binesh Kumar's First Neural Network.ipynb | TechieBIN/graphdef | 03f4790e98a3c2e45d9161db42de5e3afd1fcda6 | [
"MIT"
] | 1 | 2021-06-01T23:20:59.000Z | 2021-06-01T23:20:59.000Z | bike rental prediction/Binesh Kumar's First Neural Network.ipynb | thedatasense/graphdef | 03f4790e98a3c2e45d9161db42de5e3afd1fcda6 | [
"MIT"
] | null | null | null | 236.905584 | 165,612 | 0.861364 | [
[
[
"# Your first neural network\n\nIn this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.\n\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Load and prepare the data\n\nA critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!",
"_____no_output_____"
]
],
[
[
"data_path = 'Bike-Sharing-Dataset/hour.csv'\n\nrides = pd.read_csv(data_path)",
"_____no_output_____"
],
[
"rides.head()",
"_____no_output_____"
]
],
[
[
"## Checking out the data\n\nThis dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.\n\nBelow is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.",
"_____no_output_____"
]
],
[
[
"rides[:24*10].plot(x='dteday', y='cnt')",
"_____no_output_____"
]
],
[
[
"### Dummy variables\nHere we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.",
"_____no_output_____"
]
],
[
[
"dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']\nfor each in dummy_fields:\n dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)\n rides = pd.concat([rides, dummies], axis=1)\n\nfields_to_drop = ['instant', 'dteday', 'season', 'weathersit', \n 'weekday', 'atemp', 'mnth', 'workingday', 'hr']\ndata = rides.drop(fields_to_drop, axis=1)\ndata.head()",
"_____no_output_____"
]
],
[
[
"### Scaling target variables\nTo make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.\n\nThe scaling factors are saved so we can go backwards when we use the network for predictions.",
"_____no_output_____"
]
],
[
[
"quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']\n# Store scalings in a dictionary so we can convert back later\nscaled_features = {}\nfor each in quant_features:\n mean, std = data[each].mean(), data[each].std()\n scaled_features[each] = [mean, std]\n data.loc[:, each] = (data[each] - mean)/std",
"_____no_output_____"
]
],
[
[
"### Splitting the data into training, testing, and validation sets\n\nWe'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.",
"_____no_output_____"
]
],
[
[
"# Save data for approximately the last 21 days \ntest_data = data[-21*24:]\n\n# Now remove the test data from the data set \ndata = data[:-21*24]\n\n# Separate the data into features and targets\ntarget_fields = ['cnt', 'casual', 'registered']\nfeatures, targets = data.drop(target_fields, axis=1), data[target_fields]\ntest_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]",
"_____no_output_____"
]
],
[
[
"We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).",
"_____no_output_____"
]
],
[
[
"# Hold out the last 60 days or so of the remaining data as a validation set\ntrain_features, train_targets = features[:-60*24], targets[:-60*24]\nval_features, val_targets = features[-60*24:], targets[-60*24:]",
"_____no_output_____"
]
],
[
[
"## Time to build the network\n\nBelow you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.\n\n<img src=\"assets/neural_network.png\" width=300px>\n\nThe network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.\n\nWe use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.\n\n> **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.\n\nBelow, you have these tasks:\n1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.\n2. Implement the forward pass in the `train` method.\n3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.\n4. Implement the forward pass in the `run` method.\n ",
"_____no_output_____"
]
],
[
[
"class NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n \n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.\n \n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your \n # implementation there instead.\n #\n #def sigmoid(x):\n # return 0 # Replace 0 with your sigmoid calculation here\n #self.activation_function = sigmoid\n \n \n def train(self, features, targets):\n ''' Train the network on batch of features and targets. 
\n \n Arguments\n ---------\n \n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n \n '''\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(X,self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error - Replace this value with your calculations.\n error = y - final_outputs # Output layer error is the difference between desired target and actual output.\n output_error_term = error\n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = np.dot(output_error_term,self.weights_hidden_to_output.T)\n \n # TODO: Backpropagated error terms - Replace these values with your calculations.\n \n hidden_error_term = hidden_error * (hidden_outputs * (1 - hidden_outputs))\n\n # Weight step (input to hidden)\n delta_weights_i_h += hidden_error_term * X[:,None]\n # Weight step (hidden to output)\n delta_weights_h_o += output_error_term * hidden_outputs[:,None]\n\n # TODO: Update the weights - Replace these values with your calculations.\n self.weights_hidden_to_output += self.lr * (delta_weights_h_o / n_records) # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * (delta_weights_i_h / n_records) # update input-to-hidden weights with gradient descent 
step\n \n def run(self, features):\n ''' Run a forward pass through the network with input features \n \n Arguments\n ---------\n features: 1D array of feature values\n '''\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n hidden_inputs = (np.dot(features,self.weights_input_to_hidden)) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n \n # TODO: Output layer - Replace these values with the appropriate calculations.\n final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer \n \n return final_outputs",
"_____no_output_____"
],
[
"def MSE(y, Y):\n return np.mean((y-Y)**2)",
"_____no_output_____"
]
],
[
[
"## Unit tests\n\nRun these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project.",
"_____no_output_____"
]
],
[
[
"import unittest\n\ninputs = np.array([[0.5, -0.2, 0.1]])\ntargets = np.array([[0.4]])\ntest_w_i_h = np.array([[0.1, -0.2],\n [0.4, 0.5],\n [-0.3, 0.2]])\ntest_w_h_o = np.array([[0.3],\n [-0.1]])\n\nclass TestMethods(unittest.TestCase):\n \n ##########\n # Unit tests for data loading\n ##########\n \n def test_data_path(self):\n # Test that file path to dataset has been unaltered\n self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')\n \n def test_data_loaded(self):\n # Test that data frame loaded\n self.assertTrue(isinstance(rides, pd.DataFrame))\n \n ##########\n # Unit tests for network functionality\n ##########\n\n def test_activation(self):\n network = NeuralNetwork(3, 2, 1, 0.5)\n # Test that the activation function is a sigmoid\n self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))\n\n def test_train(self):\n # Test that weights are updated correctly on training\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n \n network.train(inputs, targets)\n self.assertTrue(np.allclose(network.weights_hidden_to_output, \n np.array([[ 0.37275328], \n [-0.03172939]])))\n self.assertTrue(np.allclose(network.weights_input_to_hidden,\n np.array([[ 0.10562014, -0.20185996], \n [0.39775194, 0.50074398], \n [-0.29887597, 0.19962801]])))\n\n def test_run(self):\n # Test correctness of run method\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n\n self.assertTrue(np.allclose(network.run(inputs), 0.09998924))\n\nsuite = unittest.TestLoader().loadTestsFromModule(TestMethods())\nunittest.TextTestRunner().run(suite)",
".....\n----------------------------------------------------------------------\nRan 5 tests in 0.007s\n\nOK\n"
]
],
[
[
"## Training the network\n\nHere you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.\n\nYou'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.\n\n### Choose the number of iterations\nThis is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model with not generalize well to other data, this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.\n\n### Choose the learning rate\nThis scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.\n\n### Choose the number of hidden nodes\nThe more hidden nodes you have, the more accurate predictions the model will make. 
Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.",
"_____no_output_____"
]
],
[
[
"import sys\n\n### Set the hyperparameters here ###\niterations = 4000\nlearning_rate = 0.785\nhidden_nodes = 16\noutput_nodes = 1\n\nN_i = train_features.shape[1]\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\nlosses = {'train':[], 'validation':[]}\nfor ii in range(iterations):\n # Go through a random batch of 128 records from the training data set\n batch = np.random.choice(train_features.index, size=128)\n X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']\n \n network.train(X, y)\n \n # Printing out the training progress\n train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)\n val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)\n sys.stdout.write(\"\\rProgress: {:2.1f}\".format(100 * ii/float(iterations)) \\\n + \"% ... Training loss: \" + str(train_loss)[:5] \\\n + \" ... Validation loss: \" + str(val_loss)[:5])\n sys.stdout.flush()\n \n losses['train'].append(train_loss)\n losses['validation'].append(val_loss)",
"Progress: 100.0% ... Training loss: 0.058 ... Validation loss: 0.121"
],
[
"plt.plot(losses['train'], label='Training loss')\nplt.plot(losses['validation'], label='Validation loss')\nplt.legend()\n_ = plt.ylim()",
"_____no_output_____"
]
],
[
[
"## Check out your predictions\n\nHere, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(8,4))\n\nmean, std = scaled_features['cnt']\npredictions = network.run(test_features).T*std + mean\nax.plot(predictions[0], label='Prediction')\nax.plot((test_targets['cnt']*std + mean).values, label='Data')\nax.set_xlim(right=len(predictions))\nax.legend()\n\ndates = pd.to_datetime(rides.ix[test_data.index]['dteday'])\ndates = dates.apply(lambda d: d.strftime('%b %d'))\nax.set_xticks(np.arange(len(dates))[12::24])\n_ = ax.set_xticklabels(dates[12::24], rotation=45)",
"_____no_output_____"
]
],
[
[
"## OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric).\n \nAnswer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?\n\n> **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter\n\n#### Your answer below\n\nThe model did pretty good for the regular weeks. It couldn't accurately predict holiday weekend at the end of december. Adding holiday features would help the model make accurate predictions. \n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7406983111aa84187854ed72c602bcd0831e769 | 17,911 | ipynb | Jupyter Notebook | notebooks/zcu104/Tensil TCU Demo - ResNet-20 CIFAR.ipynb | tensil-ai/tensil | 293de1ea42d567c2041e8733bab25f678f20433c | [
"Apache-2.0"
] | 134 | 2022-03-04T21:06:49.000Z | 2022-03-30T21:35:54.000Z | notebooks/zcu104/Tensil TCU Demo - ResNet-20 CIFAR.ipynb | tensil-ai/tensil | 293de1ea42d567c2041e8733bab25f678f20433c | [
"Apache-2.0"
] | 17 | 2022-03-04T19:33:16.000Z | 2022-03-28T03:14:09.000Z | notebooks/zcu104/Tensil TCU Demo - ResNet-20 CIFAR.ipynb | tensil-ai/tensil | 293de1ea42d567c2041e8733bab25f678f20433c | [
"Apache-2.0"
] | 5 | 2022-03-07T02:50:45.000Z | 2022-03-19T01:53:12.000Z | 63.514184 | 11,048 | 0.787784 | [
[
[
"# Tensil TCU Demo - ResNet-20 CIFAR",
"_____no_output_____"
],
[
"### Import the TCU driver",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append('/home/xilinx')\n\n# Needed to run inference on TCU\nimport time\nimport numpy as np\nimport pynq\nfrom pynq import Overlay\nfrom tcu_pynq.driver import Driver\nfrom tcu_pynq.architecture import zcu104\n\n# Needed for unpacking and displaying image data\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pickle",
"_____no_output_____"
]
],
[
[
"### Configure the fabric and driver",
"_____no_output_____"
]
],
[
[
"overlay = Overlay('/home/xilinx/tensil_zcu104.bit')\ntcu = Driver(zcu104, overlay.axi_dma_0)",
"_____no_output_____"
]
],
[
[
"### Read CIFAR-10 images",
"_____no_output_____"
]
],
[
[
"def unpickle(file):\n with open(file, 'rb') as fo:\n d = pickle.load(fo, encoding='bytes')\n return d\n\ncifar = unpickle('/home/xilinx/cifar-10-batches-py/test_batch')\ndata = cifar[b'data']\nlabels = cifar[b'labels']\n\ndata = data[10:20]\nlabels = labels[10:20]\n\ndata_norm = data.astype('float32') / 255\ndata_mean = np.mean(data_norm, axis=0)\ndata_norm -= data_mean\n\ncifar_meta = unpickle('/home/xilinx/cifar-10-batches-py/batches.meta')\nlabel_names = [b.decode() for b in cifar_meta[b'label_names']]\n\ndef show_img(data, n):\n plt.imshow(np.transpose(data[n].reshape((3, 32, 32)), axes=[1, 2, 0]))\n\ndef get_img(data, n):\n img = np.transpose(data_norm[n].reshape((3, 32, 32)), axes=[1, 2, 0])\n img = np.pad(img, [(0, 0), (0, 0), (0, tcu.arch.array_size - 3)], 'constant', constant_values=0)\n return img.reshape((-1, tcu.arch.array_size))\n\ndef get_label(labels, label_names, n):\n label_idx = labels[n]\n name = label_names[label_idx]\n return (label_idx, name)",
"_____no_output_____"
],
[
"# CIFAR-10 input\nn = 9\nimg = get_img(data, n)\nlabel_idx, label = get_label(labels, label_names, n)\nshow_img(data, n)",
"_____no_output_____"
]
],
[
[
"## Demo: ResNet-20 inference",
"_____no_output_____"
],
[
"### Load the model",
"_____no_output_____"
]
],
[
[
"tcu.load_model('/home/xilinx/resnet20v2_cifar_onnx_zcu104.tmodel')",
"_____no_output_____"
]
],
[
[
"### Run inference",
"_____no_output_____"
]
],
[
[
"inputs = {'x:0': img}\n\nstart = time.time()\noutputs = tcu.run(inputs)\nend = time.time()\nprint(\"Ran inference in {:.4}s\".format(end - start))\nprint()\n\nclasses = outputs['Identity:0'][:10]\nresult_idx = np.argmax(classes)\nresult = label_names[result_idx]\n\nprint(\"Output activations:\")\nprint(classes)\nprint()\nprint(\"Result: {} (idx = {})\".format(result, result_idx))\nprint(\"Actual: {} (idx = {})\".format(label, label_idx))",
"Ran inference in 0.01865s\n\nOutput activations:\n[-13.69140625 -12.359375 -7.90625 -6.30859375 -8.296875\n -12.2421875 15.17578125 -15.0390625 -10.5703125 -9.12109375]\n\nResult: frog (idx = 6)\nActual: frog (idx = 6)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74079b953fe6c6d1dd54ab326c4fd88682e9a3b | 41,449 | ipynb | Jupyter Notebook | 01_numpy.ipynb | ejdecena/herramientas | 3b1432c50da01e9566a6ad0ecebd54287f028892 | [
"MIT"
] | 3 | 2019-09-16T16:58:21.000Z | 2019-12-16T18:30:26.000Z | 01_numpy.ipynb | ejdecena/herramientas | 3b1432c50da01e9566a6ad0ecebd54287f028892 | [
"MIT"
] | null | null | null | 01_numpy.ipynb | ejdecena/herramientas | 3b1432c50da01e9566a6ad0ecebd54287f028892 | [
"MIT"
] | 1 | 2019-11-13T22:24:14.000Z | 2019-11-13T22:24:14.000Z | 25.649134 | 956 | 0.546768 | [
[
[
"\n# Librería NumPy.\n***",
"_____no_output_____"
],
[
"[*NumPy*](https://docs.scipy.org/doc/numpy/user/index.html) (*Numerical Python*) es una librería para el cómputo científico. Esta librería contiene muchas funciones matemáticas que permiten realizar operaciones de álgebra lineal, manejar matrices y vectores, generar números pseudo-aleatorios, etc. De forma muy general, el computo científico se basa en operar con arreglos de números; a veces estos arreglos representan matrices y vectores, y las operaciones necesarias son fundamentalmente las del álgebra lineal. En otros casos, como en el análisis de datos, los arreglos de números no necesariamente (o no siempre) son vectores y matrices en estricto sentido matemático. Por ejemplo casi cualquier conjunto de datos puede ser pensado como un arreglo de números. Una imagen es un arreglo bidimensional de números donde cada número representa el brillo de un pixel. Un sonido es un arreglo unidimensional que representa intensidad versus tiempo.\n\n*NumPy* no forma parte de la instalación estándar de *Python*, así que debe instalarse por separado. Desde una cónsola o terminal, basta con teclear:\n\n```bash\npip install numpy\n```\n\n*NumPy* introduce objetos *`array`* que son similares a las listas en *Python*, pero que pueden ser manipulados por numerosas funciones contenidas en la librería. El tamaño de los arrays es **inmutable** y no se permiten elementos vacíos.\n\nEn definitiva, para el cómputo científico es necesario contar con formas eficientes de almacenar y manipular arreglos de números y *NumPy* ha sido diseñado para esta tarea. El código escrito en *NumPy* suele ser más corto que el código equivalente en *Python* puro. El uso de *loops* es reducido, ya que muchas operaciones se aplican directamente sobre arreglos (*arrays*). Esto se conoce como *vectorizar el código*, internamente los loops siguen estando presentes pero son ejecutados por rutinas optimizadas escritas en lenguajes como _C_ o *Fortran*. 
Además, *NumPy* provee de muchas funciones matemáticas/científicas listas para usar. Esto reduce la cantidad de código que debemos escribir reduciendo así las posibilidades de cometer errores, y lo más importante, es que las funciones de *NumPy* están escritas usando implementaciones eficientes y confiables.\n\nLa siguiente *hoja-resumen* condensa los principales comandos de *NumPy* que revisaremos en este *Notebook* y que luego podrá servir para consulta rápida.\n\n\n\nPara poder usar *NumPy* debemos importarlo, la forma más común de importar *NumPy* es la siguiente:",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"## Arreglos (arrays).",
"_____no_output_____"
],
[
"*NumPy* usa una estructura de datos llamada *array* (arreglos). Los arreglos de *NumPy* son similares a las listas de *Python*, pero son más eficientes para realizar tareas numéricas. La eficiencia deriva de las siguientes características:\n\n* Las listas de *Python* son muy generales, pudiendo contener objetos de distinto tipo. Además los objetos son asignados dinámicamente; es decir, el tamaño de una lista en *Python* no está predefinido y siempre podemos agregar más y más elementos.\n\n* Por el contrario, los arreglos de *NumPy* son **estáticos** y **homogéneos**. El tipo de los objetos se determina cuando el array es creado (de forma automática o por el usuario) lo que permite hacer uso eficiente de la memoria.\n\n* Otra razón por la cual los arreglos en *NumPy* son más eficientes que las listas, es que en *Python* todo es un objeto, incluso los números! Por ejemplo en lenguaje *C* un entero es esencialmente un rótulo que conecta un lugar en la memoria de la computadora cuyos bytes se usan para codificar el valor de ese entero. Sin embargo, en *Python* un entero es un objeto más complejo que contiene más información que simplemente el valor del número. Esto da flexibilidad a *Python*, pero el costo es que es más lento que un lenguaje como _C_. Este costo es aún mayor cuando combinamos muchos de estos objetos en un objeto más complejo, por ejemplo cuando combinamos enteros dentro de una lista en *Python*.\n\nOtra ventaja de los arreglos de *NumPy* es que se comportan de forma similar a los vectores y matrices usados en matemáticas.",
"_____no_output_____"
],
[
"## Creando arreglos.",
"_____no_output_____"
],
[
"Existen varias rutinas para crear arreglos de *NumPy* a partir de:\n\n* Listas o tuplas de *Python*.\n* Rangos numéricos.\n* Números aletorios.\n* Ceros y unos.\n* Archivos.\n\n### A partir de listas y tuplas:",
"_____no_output_____"
]
],
[
[
"v = np.array([1, 2, 3, 4 , 5, 6])\nv",
"_____no_output_____"
],
[
"M = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nM",
"_____no_output_____"
]
],
[
[
"El atributo *`shape`* de los objetos *array* retorna la *forma* de los arreglos; esto es el número de dimensiones y número de elementos del *array* en forma de tupla:",
"_____no_output_____"
]
],
[
[
"v.shape, M.shape",
"_____no_output_____"
]
],
[
[
"### A partir de un rango numérico:",
"_____no_output_____"
],
[
"Una forma de crear arreglos en *NumPy* desde cero es usando *rangos*. Por ejemplo podemos crear un arreglo conteniendo números igualmente espaciados en el intervalo \\[desde, hasta) usando la función *`arange`*:",
"_____no_output_____"
]
],
[
[
"np.arange(0, 10, 2) # desde, hasta(sin incluir), paso.",
"_____no_output_____"
]
],
[
[
"Otra función para crear rangos es *`linspace`* que devuelve números igualmente espaciados en el intervalo \\[*desde*, *hasta*, *elementos*\\] (es decir incluyendo el *hasta*). Otra diferencia de la función *`linspace`* con *`arange`* es que no se especifica el paso sino la **cantidad total** de números que contendrá el arreglo:",
"_____no_output_____"
]
],
[
[
"np.linspace(1, 10, 5) # desde, hasta, elementos (elementos es opcional).",
"_____no_output_____"
]
],
[
[
"### A partir de números aleatorios:",
"_____no_output_____"
],
[
"Los números aleatorios son usados en muchos problemas científicos. En la práctica las computadoras son capaces solo de generar números *pseudo-aleatorios*; es decir, números que para los fines prácticos *lucen* como números aleatorios.\n\nTodas las rutinas para generar números aleatorios viven dentro del módulo *`random`* de *NumPy*. *Python* usa un algortimo llamado [Mersenne Twister](https://en.wikipedia.org/wiki/Mersenne_twister) para generar números pseudo-aleatorios. Este algorítmo es más que suficiente para fines científicos, pero no es útil en el caso que necesitemos números pseudo-aleatorios para usar en *criptografía*.\n\nLa función mas simple es *`rand`*. Esta función crea un arreglo a partir de una distribución *uniforme* en el intervalo \\[0, 1):",
"_____no_output_____"
]
],
[
[
"np.random.rand(2, 5) # arreglo con forma (2, 5).",
"_____no_output_____"
]
],
[
[
"De forma similar, la función *`randn`* devuelve muestras a partir de la *distribución normal estándar* (media = 0, desviación estándard = 1):",
"_____no_output_____"
]
],
[
[
"np.random.randn(10) # Muestra de 10 elementos de la distribución normal estándar.",
"_____no_output_____"
]
],
[
[
"Más general, la función *`normal`* devuelve una muestra de una *distribución normal* dada una media, una desviación estándar y el tamaño de la muestra:",
"_____no_output_____"
]
],
[
[
"media = 5\ndesv = 2\nmuestra = 5\nnp.random.normal(media, desv, muestra)",
"_____no_output_____"
]
],
[
[
"### A partir de ceros y unos:",
"_____no_output_____"
],
[
"Las funciones *`zeros`* y *`ones`* retornan arreglos de *ceros* y *unos* dados el número de elementos:",
"_____no_output_____"
]
],
[
[
"np.zeros(5) # Arreglo de 5 elementos cero.",
"_____no_output_____"
],
[
"np.ones(7) # Arreglo de 7 elementos uno.",
"_____no_output_____"
],
[
"np.zeros(shape = (2, 3)) # Arreglo de ceros especificando el shape.",
"_____no_output_____"
],
[
"np.ones(shape = (3, 3)) # Arreglo de unos especificando el shape.",
"_____no_output_____"
]
],
[
[
"## Indexado y rebanado de arreglos.",
"_____no_output_____"
],
[
"Los arreglos de *NumPy*, al igual que las listas se pueden *indexar* y se pueden tomar rebanadas (slices). La sintaxis es una generalización de la usada para las listas de *Python*. Una de las diferencias es que podemos indexar de acuerdo a las distintas dimensiones de un arreglo.",
"_____no_output_____"
]
],
[
[
"M[0] # El primer elemento de M.",
"_____no_output_____"
],
[
"M[0, 1] # El primer elemento de M y obtenemos el segundo elemento.",
"_____no_output_____"
],
[
"M[0][1] # Forma alternativa.",
"_____no_output_____"
],
[
"M[1:] # A partir de la fila 1, todo.",
"_____no_output_____"
],
[
"M[1,:] # Fila 1, todas las columnas.",
"_____no_output_____"
],
[
"M[1] # Forma equivalente.",
"_____no_output_____"
],
[
"M[:, 1] # Todas las dilas de la columna 1.",
"_____no_output_____"
],
[
"M[:, 1:] # Todas las columnas a partir de la columna 1.",
"_____no_output_____"
],
[
"M[::-1] # Los elementos de M en reversa.",
"_____no_output_____"
]
],
[
[
"Es importante acotar que al tomar *rebanadas* (*slices*) *NumPy* NO genera un nuevo arreglo; sino una **vista** (*view*) del arreglo original. Por lo tanto, si a una rebanada le asignamos un número, se lo estaremos asignando al arreglo original, como se puede ver en el siguiente ejemplo:",
"_____no_output_____"
]
],
[
[
"M[0, 0] = 0\nM",
"_____no_output_____"
]
],
[
[
"Para crear copias se puede usar la función *`np.copy()`* o el método *`.copy()`* de los objetos *`array`*.",
"_____no_output_____"
],
[
"## Funciones Universales (Ufunc).",
"_____no_output_____"
],
[
"*NumPy* provee de varias funciones matemáticas. Esto puede parecer redundante ya que la librería estandard de *Python* ya provee de este tipo de funciones. La **diferencia**, es que la funciones matemáticas de *NumPy* (como otras funciones) pueden ser aplicadas en **un solo paso** a todos los elementos de un arreglo.\n\nPor ejemplo, si quisieramos calcular la raíz cuadrada de todos los elementos de una lista de *Python* deberíamos hacer un loop sobre cada elementos de la lista y computar la raíz cuadrada a cada elemento (y posiblemente almacenarlo en otra lista). Con *NumPy* podemos hacer esto mismo en una sola linea:",
"_____no_output_____"
]
],
[
[
"np.sqrt(M)",
"_____no_output_____"
]
],
[
[
"Funciones como *`sqrt`*, que operan sobre arreglos *elemento-a-elemento* se conocen como [***funciones universales***](http://docs.scipy.org/doc/numpy/reference/ufuncs.html) (usualmente abreviadas como *ufunc*).\n\nUna de las ventajas de usar *ufuncs* es que permiten escribir código más breve. Otra ventaja es que los cómputos son más rápidos que usando loops de *Python*. Detrás de escena *NumPy* sí realiza un loop, pero este se ejecuta en lenguaje _C_ o *Fortran*, por lo que hay una ganancia considerable en velocidad en comparación con el código en *Python* puro. Además, el código usado por *NumPy* es código que suele estar optimizado gracias a los años de labor de programadores y científicos.\n\nEsta forma de omitir loops y escribir operaciones sobre vectores se llama *vectorización*.\n\nVeamos otro ejemplo, como sumar todos los elementos de un arreglo:",
"_____no_output_____"
]
],
[
[
"np.sum(M)",
"_____no_output_____"
]
],
[
[
"En el ejemplo anterior la suma se hizo sobre el arreglo \"aplanado\". Hay veces que esto no es lo que queremos, si no que necesitamos sumar sobre alguna de las dimensiones del arreglo:",
"_____no_output_____"
]
],
[
[
"np.sum(M, axis = 0) # Sumariza por columnas.",
"_____no_output_____"
],
[
"np.sum(M, axis = 1) # Sumariza por filas.",
"_____no_output_____"
]
],
[
[
"## Broadcasting.",
"_____no_output_____"
],
[
"Otro elemento que facilita vectorizar código es la capacidad de operar sobre arreglos que no tienen las mismas dimensiones. Esto se llama *broadcasting* y no es más que un conjunto de reglas que permiten aplicar operaciones binarias (suma, multiplicación etc.) a arreglos de distinto tamaño.\n\nConsideremos el siguiente ejemplo:",
"_____no_output_____"
]
],
[
[
"a = np.array([0, 1, 2])\nb = np.array([2, 2, 2])\na + b",
"_____no_output_____"
]
],
[
[
"Esto no es nada sorprendente, lo que hemos hecho es sumar elemento-a-elemento. Fíjese que el arreglo `b` contiene 3 veces el número 2. Gracias al *broadcasting* es posible obtener el mismo resultado al hacer:",
"_____no_output_____"
]
],
[
[
"a + 2",
"_____no_output_____"
]
],
[
[
"Esto no sólo funciona para arreglos y números, también funciona para dos arreglos:",
"_____no_output_____"
]
],
[
[
"M + b",
"_____no_output_____"
]
],
[
[
"En ambos casos lo que está sucediendo es como si antes de realizar la suma extendieramos una de las partes para que las dimensiones conincidan, por ejemplo repetir 3 veces el número 2 o tres veces el vector b. En realidad tal repetición no se realiza, pero es una forma útil de pensar la operación.\n\nEs claro que el *broadcasting* NO puede funcionar para cualquier par de arreglos. La siguiente operación funciona:",
"_____no_output_____"
]
],
[
[
"M[1:, :] + b",
"_____no_output_____"
]
],
[
[
"Mientras que la siguiente dará un error:",
"_____no_output_____"
]
],
[
[
"M + b[:2]",
"_____no_output_____"
]
],
[
[
"El error es claro, *NumPy* no sabe cómo hacer para encajar las dimensiones de estos dos arreglos. Más detalles sobre broadcasting [aquí](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).",
"_____no_output_____"
],
[
"## Comparaciones y máscaras de booleanos.",
"_____no_output_____"
],
[
"Así como es posible sumar un número a un arreglo, también es posible hacer comparaciones *elemento-a-elemento*:",
"_____no_output_____"
]
],
[
[
"M > 3",
"_____no_output_____"
]
],
[
[
"Es muy común usar el resultado de tal comparación para obtener valores de un arreglo que cumplan con cierto criterio, como:",
"_____no_output_____"
]
],
[
[
"M[M > 3]",
"_____no_output_____"
]
],
[
[
"O incluso combinando arreglos, como:",
"_____no_output_____"
]
],
[
[
"M[a == 2]",
"_____no_output_____"
]
],
[
[
"## Medidas de centralidad y dispersión.",
"_____no_output_____"
],
[
"*NumPy* nos permite calcular la media, la mediana y la varianza a partir de arrays de forma muy simple. Por ejemplo para calcular la media podemos usar la función np.mean():",
"_____no_output_____"
]
],
[
[
"np.mean(v)",
"_____no_output_____"
]
],
[
[
"Una forma alternativa es usar el método *`.mean()`* de un objeto *array*:",
"_____no_output_____"
]
],
[
[
"v.mean()",
"_____no_output_____"
]
],
[
[
"Las funciones y métodos *`var`* y *`std`* calculan la *varianza* y la *desviación* de un *array*:",
"_____no_output_____"
]
],
[
[
"np.var(v) # Varianza de los elementos de v.",
"_____no_output_____"
],
[
"v.var() # Forma alternativa.",
"_____no_output_____"
],
[
"np.std(v) # Desviación estándar de v.",
"_____no_output_____"
],
[
"v.std() # Forma alternativa.",
"_____no_output_____"
]
],
[
[
"Existen otras medidas para caracterizar los datos, llamadas de forma, como son la [*curtosis*](https://es.wikipedia.org/wiki/Curtosis) y el [*sesgo*](https://es.wikipedia.org/wiki/Sesgo_estadístico) (o asimetría estadística).\n\nEstás medidas son menos usadas en parte porque su interpretación es menos intuitiva que otras medidas, como la media o la varianza, al punto que la interpretación correcta de estas medidas ha sido objeto de varias discusiones y malos entendidos a los largo de los años. Otra razón para su menor uso es que históricamente gran parte de la estadística se ha basado en el uso de Gausianas (o en asumir que los datos son Gaussianos) para las cuales la curtosis y el sesgo son cero.",
"_____no_output_____"
],
[
"## Cuantil.",
"_____no_output_____"
],
[
"Los *cuantiles* son puntos de corte que dividen al conjunto de datos en grupos de igual tamaño. Existen varios nombres para los cuantiles según la cantidad de divisiones que nos interesen.\n\n* Los *cuartiles* son los tres puntos que dividen a la distribución en 4 partes iguales, se corresponden con los cuantiles 0.25, 0.50 y 0.75.\n* Los *quintiles* dividen a la distribución en cinco partes (corresponden a los cuantiles 0.20, 0.40, 0.60 y 0.80);\n* Los deciles, que dividen a la distribución en diez partes.\n* Los percentiles, que dividen a la distribución en cien partes.\n* La mediana es el percentil 50 o el cuartil 0.5.\n\nEn *Python* el cálculo de estos estadísticos puede realizarse fácilmente usando funciones predefinidas en *NumPy*:",
"_____no_output_____"
]
],
[
[
"x = np.random.rand(100)\nnp.percentile(x , [25, 50, 75])",
"_____no_output_____"
]
],
[
[
"### Z-score.",
"_____no_output_____"
],
[
"El *Z-score* es una cantidad adimensional que expresa el número de desviaciones estándar que un dato está por encima o por debajo de la media. Si el *Z-score* es positivo el dato está por encima de la media, y cuando es negativo está por debajo de la media. Se calcula como:\n\n$$z = \\frac{x - \\mu}{\\sigma}$$\n\nDonde:\n\n$\\mu$ es la media de la población y $\\sigma$ es la desviación estándar de la población.\n\nEl proceso de restar la media y dividir por la desviación estándar se llama *normalización* o *estandarización*.",
"_____no_output_____"
],
[
"### Error estándar.",
"_____no_output_____"
],
[
"El *error estándar* es la desviación estándar de alguna medida estimada, por lo general la media (aunque podría ser cualquier otra cantidad).\n\nSi tomamos un conjunto de datos y calculamos la media de esos datos y luego tomamos otra muestra y calculamos la media y luego otra y otra, obtendremos que los valores de la media no son siempre los mismos. Si tomamos todas esas medias obtendremos una distribución de medias con una media y desviación estándar, esa desviación estándar será el error estándar de la media. El *error estándar de la media* se suele estimar como:\n\n$$\\frac{\\sigma}{\\sqrt{n}}$$",
"_____no_output_____"
],
[
"Donde $\\sigma$ es la desviación estándar de los datos y $n$ es la cantidad de datos.",
"_____no_output_____"
],
[
"La medidas de centralidad y dispersión antes mencionadas son útiles porque resumen en pocos números una gran cantidad de datos. Sin embargo, al sintetizar la información, también pueden ocultarla. Es por ello que siempre es buena idea visualizar la distribución de los datos. En el notebook de [**Matplotlib**](03_matplotlib.ipynb) veremos cómo usar *Python* para graficar y visualizar datos.",
"_____no_output_____"
],
[
"## Funciones básicas de Álgebra Lineal.",
"_____no_output_____"
],
[
"*NumPy* tiene diversas funciones para el cálculo de Álgebra Lineal, agrupadas mayormente en el submódulo *`numpy.linalg`*. A continuación presentamos algunas de las funciones principales del Álgebra Lineal de *NumPy*.",
"_____no_output_____"
]
],
[
[
"np.dot(a, b) # Producto escalar de los vectores a y b.",
"_____no_output_____"
],
[
"a@b # Forma alternativa del producto escalar de los vectores a y b con el operador @.",
"_____no_output_____"
],
[
"I = np.identity(3) # Retorna la matriz identidad de 3x3.\nI",
"_____no_output_____"
],
[
"np.dot(M, I) # Multiplicación de las matrices M e I.",
"_____no_output_____"
],
[
"M@I # Forma alternativa de la multiplicación de las matrices M e I con el operador @.",
"_____no_output_____"
],
[
"M.T # Matriz transpuesta de M.",
"_____no_output_____"
],
[
"np.diagonal(M) # Retorna la diagonal de la matriz M.",
"_____no_output_____"
],
[
"np.diagonal(M, 1) # Retorna la primera diagonal de M (encima de la diagonal principal)",
"_____no_output_____"
],
[
"np.trace(M) # Retorna la suma de los elementos de la diagonal de M.",
"_____no_output_____"
],
[
"np.argmax(M) # Retorna el índice del mayor elemento en M.",
"_____no_output_____"
],
[
"np.argmin(M, axis = 0) # Retorna los índices de los menores elementos por columna. ",
"_____no_output_____"
],
[
"np.linalg.det(M) # Retorna el determinante de M.",
"_____no_output_____"
],
[
"np.linalg.inv(M) # Retorna la matriz inversa de M.",
"_____no_output_____"
],
[
"np.linalg.solve(M, b) # Resuelve el sistema lineal de ecuaciones Mx = b.",
"_____no_output_____"
]
],
[
[
"## Referencias adicionales.",
"_____no_output_____"
],
[
"Para ver operaciones adicionales de *NumPy* podemos consultar los siguientes repositorios:\n* [Numpy Operations.](https://github.com/tirthajyoti/Machine-Learning-with-Python/blob/master/Pandas%20and%20Numpy/Numpy_operations.ipynb)\n* [Numpy and Pandas quick basics](https://github.com/tirthajyoti/Machine-Learning-with-Python/blob/master/Pandas%20and%20Numpy/Numpy_Pandas_Quick.ipynb)\n* [Tools Numpy.](https://github.com/ageron/handson-ml2/blob/master/tools_numpy.ipynb)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7407c68d1b21100b76f70b59bf9d99f4ade284b | 22,880 | ipynb | Jupyter Notebook | notebooks/VGG19_training.ipynb | tranic/histopathology_cancer_detection | 6e9916cd73ebf8a5a1dfefe6f6c203a8e760c2db | [
"MIT"
] | 4 | 2020-08-14T14:48:27.000Z | 2021-06-15T17:01:34.000Z | notebooks/VGG19_training.ipynb | tranic/dl_histopathology_cancer_detection | 771932bfd4a0db2af733fe25953af324bc83771b | [
"MIT"
] | 3 | 2020-07-21T19:24:45.000Z | 2020-07-24T12:09:14.000Z | notebooks/VGG19_training.ipynb | tranic/dl_histopathology_cancer_detection | 771932bfd4a0db2af733fe25953af324bc83771b | [
"MIT"
] | null | null | null | 59.428571 | 483 | 0.582998 | [
[
[
"from google.colab import drive\ndrive.mount('/content/drive/')",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive/\n"
]
],
[
[
"# BASIC SETUP",
"_____no_output_____"
]
],
[
[
"# BASIC SETUP\n! [ ! -z \"$COLAB_GPU\" ] && pip install torch skorch && pip install neptune-client\n\n!cp \"drive/My Drive/dl_project_data/repo/data_loading.py\" .\n!mkdir ./helper_scripts/\n!cp \"drive/My Drive/dl_project_data/repo/helper_scripts/visual_helpers.py\" ./helper_scripts\n!cp \"drive/My Drive/dl_project_data/repo/architecture.py\" .\n!cp \"drive/My Drive/dl_project_data/repo/model_training.py\" .\n!mkdir ./train/\n# Creates RAM-Disk for potential speed-up\n!sudo mount -t tmpfs -o size=7g tmpfs train\n#!for i in 0; do cp \"drive/My Drive/dl_project_data/train/$i.tar\" ./train/; tar -xf \"./train/$i.tar\" -C ./train/; rm \"./train/$i.tar\"; done;\n!for i in 0 1 2 3 4 5 6 7 8 9 10 11; do cp \"drive/My Drive/dl_project_data/train/$i.tar\" ./train/; tar -xf \"./train/$i.tar\" -C ./train/; rm \"./train/$i.tar\"; done;",
"Requirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (1.5.1+cu101)\nCollecting skorch\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/42/21/4936b881b33de285faa0b36209afe4f9724a0875b2225abdc63b23d384a3/skorch-0.8.0-py3-none-any.whl (113kB)\n\u001b[K |████████████████████████████████| 122kB 5.7MB/s \n\u001b[?25hRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch) (0.16.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torch) (1.18.5)\nRequirement already satisfied: scikit-learn>=0.19.1 in /usr/local/lib/python3.6/dist-packages (from skorch) (0.22.2.post1)\nRequirement already satisfied: scipy>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from skorch) (1.4.1)\nRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from skorch) (0.8.7)\nRequirement already satisfied: tqdm>=4.14.0 in /usr/local/lib/python3.6/dist-packages (from skorch) (4.41.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.19.1->skorch) (0.16.0)\nInstalling collected packages: skorch\nSuccessfully installed skorch-0.8.0\nCollecting neptune-client\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/2e/58/c96e6c11608a4e57705b1b955a07bf58f44a829f98031ad779bd0559f3c9/neptune-client-0.4.118.tar.gz (90kB)\n\u001b[K |████████████████████████████████| 92kB 3.6MB/s \n\u001b[?25hCollecting bravado\n Downloading https://files.pythonhosted.org/packages/2a/cc/b3c8dadc3f51fa184db10172f031c1c5206b0e67f3207217bbdd326e81a4/bravado-10.6.2-py2.py3-none-any.whl\nRequirement already satisfied: click>=7.0 in /usr/local/lib/python3.6/dist-packages (from neptune-client) (7.1.2)\nCollecting future>=0.17.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz (829kB)\n\u001b[K 
|████████████████████████████████| 829kB 14.4MB/s \n\u001b[?25hCollecting py3nvml\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/53/b3/cb30dd8cc1198ae3fdb5a320ca7986d7ca76e23d16415067eafebff8685f/py3nvml-0.2.6-py3-none-any.whl (55kB)\n\u001b[K |████████████████████████████████| 61kB 8.7MB/s \n\u001b[?25hRequirement already satisfied: oauthlib>=2.1.0 in /usr/local/lib/python3.6/dist-packages (from neptune-client) (3.1.0)\nRequirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from neptune-client) (1.0.5)\nRequirement already satisfied: Pillow>=1.1.6 in /usr/local/lib/python3.6/dist-packages (from neptune-client) (7.0.0)\nCollecting PyJWT\n Downloading https://files.pythonhosted.org/packages/87/8b/6a9f14b5f781697e51259d81657e6048fd31a113229cf346880bb7545565/PyJWT-1.7.1-py2.py3-none-any.whl\nRequirement already satisfied: requests>=2.20.0 in /usr/local/lib/python3.6/dist-packages (from neptune-client) (2.23.0)\nRequirement already satisfied: requests-oauthlib>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from neptune-client) (1.3.0)\nRequirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.6/dist-packages (from neptune-client) (1.15.0)\nCollecting websocket-client>=0.35.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/4c/5f/f61b420143ed1c8dc69f9eaec5ff1ac36109d52c80de49d66e0c36c3dfdf/websocket_client-0.57.0-py2.py3-none-any.whl (200kB)\n\u001b[K |████████████████████████████████| 204kB 13.2MB/s \n\u001b[?25hCollecting GitPython>=2.0.8\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f9/1e/a45320cab182bf1c8656107b3d4c042e659742822fc6bff150d769a984dd/GitPython-3.1.7-py3-none-any.whl (158kB)\n\u001b[K |████████████████████████████████| 163kB 19.5MB/s \n\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from neptune-client) (20.4)\nRequirement already satisfied: python-dateutil in /usr/local/lib/python3.6/dist-packages (from 
bravado->neptune-client) (2.8.1)\nCollecting msgpack-python\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/8a/20/6eca772d1a5830336f84aca1d8198e5a3f4715cd1c7fc36d3cc7f7185091/msgpack-python-0.5.6.tar.gz (138kB)\n\u001b[K |████████████████████████████████| 143kB 20.4MB/s \n\u001b[?25hCollecting simplejson\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/73/96/1e6b19045375890068d7342cbe280dd64ae73fd90b9735b5efb8d1e044a1/simplejson-3.17.2-cp36-cp36m-manylinux2010_x86_64.whl (127kB)\n\u001b[K |████████████████████████████████| 133kB 26.5MB/s \n\u001b[?25hRequirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from bravado->neptune-client) (3.13)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.6/dist-packages (from bravado->neptune-client) (3.7.4.2)\nCollecting bravado-core>=5.16.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/76/11/18e9d28a156c33f2d5f15a5e155dc7130250acb0a569255a2b6b307b596d/bravado_core-5.17.0-py2.py3-none-any.whl (67kB)\n\u001b[K |████████████████████████████████| 71kB 8.8MB/s \n\u001b[?25hCollecting monotonic\n Downloading https://files.pythonhosted.org/packages/ac/aa/063eca6a416f397bd99552c534c6d11d57f58f2e94c14780f3bbf818c4cf/monotonic-1.5-py2.py3-none-any.whl\nCollecting xmltodict\n Downloading https://files.pythonhosted.org/packages/28/fd/30d5c1d3ac29ce229f6bdc40bbc20b28f716e8b363140c26eff19122d8a5/xmltodict-0.12.0-py2.py3-none-any.whl\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->neptune-client) (2018.9)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.6/dist-packages (from pandas->neptune-client) (1.18.5)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20.0->neptune-client) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from 
requests>=2.20.0->neptune-client) (2.10)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20.0->neptune-client) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20.0->neptune-client) (2020.6.20)\nCollecting gitdb<5,>=4.0.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/48/11/d1800bca0a3bae820b84b7d813ad1eff15a48a64caea9c823fc8c1b119e8/gitdb-4.0.5-py3-none-any.whl (63kB)\n\u001b[K |████████████████████████████████| 71kB 8.4MB/s \n\u001b[?25hRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->neptune-client) (2.4.7)\nCollecting jsonref\n Downloading https://files.pythonhosted.org/packages/07/92/f8e4ac824b14af77e613984e480fa818397c72d4141fc466decb26752749/jsonref-0.2-py3-none-any.whl\nRequirement already satisfied: jsonschema[format]>=2.5.1 in /usr/local/lib/python3.6/dist-packages (from bravado-core>=5.16.1->bravado->neptune-client) (2.6.0)\nCollecting swagger-spec-validator>=2.0.1\n Downloading https://files.pythonhosted.org/packages/09/de/e78cefbf5838b434b63a789264b79821cb2267f1498fbed23ef8590133e4/swagger_spec_validator-2.7.3-py2.py3-none-any.whl\nRequirement already satisfied: msgpack>=0.5.2 in /usr/local/lib/python3.6/dist-packages (from bravado-core>=5.16.1->bravado->neptune-client) (1.0.0)\nCollecting smmap<4,>=3.0.1\n Downloading https://files.pythonhosted.org/packages/b0/9a/4d409a6234eb940e6a78dfdfc66156e7522262f5f2fecca07dc55915952d/smmap-3.0.4-py2.py3-none-any.whl\nCollecting rfc3987; extra == \"format\"\n Downloading https://files.pythonhosted.org/packages/65/d4/f7407c3d15d5ac779c3dd34fbbc6ea2090f77bd7dd12f207ccf881551208/rfc3987-1.3.8-py2.py3-none-any.whl\nCollecting webcolors; extra == \"format\"\n Downloading 
https://files.pythonhosted.org/packages/12/05/3350559de9714b202e443a9e6312937341bd5f79f4e4f625744295e7dd17/webcolors-1.11.1-py3-none-any.whl\nCollecting strict-rfc3339; extra == \"format\"\n Downloading https://files.pythonhosted.org/packages/56/e4/879ef1dbd6ddea1c77c0078cd59b503368b0456bcca7d063a870ca2119d3/strict-rfc3339-0.7.tar.gz\nBuilding wheels for collected packages: neptune-client, future, msgpack-python, strict-rfc3339\n Building wheel for neptune-client (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for neptune-client: filename=neptune_client-0.4.118-py2.py3-none-any.whl size=149617 sha256=3dddb5d745de2d3413e3090f41e19d0c4630dfded36e23e42c189b2f1155e8ca\n Stored in directory: /root/.cache/pip/wheels/f2/0e/03/b7fcb3255a708609c21d8243cf6ba4f0419a625474e3c9c01c\n Building wheel for future (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for future: filename=future-0.18.2-cp36-none-any.whl size=491057 sha256=05c82dc8dad0aedeabb4ee9e005c477bdd2835ac033f3ddb301a7fbfa27c87d8\n Stored in directory: /root/.cache/pip/wheels/8b/99/a0/81daf51dcd359a9377b110a8a886b3895921802d2fc1b2397e\n Building wheel for msgpack-python (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for msgpack-python: filename=msgpack_python-0.5.6-cp36-cp36m-linux_x86_64.whl size=304243 sha256=4ba4a098d8d2d27e26daa8fbff2c8b017c7da54c51c48e9fa7444fd70be41e8a\n Stored in directory: /root/.cache/pip/wheels/d5/de/86/7fa56fda12511be47ea0808f3502bc879df4e63ab168ec0406\n Building wheel for strict-rfc3339 (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for strict-rfc3339: filename=strict_rfc3339-0.7-cp36-none-any.whl size=18120 sha256=84a3bdf4224a21ac28df0321dd9b0b42fd19655c3963fb3537d82935a408d8de\n Stored in directory: /root/.cache/pip/wheels/bb/af/c9/b6e9fb5f9b2470e4ed2a7241c9ab3a8cdd3bc8555ae02ca2e6\nSuccessfully built neptune-client future msgpack-python strict-rfc3339\nInstalling collected packages: msgpack-python, simplejson, jsonref, swagger-spec-validator, bravado-core, monotonic, bravado, future, xmltodict, py3nvml, PyJWT, websocket-client, smmap, gitdb, GitPython, neptune-client, rfc3987, webcolors, strict-rfc3339\n Found existing installation: future 0.16.0\n Uninstalling future-0.16.0:\n Successfully uninstalled future-0.16.0\nSuccessfully installed GitPython-3.1.7 PyJWT-1.7.1 bravado-10.6.2 bravado-core-5.17.0 future-0.18.2 gitdb-4.0.5 jsonref-0.2 monotonic-1.5 msgpack-python-0.5.6 neptune-client-0.4.118 py3nvml-0.2.6 rfc3987-1.3.8 simplejson-3.17.2 smmap-3.0.4 strict-rfc3339-0.7 swagger-spec-validator-2.7.3 webcolors-1.11.1 websocket-client-0.57.0 xmltodict-0.12.0\n"
]
],
[
[
"# IMPORTS\n**You should not have to change anything here.**",
"_____no_output_____"
]
],
[
[
"# IMPORTS\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn\nfrom torchvision import models, transforms\n\nimport skorch.callbacks as scb\nfrom skorch import NeuralNetBinaryClassifier\n\nimport model_training as md\nimport architecture as arch\nfrom data_loading import ToTensor, Normalize, RandomRotation, RandomHorizontalFlip\n\n# Skorch uses some depricated scikit code - works just fine, so surpress warnings for clean output\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"# CLASSIFIER PARAMETRIZATION\n\nHere you can parametrize your model and set loss, optimizer, learning rate, etc. \n\nFor further information on what can be set and how, please refer to the [skorch documentation](https://skorch.readthedocs.io/en/stable/classifier.html#skorch.classifier.NeuralNetClassifier).",
"_____no_output_____"
]
],
[
[
"# CLASSIFIER PARAMETRIZATION\nclassifier = NeuralNetBinaryClassifier(\n arch.VGG19,\n optimizer = torch.optim.Adamax, \n max_epochs = 30,\n lr = 0.002,\n batch_size = 128,\n iterator_train__shuffle = True, # Shuffle training data on each epoch\n train_split = None,\n callbacks = [scb.LRScheduler(policy = 'ExponentialLR', gamma = 0.9)], \n device ='cuda')",
"_____no_output_____"
]
],
[
[
"# CLASSIFIER TRAINING\n\nAfter you have added the shared folder with the data to your drive as a shortcut, you should not have to change anything here. At least for now.\n\n**IF YOU WANT TO TRAIN WITH THE FULL DATASET, JUST REMOVE** *_small* **FROM THE CSV FILE.** ",
"_____no_output_____"
]
],
[
[
"# CLASSIFIER TRAINING\nmd.train_model(classifier, \n train_labels = \"drive/My Drive/dl_project_data/train/train_split.csv\", \n test_labels = \"drive/My Drive/dl_project_data/train/test_split.csv\", \n file_dir = \"train\", \n train_transform = transforms.Compose([transforms.ToPILImage(),\n #transforms.Pad(64, padding_mode='reflect'), # 96 + 2*64 = 224\n transforms.RandomHorizontalFlip(), # TODO: model expects normalized channel values (substract means)\n transforms.RandomVerticalFlip(),\n transforms.RandomRotation(20),\n transforms.ToTensor()]),\n test_transform = transforms.ToTensor(),\n in_memory = False,\n output_path = \".\",\n #output_path = \"drive/My Drive/dl_project_data/output\",\n logger = {\n \"api_token\": \"\",\n \"project_qualified_name\": \"elangenhan/hcd-experiments\",\n \"experiment_name\": \"VGG19\"\n }\n)",
"https://ui.neptune.ai/elangenhan/hcd-experiments/e/HCDEX-3\nStarting Training for <class 'architecture.VGG19'> \n \u001b[1mModel-Params:\u001b[0m\n \u001b[1mCriterion:\u001b[0m <class 'torch.nn.modules.loss.BCEWithLogitsLoss'>\n \u001b[1mOptimizer:\u001b[0m <class 'torch.optim.adamax.Adamax'>\n \u001b[1mLearning Rate:\u001b[0m 0.002\n \u001b[1mEpochs:\u001b[0m 2\n \u001b[1mBatch size:\u001b[0m 128\n \n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74084f0690e5778593d38cccaeca565647ea51a | 3,118 | ipynb | Jupyter Notebook | source_nbs/12_0_problem_type_utils.ipynb | vishalbelsare/bert-multitask-learning | 688c2bab1dcbcd8ab6c795c116d252a19b66b793 | [
"Apache-2.0"
] | 456 | 2018-12-11T09:43:10.000Z | 2021-11-14T17:33:21.000Z | source_nbs/12_0_problem_type_utils.ipynb | vishalbelsare/bert-multitask-learning | 688c2bab1dcbcd8ab6c795c116d252a19b66b793 | [
"Apache-2.0"
] | 57 | 2018-12-24T05:59:53.000Z | 2021-11-16T05:58:52.000Z | source_nbs/12_0_problem_type_utils.ipynb | vishalbelsare/bert-multitask-learning | 688c2bab1dcbcd8ab6c795c116d252a19b66b793 | [
"Apache-2.0"
] | 123 | 2018-12-25T03:41:03.000Z | 2021-11-12T18:00:53.000Z | 31.494949 | 104 | 0.542335 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e740c9686b798fa3f3e36ec2e42bb869fce06752 | 980,679 | ipynb | Jupyter Notebook | MARDA_Image_recognition_3_hurricanes.ipynb | dbuscombe-usgs/HurricaneHarvey_buildingdamage | 375b2eb7e14d11dd5aae357dd84a6db57ba88599 | [
"MIT"
] | null | null | null | MARDA_Image_recognition_3_hurricanes.ipynb | dbuscombe-usgs/HurricaneHarvey_buildingdamage | 375b2eb7e14d11dd5aae357dd84a6db57ba88599 | [
"MIT"
] | null | null | null | MARDA_Image_recognition_3_hurricanes.ipynb | dbuscombe-usgs/HurricaneHarvey_buildingdamage | 375b2eb7e14d11dd5aae357dd84a6db57ba88599 | [
"MIT"
] | 2 | 2021-07-22T20:06:30.000Z | 2021-08-17T02:45:59.000Z | 980,679 | 980,679 | 0.95811 | [
[
[
"# Image recognition: recognizing hurricane damage\n\n#### Daniel Buscombe, MARDA Science\n\n\n\nMIT License\n\nCopyright (c) 2020, Marda Science LLC\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.",
"_____no_output_____"
],
[
"## Introduction\n\nThis tutorial shows how to do whole image classification, or image `recognition`, starting from JPEG image files, first leveraging pre-trained weights (transfer learning)\n\nWe demonstrate the workflow on a hurricane damage dataset reported [here](https://www.kaggle.com/kmader/satellite-images-of-hurricane-damage)\n\nData originally taken from [here](https://ieee-dataport.org/open-access/detecting-damaged-buildings-post-hurricane-satellite-imagery-based-customized) and can be cited with [this](http://dx.doi.org/10.21227/sdad-1e56) and the original paper is [here](https://arxiv.org/abs/1807.01688)\n\nOur trained model will be able to predict whether or not a property was damaged by a hurricane, to within 99%\n",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"!pip install tf-nightly --quiet",
"\u001b[K |████████████████████████████████| 523.4MB 32kB/s \n\u001b[K |████████████████████████████████| 2.9MB 42.0MB/s \n\u001b[K |████████████████████████████████| 460kB 59.7MB/s \n\u001b[?25h"
],
[
"import requests, os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\n\ntf.__version__",
"_____no_output_____"
],
[
"from glob import glob\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"# from https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url\n\ndef download_file_from_google_drive(id, destination):\n URL = \"https://docs.google.com/uc?export=download\"\n\n session = requests.Session()\n\n response = session.get(URL, params = { 'id' : id }, stream = True)\n token = get_confirm_token(response)\n\n if token:\n params = { 'id' : id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n\n save_response_content(response, destination) \n\ndef get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\ndef save_response_content(response, destination):\n \"\"\"\n response = filename for input\n destination = filename for output\n \"\"\" \n CHUNK_SIZE = 32768\n\n with open(destination, \"wb\") as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)",
"_____no_output_____"
],
[
"file_id = '1vsJU0mFlcu4QIxBPDdwuzEl_gtSS_9Gu'\n\ndestination = 'validation.zip'\ndownload_file_from_google_drive(file_id, destination)",
"_____no_output_____"
],
[
"file_id = '1tlEnpQzbq-ogx3z14t12h2QH7keuTssb'\n\ndestination = 'train.zip'\ndownload_file_from_google_drive(file_id, destination)",
"_____no_output_____"
],
[
"file_id = '1fawTKcBH7xTIhCoMwZtos79H6XAhBaIn'\n\ndestination = 'test.zip'\ndownload_file_from_google_drive(file_id, destination)",
"_____no_output_____"
],
[
"!unzip validation.zip > tmp.txt",
"_____no_output_____"
],
[
"!unzip train.zip > tmp.txt",
"_____no_output_____"
],
[
"!unzip test.zip > tmp.txt",
"_____no_output_____"
]
],
[
[
"What class categories do I have?",
"_____no_output_____"
]
],
[
[
"!ls train",
"damage\tno_damage\n"
]
],
[
[
"How many train files?",
"_____no_output_____"
]
],
[
[
"!ls train/damage | wc -l",
"5000\n"
],
[
"!ls train/no_damage | wc -l",
"5000\n"
]
],
[
[
"How many test and validation files?",
"_____no_output_____"
]
],
[
[
"!ls test/damage | wc -l",
"1000\n"
],
[
"!ls test/no_damage | wc -l",
"1000\n"
],
[
"!ls validation/damage | wc -l",
"1000\n"
],
[
"!ls validation/no_damage | wc -l",
"1000\n"
]
],
[
[
"Define text labels for our two classes",
"_____no_output_____"
]
],
[
[
"classes = ['damage',\t'no_damage']",
"_____no_output_____"
]
],
[
[
"Get rid of any corrupt jpegs",
"_____no_output_____"
]
],
[
[
"num_skipped = 0\nfor folder in ['test', 'train', 'validation']:\n for folder_name in classes:\n folder_path = os.path.join(folder, folder_name)\n for fname in os.listdir(folder_path):\n fpath = os.path.join(folder_path, fname)\n fobj = open(fpath, 'rb')\n if tf.compat.as_bytes('JFIF') not in fobj.peek(10):\n num_skipped += 1\n # Delete corrupted image\n os.system('rm ' + fpath)\n print('Deleted %d images' % num_skipped)",
"Deleted 0 images\nDeleted 0 images\nDeleted 0 images\n"
],
[
"",
"_____no_output_____"
]
],
[
[
"## Augmenting the data",
"_____no_output_____"
]
],
[
[
"image_size = (128, 128)\nbatch_size = 32\n\ntrain_ds = tf.keras.preprocessing.image_dataset_from_directory(\n 'train', seed=2020,\n image_size=image_size, batch_size=batch_size) \nval_ds = tf.keras.preprocessing.image_dataset_from_directory(\n 'validation', seed=2020,\n image_size=image_size, batch_size=batch_size) ",
"Found 10000 files belonging to 2 classes.\nFound 2000 files belonging to 2 classes.\n"
],
[
"",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 10))\nfor images, labels in train_ds.take(1):\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype('uint8'))\n plt.title(classes[int(labels[i])])\n plt.axis('off')",
"_____no_output_____"
],
[
"data_augmentation = keras.Sequential([\n layers.experimental.preprocessing.RandomFlip('vertical'),\n layers.experimental.preprocessing.RandomFlip('horizontal'),\n layers.experimental.preprocessing.RandomRotation(0.3),\n layers.experimental.preprocessing.RandomZoom(0.3),\n])",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 10))\nfor images, labels in train_ds.take(1):\n for i in range(9):\n augmented_images = data_augmentation(images)\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow((augmented_images[0].numpy()).astype('uint8')) #255*\n plt.title(classes[int(labels[0])])\n plt.axis('off')",
"_____no_output_____"
]
],
[
[
"Improve model throughput by using [pre-fetch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch)",
"_____no_output_____"
]
],
[
[
"train_ds = train_ds.prefetch(buffer_size=batch_size)\nval_ds = val_ds.prefetch(buffer_size=batch_size)",
"_____no_output_____"
],
[
"augmented_train_ds = train_ds.map(\n lambda x, y: (data_augmentation(x, training=True), y))",
"_____no_output_____"
]
],
[
[
"## Image classification using transfer learning\n\n### Build a model\n",
"_____no_output_____"
],
[
"Load the MobileNetV2 model trained on imagenet, but exclude the classification layers, because we want to add our own classification layers so we can retrain the model on our own categories",
"_____no_output_____"
],
[
"We'll use one of the 'stock' models provided by `keras.applications` called MobileNetV2",
"_____no_output_____"
]
],
[
[
"def mobilenet_model(num_classes, input_shape):\n\n EXTRACTOR = MobileNetV2(include_top=False, weights=\"imagenet\", \n input_shape=input_shape)\n\n EXTRACTOR.trainable = True\n # Construct the head of the model that will be placed on top of the\n # the base model\n class_head = EXTRACTOR.output\n class_head = layers.GlobalAveragePooling2D()(class_head)\n class_head = layers.Dense(512, activation=\"relu\")(class_head)\n class_head = layers.Dropout(0.5)(class_head)\n class_head = layers.Dense(num_classes, activation=\"softmax\")(class_head)\n\n # Create the new model\n model = keras.Model(inputs=EXTRACTOR.input, outputs=class_head)\n\n return model",
"_____no_output_____"
]
],
[
[
"### Train the model",
"_____no_output_____"
]
],
[
[
"min_lr = 1e-4\npatience = 5\nfactor = 0.8\ncooldown = 3\n\nepochs = 50",
"_____no_output_____"
],
[
"from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint",
"_____no_output_____"
],
[
"filepath = 'hurricanes_mn2_best_weights.h5'",
"_____no_output_____"
],
[
"earlystop = EarlyStopping(monitor=\"val_loss\", \n mode=\"min\", patience=patience) \n\n# reduction of learning rate if and when validation scores plateau upon successive epochs\nreduceloss_plat = ReduceLROnPlateau(monitor='val_loss', factor=factor, patience=patience, \n verbose=1, mode='auto', \n cooldown=cooldown, min_lr=min_lr)\n\n# set checkpoint file \nmodel_checkpoint = ModelCheckpoint(filepath, monitor='val_loss', \n verbose=0, save_best_only=True, mode='min', \n save_weights_only = True)\n \ncallbacks = [model_checkpoint, reduceloss_plat, earlystop]",
"_____no_output_____"
],
[
"from tensorflow.keras.applications import MobileNetV2\n\ninput_shape = (224, 224)\n\nmodel2 = mobilenet_model(len(classes), input_shape+(3,) )\n\nmodel2.compile(optimizer=keras.optimizers.Adam(min_lr),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])",
"Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5\n9412608/9406464 [==============================] - 0s 0us/step\n"
],
[
"model2.fit_generator(augmented_train_ds, \n validation_data=val_ds, \n epochs=50,\n verbose=2)",
"WARNING:tensorflow:From <ipython-input-40-4cca302ae1c0>:4: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use Model.fit, which supports generators.\nEpoch 1/50\nWARNING:tensorflow:Model was constructed with shape (None, 224, 224, 3) for input Tensor(\"input_1:0\", shape=(None, 224, 224, 3), dtype=float32), but it was called on an input with incompatible shape (None, 128, 128, None).\nWARNING:tensorflow:Model was constructed with shape (None, 224, 224, 3) for input Tensor(\"input_1:0\", shape=(None, 224, 224, 3), dtype=float32), but it was called on an input with incompatible shape (None, 128, 128, None).\nWARNING:tensorflow:Model was constructed with shape (None, 224, 224, 3) for input Tensor(\"input_1:0\", shape=(None, 224, 224, 3), dtype=float32), but it was called on an input with incompatible shape (None, 128, 128, None).\n313/313 - 47s - loss: 0.3056 - accuracy: 0.8706 - val_loss: 2.7860 - val_accuracy: 0.5150\nEpoch 2/50\n313/313 - 46s - loss: 0.1573 - accuracy: 0.9340 - val_loss: 0.2622 - val_accuracy: 0.9045\nEpoch 3/50\n313/313 - 47s - loss: 0.1177 - accuracy: 0.9509 - val_loss: 0.1398 - val_accuracy: 0.9375\nEpoch 4/50\n313/313 - 46s - loss: 0.0992 - accuracy: 0.9592 - val_loss: 0.0946 - val_accuracy: 0.9590\nEpoch 5/50\n313/313 - 46s - loss: 0.0836 - accuracy: 0.9671 - val_loss: 0.0814 - val_accuracy: 0.9675\nEpoch 6/50\n313/313 - 47s - loss: 0.0758 - accuracy: 0.9721 - val_loss: 0.0650 - val_accuracy: 0.9735\nEpoch 7/50\n313/313 - 47s - loss: 0.0642 - accuracy: 0.9756 - val_loss: 0.0678 - val_accuracy: 0.9755\nEpoch 8/50\n313/313 - 47s - loss: 0.0514 - accuracy: 0.9791 - val_loss: 0.0742 - val_accuracy: 0.9730\nEpoch 9/50\n313/313 - 47s - loss: 0.0523 - accuracy: 0.9793 - val_loss: 0.0690 - val_accuracy: 0.9725\nEpoch 10/50\n313/313 - 47s - loss: 0.0483 - accuracy: 0.9833 - val_loss: 0.0670 - val_accuracy: 0.9770\nEpoch 11/50\n313/313 - 47s - 
loss: 0.0407 - accuracy: 0.9855 - val_loss: 0.0653 - val_accuracy: 0.9820\nEpoch 12/50\n313/313 - 47s - loss: 0.0395 - accuracy: 0.9850 - val_loss: 0.0481 - val_accuracy: 0.9805\nEpoch 13/50\n313/313 - 48s - loss: 0.0370 - accuracy: 0.9870 - val_loss: 0.0510 - val_accuracy: 0.9770\nEpoch 14/50\n313/313 - 48s - loss: 0.0333 - accuracy: 0.9875 - val_loss: 0.0509 - val_accuracy: 0.9815\nEpoch 15/50\n313/313 - 47s - loss: 0.0329 - accuracy: 0.9878 - val_loss: 0.0989 - val_accuracy: 0.9705\nEpoch 16/50\n313/313 - 47s - loss: 0.0264 - accuracy: 0.9900 - val_loss: 0.0502 - val_accuracy: 0.9825\nEpoch 17/50\n313/313 - 47s - loss: 0.0323 - accuracy: 0.9887 - val_loss: 0.0437 - val_accuracy: 0.9860\nEpoch 18/50\n313/313 - 47s - loss: 0.0254 - accuracy: 0.9899 - val_loss: 0.0568 - val_accuracy: 0.9835\nEpoch 19/50\n313/313 - 46s - loss: 0.0267 - accuracy: 0.9903 - val_loss: 0.0844 - val_accuracy: 0.9700\nEpoch 20/50\n313/313 - 47s - loss: 0.0233 - accuracy: 0.9924 - val_loss: 0.0708 - val_accuracy: 0.9805\nEpoch 21/50\n313/313 - 46s - loss: 0.0238 - accuracy: 0.9918 - val_loss: 0.0379 - val_accuracy: 0.9855\nEpoch 22/50\n313/313 - 46s - loss: 0.0232 - accuracy: 0.9920 - val_loss: 0.0469 - val_accuracy: 0.9860\nEpoch 23/50\n313/313 - 46s - loss: 0.0202 - accuracy: 0.9928 - val_loss: 0.1014 - val_accuracy: 0.9710\nEpoch 24/50\n313/313 - 46s - loss: 0.0249 - accuracy: 0.9915 - val_loss: 0.2040 - val_accuracy: 0.9415\nEpoch 25/50\n313/313 - 46s - loss: 0.0193 - accuracy: 0.9932 - val_loss: 0.0544 - val_accuracy: 0.9855\nEpoch 26/50\n313/313 - 46s - loss: 0.0176 - accuracy: 0.9934 - val_loss: 0.1841 - val_accuracy: 0.9505\nEpoch 27/50\n313/313 - 46s - loss: 0.0172 - accuracy: 0.9945 - val_loss: 0.1054 - val_accuracy: 0.9755\nEpoch 28/50\n313/313 - 46s - loss: 0.0165 - accuracy: 0.9936 - val_loss: 0.0880 - val_accuracy: 0.9740\nEpoch 29/50\n313/313 - 46s - loss: 0.0176 - accuracy: 0.9928 - val_loss: 0.1271 - val_accuracy: 0.9595\nEpoch 30/50\n313/313 - 46s - loss: 0.0132 - 
accuracy: 0.9951 - val_loss: 0.0654 - val_accuracy: 0.9835\nEpoch 31/50\n313/313 - 46s - loss: 0.0185 - accuracy: 0.9938 - val_loss: 0.0400 - val_accuracy: 0.9870\nEpoch 32/50\n313/313 - 46s - loss: 0.0150 - accuracy: 0.9952 - val_loss: 0.0563 - val_accuracy: 0.9875\nEpoch 33/50\n313/313 - 47s - loss: 0.0146 - accuracy: 0.9955 - val_loss: 0.0372 - val_accuracy: 0.9895\nEpoch 34/50\n313/313 - 47s - loss: 0.0158 - accuracy: 0.9934 - val_loss: 0.0719 - val_accuracy: 0.9735\nEpoch 35/50\n313/313 - 47s - loss: 0.0138 - accuracy: 0.9951 - val_loss: 0.1197 - val_accuracy: 0.9660\nEpoch 36/50\n313/313 - 47s - loss: 0.0179 - accuracy: 0.9934 - val_loss: 0.1885 - val_accuracy: 0.9485\nEpoch 37/50\n313/313 - 46s - loss: 0.0144 - accuracy: 0.9949 - val_loss: 0.0636 - val_accuracy: 0.9770\nEpoch 38/50\n313/313 - 46s - loss: 0.0151 - accuracy: 0.9950 - val_loss: 0.1459 - val_accuracy: 0.9580\nEpoch 39/50\n313/313 - 46s - loss: 0.0154 - accuracy: 0.9949 - val_loss: 0.0625 - val_accuracy: 0.9835\nEpoch 40/50\n313/313 - 47s - loss: 0.0126 - accuracy: 0.9960 - val_loss: 0.0800 - val_accuracy: 0.9770\nEpoch 41/50\n313/313 - 46s - loss: 0.0142 - accuracy: 0.9955 - val_loss: 0.0945 - val_accuracy: 0.9820\nEpoch 42/50\n313/313 - 46s - loss: 0.0124 - accuracy: 0.9959 - val_loss: 0.0674 - val_accuracy: 0.9875\nEpoch 43/50\n313/313 - 47s - loss: 0.0136 - accuracy: 0.9939 - val_loss: 0.0818 - val_accuracy: 0.9835\nEpoch 44/50\n313/313 - 46s - loss: 0.0103 - accuracy: 0.9968 - val_loss: 0.0775 - val_accuracy: 0.9875\nEpoch 45/50\n313/313 - 46s - loss: 0.0100 - accuracy: 0.9955 - val_loss: 0.0619 - val_accuracy: 0.9840\nEpoch 46/50\n313/313 - 46s - loss: 0.0138 - accuracy: 0.9962 - val_loss: 0.0946 - val_accuracy: 0.9785\nEpoch 47/50\n313/313 - 46s - loss: 0.0164 - accuracy: 0.9945 - val_loss: 0.0687 - val_accuracy: 0.9855\nEpoch 48/50\n313/313 - 46s - loss: 0.0168 - accuracy: 0.9957 - val_loss: 0.0621 - val_accuracy: 0.9860\nEpoch 49/50\n313/313 - 46s - loss: 0.0169 - accuracy: 0.9951 - 
val_loss: 0.0428 - val_accuracy: 0.9895\nEpoch 50/50\n313/313 - 46s - loss: 0.0124 - accuracy: 0.9959 - val_loss: 0.0568 - val_accuracy: 0.9865\n"
]
],
[
[
"### Run inference on new data\n\nDropout layers are inactive at inference time, so they won't affect our model results",
"_____no_output_____"
]
],
[
[
"f = glob('test/no_damage/*.jpeg')[0]\n\nimg = keras.preprocessing.image.load_img(f, target_size=image_size)\nimg_array = keras.preprocessing.image.img_to_array(img)\nimg_array = tf.expand_dims(img_array, 0) # Create batch axis\nscores = model2.predict(img_array).flatten()\nprint(classes[np.argmax(scores)])",
"no_damage\n"
]
],
[
[
"We can use the `model.evaluate()` function to evaluate the average accuracy for the entire test set",
"_____no_output_____"
]
],
[
[
"scores = model2.evaluate(val_ds)",
"2000/2000 [==============================] - 11s 5ms/step - loss: 0.0223 - accuracy: 0.9920\n"
]
],
[
[
"### Plotting the confusion matrix",
"_____no_output_____"
],
[
"The confusion matrix is a matrix of correspondences between actual and predicted labels, per class",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix\nimport seaborn as sns",
"/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
]
],
[
[
"Get a new validation batch generator with a batch size of 1, and shuffling set to False because we want to pair each image with its class",
"_____no_output_____"
]
],
[
[
"val_ds = tf.keras.preprocessing.image_dataset_from_directory(\n 'test', seed=2020, shuffle=False,\n image_size=image_size, batch_size=1)",
"Found 2000 files belonging to 2 classes.\n"
]
],
[
[
"Get the image class labels and store in a list `L`",
"_____no_output_____"
]
],
[
[
"L = []\nfor _, labels in val_ds:\n L.append(int(labels[0]))",
"_____no_output_____"
],
[
"np.bincount(L)",
"_____no_output_____"
]
],
[
[
"Use the trained model to make predictions on the test set",
"_____no_output_____"
]
],
[
[
"preds = model2.predict(val_ds)\npred = np.argmax(preds, axis=1)",
"WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 3) for input Tensor(\"input_1:0\", shape=(None, 224, 224, 3), dtype=float32), but it was called on an input with incompatible shape (None, 128, 128, None).\n"
]
],
[
[
"Get the confusion matrix (the matrix of label correspondences between ground truth and model prediction)",
"_____no_output_____"
]
],
[
[
"cm = confusion_matrix(np.asarray(L), pred)\n\ncm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]",
"_____no_output_____"
]
],
[
[
"Make a plot of that matrix",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(15,15))\nsns.heatmap(cm,\n annot=True,\n cmap = sns.cubehelix_palette(dark=0, light=1, as_cmap=True)) \n \ntick_marks = np.arange(len(classes))+.5\nplt.xticks(tick_marks, classes, rotation=45,fontsize=10)\nplt.yticks(tick_marks, classes,rotation=45, fontsize=10)\t",
"_____no_output_____"
]
],
[
[
"Both classes are estimated to within 1%",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e740d38c00d304009207966d21729365e6957f70 | 44,474 | ipynb | Jupyter Notebook | study_roadmaps/2_transfer_learning_roadmap/4_effect_of_training_epochs/3) Understand the effect of number of epochs in transfer learning - keras.ipynb | shubham7169/monk_v1 | 2d63ba9665160cc7758ba0541baddf87c1cfa578 | [
"Apache-2.0"
] | 7 | 2020-07-26T08:37:29.000Z | 2020-10-30T10:23:11.000Z | study_roadmaps/2_transfer_learning_roadmap/4_effect_of_training_epochs/3) Understand the effect of number of epochs in transfer learning - keras.ipynb | aayush-fadia/monk_v1 | 4234eecede3427efc952461408e2d14ef5fa0e57 | [
"Apache-2.0"
] | null | null | null | study_roadmaps/2_transfer_learning_roadmap/4_effect_of_training_epochs/3) Understand the effect of number of epochs in transfer learning - keras.ipynb | aayush-fadia/monk_v1 | 4234eecede3427efc952461408e2d14ef5fa0e57 | [
"Apache-2.0"
] | null | null | null | 24.652993 | 419 | 0.46308 | [
[
[
"<a href=\"https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/2_transfer_learning_roadmap/4_effect_of_training_epochs/3)%20Understand%20the%20effect%20of%20number%20of%20epochs%20in%20transfer%20learning%20-%20keras.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# DIY Notebook",
"_____no_output_____"
],
[
"# Goals\n\n\n### Understand the role of number of epochs in transfer learning\n\n\n### Till what point increasing epochs helps in improving accuracy\n\n\n### How overtraining can result in overfitting the data\n\n\n### You will be using skin-cancer mnist to train the classifiers",
"_____no_output_____"
],
[
"# Table of Contents\n\n\n## [0. Install](#0)\n\n\n## [1. Train a resnet50 network for 5 epochs](#1)\n\n\n## [2. Re-Train a new experiment for 10 epochs](#2)\n\n\n## [3. Re-Train a third experiment for 20 epochs](#3)\n\n\n## [4. Compare the experiments](#4)",
"_____no_output_____"
],
[
"<a id='0'></a>\n# Install Monk\n \n - git clone https://github.com/Tessellate-Imaging/monk_v1.git\n \n - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt\n - (Select the requirements file as per OS and CUDA version)",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/Tessellate-Imaging/monk_v1.git",
"_____no_output_____"
],
[
"# If using Colab install using the commands below\n!cd monk_v1/installation/Misc && pip install -r requirements_colab.txt\n\n# If using Kaggle uncomment the following command\n#!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt\n\n# Select the requirements file as per OS and CUDA version when using a local system or cloud\n#!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt",
"_____no_output_____"
]
],
[
[
"## Dataset Details\n - Credits: https://www.kaggle.com/kmader/skin-cancer-mnist-ham10000\n \n - Seven classes\n - benign_keratosis_like_lesions\n - melanocytic_nevi\n - dermatofibroma\n - melanoma\n - vascular_lesions\n - basal_cell_carcinoma\n - Bowens_disease",
"_____no_output_____"
],
[
"### Download the dataset",
"_____no_output_____"
]
],
[
[
"! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MRC58-oCdR1agFTWreDFqevjEOIWDnYZ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1MRC58-oCdR1agFTWreDFqevjEOIWDnYZ\" -O skin_cancer_mnist_dataset.zip && rm -rf /tmp/cookies.txt",
"_____no_output_____"
],
[
"! unzip -qq skin_cancer_mnist_dataset.zip",
"_____no_output_____"
]
],
[
[
"# Imports",
"_____no_output_____"
]
],
[
[
"# Monk\nimport os\nimport sys\nsys.path.append(\"monk_v1/monk/\");",
"_____no_output_____"
],
[
"#Using keras backend \nfrom keras_prototype import prototype",
"_____no_output_____"
]
],
[
[
"<a id='1'></a>\n# Train a resnet50 network for 5 epochs",
"_____no_output_____"
],
[
"## Creating and managing experiments\n - Provide project name\n - Provide experiment name\n - For a specific dataset create a single project\n - Inside each project multiple experiments can be created\n - Every experiment can have different hyper-parameters attached to it",
"_____no_output_____"
]
],
[
[
"gtf = prototype(verbose=1);\ngtf.Prototype(\"Project\", \"Epochs-5\");",
"Keras Version: 2.2.5\nTensorflow Version: 1.12.0\n\nExperiment Details\n Project: Project\n Experiment: Epochs-5\n Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.0_blocks/study_roadmap/change_post_num_layers/5_transfer_learning_params/3_training_epochs/workspace/Project/Epochs-5/\n\n"
]
],
[
[
"### This creates files and directories as per the following structure\n \n \n workspace\n |\n |--------Project\n |\n |\n |-----Freeze_Base_Network\n |\n |-----experiment-state.json\n |\n |-----output\n |\n |------logs (All training logs and graphs saved here)\n |\n |------models (all trained models saved here)\n ",
"_____no_output_____"
],
[
"## Set dataset and select the model",
"_____no_output_____"
],
[
"## Quick mode training\n\n - Using Default Function\n - dataset_path\n - model_name\n - freeze_base_network\n - num_epochs\n \n \n## Sample Dataset folder structure\n\n parent_directory\n |\n |\n |------cats\n |\n |------img1.jpg\n |------img2.jpg\n |------.... (and so on)\n |------dogs\n |\n |------img1.jpg\n |------img2.jpg\n |------.... (and so on) ",
"_____no_output_____"
],
[
"## Modifyable params \n - dataset_path: path to data\n - model_name: which pretrained model to use\n - freeze_base_network: Retrain already trained network or not\n - num_epochs: Number of epochs to train for",
"_____no_output_____"
]
],
[
[
"gtf.Default(dataset_path=\"skin_cancer_mnist_dataset/images\",\n path_to_csv=\"skin_cancer_mnist_dataset/train_labels.csv\",\n model_name=\"resnet50\", \n freeze_base_network=True,\n \n \n \n num_epochs=5); #Set number of epochs here\n\n#Read the summary generated once you run this cell. ",
"Dataset Details\n Train path: skin_cancer_mnist_dataset/images\n Val path: None\n CSV train path: skin_cancer_mnist_dataset/train_labels.csv\n CSV val path: None\n\nDataset Params\n Input Size: 224\n Batch Size: 4\n Data Shuffle: True\n Processors: 4\n Train-val split: 0.7\n Delimiter: ,\n\nFound 7011 validated image filenames belonging to 7 classes.\nFound 3004 validated image filenames belonging to 7 classes.\nPre-Composed Train Transforms\n[{'RandomHorizontalFlip': {'p': 0.8}}, {'MeanSubtraction': {'mean': [0.485, 0.456, 0.406]}}]\n\nPre-Composed Val Transforms\n[{'RandomHorizontalFlip': {'p': 0.8}}, {'MeanSubtraction': {'mean': [0.485, 0.456, 0.406]}}]\n\nDataset Numbers\n Num train images: 7011\n Num val images: 3004\n Num classes: 7\n\nModel Params\n Model name: resnet50\n Use Gpu: True\n Gpu Memory Fraction: 0.6\n Use pretrained: True\n Freeze base network: True\n\nModel Details\n Loading pretrained model\n Model Loaded on device\n Model name: resnet50\n Num layers in model: 108\n Num trainable layers: 2\n\nOptimizer\n Name: sgd\n Learning rate: 0.0001\n Params: {'lr': 0.0001, 'momentum': 0.9, 'weight_decay': 0, 'momentum_dampening_rate': 0, 'clipnorm': 0.0, 'clipvalue': 0.0}\n\n\n\nLearning rate scheduler\n Name: reduceonplateaulr\n Params: {'mode': 'min', 'factor': 0.1, 'patience': 1, 'verbose': True, 'threshold': 0.0001, 'threshold_mode': 'rel', 'cooldown': 0, 'min_lr': 0, 'epsilon': 1e-08}\n\nLoss\n Name: crossentropy\n Params: {'weight': None, 'batch_axis': 0, 'axis_to_sum_over': -1, 'label_as_categories': True, 'label_smoothing': False}\n\nTraining params\n Num Epochs: 5\n\nDisplay params\n Display progress: True\n Display progress realtime: True\n Save Training logs: True\n Save Intermediate models: True\n Intermediate model prefix: intermediate_model_\n\n"
]
],
[
[
"## From summary above\n\n Training params\n Num Epochs: 5",
"_____no_output_____"
],
[
"## Train the classifier",
"_____no_output_____"
]
],
[
[
"#Start Training\ngtf.Train();\n\n#Read the training summary generated once you run the cell and training is completed",
"_____no_output_____"
]
],
[
[
"### Final training loss - \n\n### Final validation loss - \n\n(You may get a different result)",
"_____no_output_____"
],
[
"<a id='2'></a>\n# Re-Train a new experiment for 10 epochs",
"_____no_output_____"
],
[
"## Creating and managing experiments\n - Provide project name\n - Provide experiment name\n - For a specific data create a single project\n - Inside each project multiple experiments can be created\n - Every experiment can be have diferent hyper-parameters attached to it",
"_____no_output_____"
]
],
[
[
"gtf = prototype(verbose=1);\ngtf.Prototype(\"Project\", \"Epochs-10\");",
"Keras Version: 2.2.5\nTensorflow Version: 1.12.0\n\nExperiment Details\n Project: Project\n Experiment: Epochs-10\n Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.0_blocks/study_roadmap/change_post_num_layers/5_transfer_learning_params/3_training_epochs/workspace/Project/Epochs-10/\n\n"
]
],
[
[
"### This creates files and directories as per the following structure\n \n \n workspace\n |\n |--------Project\n |\n |\n |-----Epochs-5 (Previously created)\n |\n |-----experiment-state.json\n |\n |-----output\n |\n |------logs (All training logs and graphs saved here)\n |\n |------models (all trained models saved here)\n |\n |\n |-----Epochs-10 (Created Now)\n |\n |-----experiment-state.json\n |\n |-----output\n |\n |------logs (All training logs and graphs saved here)\n |\n |------models (all trained models saved here)",
"_____no_output_____"
],
[
"## Set dataset and select the model",
"_____no_output_____"
],
[
"## Quick mode training\n\n - Using Default Function\n - dataset_path\n - model_name\n - freeze_base_network\n - num_epochs\n \n \n## Sample Dataset folder structure\n\n parent_directory\n |\n |\n |------cats\n |\n |------img1.jpg\n |------img2.jpg\n |------.... (and so on)\n |------dogs\n |\n |------img1.jpg\n |------img2.jpg\n |------.... (and so on)",
"_____no_output_____"
],
[
"## Modifyable params \n - dataset_path: path to data\n - model_name: which pretrained model to use\n - freeze_base_network: Retrain already trained network or not\n - num_epochs: Number of epochs to train for",
"_____no_output_____"
]
],
[
[
"gtf.Default(dataset_path=\"skin_cancer_mnist_dataset/images\",\n path_to_csv=\"skin_cancer_mnist_dataset/train_labels.csv\",\n model_name=\"resnet50\", \n freeze_base_network=True,\n \n \n \n num_epochs=10); #Set number of epochs here\n\n#Read the summary generated once you run this cell. ",
"Dataset Details\n Train path: skin_cancer_mnist_dataset/images\n Val path: None\n CSV train path: skin_cancer_mnist_dataset/train_labels.csv\n CSV val path: None\n\nDataset Params\n Input Size: 224\n Batch Size: 4\n Data Shuffle: True\n Processors: 4\n Train-val split: 0.7\n Delimiter: ,\n\nFound 7011 validated image filenames belonging to 7 classes.\nFound 3004 validated image filenames belonging to 7 classes.\nPre-Composed Train Transforms\n[{'RandomHorizontalFlip': {'p': 0.8}}, {'MeanSubtraction': {'mean': [0.485, 0.456, 0.406]}}]\n\nPre-Composed Val Transforms\n[{'RandomHorizontalFlip': {'p': 0.8}}, {'MeanSubtraction': {'mean': [0.485, 0.456, 0.406]}}]\n\nDataset Numbers\n Num train images: 7011\n Num val images: 3004\n Num classes: 7\n\nModel Params\n Model name: resnet50\n Use Gpu: True\n Gpu Memory Fraction: 0.6\n Use pretrained: True\n Freeze base network: True\n\nModel Details\n Loading pretrained model\n Model Loaded on device\n Model name: resnet50\n Num layers in model: 108\n Num trainable layers: 2\n\nOptimizer\n Name: sgd\n Learning rate: 0.0001\n Params: {'lr': 0.0001, 'momentum': 0.9, 'weight_decay': 0, 'momentum_dampening_rate': 0, 'clipnorm': 0.0, 'clipvalue': 0.0}\n\n\n\nLearning rate scheduler\n Name: reduceonplateaulr\n Params: {'mode': 'min', 'factor': 0.1, 'patience': 3, 'verbose': True, 'threshold': 0.0001, 'threshold_mode': 'rel', 'cooldown': 0, 'min_lr': 0, 'epsilon': 1e-08}\n\nLoss\n Name: crossentropy\n Params: {'weight': None, 'batch_axis': 0, 'axis_to_sum_over': -1, 'label_as_categories': True, 'label_smoothing': False}\n\nTraining params\n Num Epochs: 10\n\nDisplay params\n Display progress: True\n Display progress realtime: True\n Save Training logs: True\n Save Intermediate models: True\n Intermediate model prefix: intermediate_model_\n\n"
]
],
[
[
"## From summary above\n\n Training params\n Num Epochs: 10",
"_____no_output_____"
],
[
"## Train the classifier",
"_____no_output_____"
]
],
[
[
"#Start Training\ngtf.Train();\n\n#Read the training summary generated once you run the cell and training is completed",
"_____no_output_____"
]
],
[
[
"### Final training loss - \n\n### Final validation loss - \n\n(You may get a different result)",
"_____no_output_____"
]
],
[
[
"\n",
"_____no_output_____"
]
],
[
[
"<a id='3'></a>\n# Re-Train a third experiment for 20 epochs",
"_____no_output_____"
],
[
"## Creating and managing experiments\n - Provide project name\n - Provide experiment name\n - For a specific data create a single project\n - Inside each project multiple experiments can be created\n - Every experiment can be have diferent hyper-parameters attached to it",
"_____no_output_____"
]
],
[
[
"gtf = prototype(verbose=1);\ngtf.Prototype(\"Project\", \"Epochs-20\");",
"Keras Version: 2.2.5\nTensorflow Version: 1.12.0\n\nExperiment Details\n Project: Project\n Experiment: Epochs-20\n Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.0_blocks/study_roadmap/change_post_num_layers/5_transfer_learning_params/3_training_epochs/workspace/Project/Epochs-20/\n\n"
]
],
[
[
"### This creates files and directories as per the following structure\n \n \n workspace\n |\n |--------Project\n |\n |\n |-----Epochs-5 (Previously created)\n |\n |-----experiment-state.json\n |\n |-----output\n |\n |------logs (All training logs and graphs saved here)\n |\n |------models (all trained models saved here)\n |\n |\n |-----Epochs-10 (Previously Created)\n |\n |-----experiment-state.json\n |\n |-----output\n |\n |------logs (All training logs and graphs saved here)\n |\n |------models (all trained models saved here)\n |\n |\n |-----Epochs-20 (Created Now)\n |\n |-----experiment-state.json\n |\n |-----output\n |\n |------logs (All training logs and graphs saved here)\n |\n |------models (all trained models saved here)",
"_____no_output_____"
],
[
"## Modifyable params \n - dataset_path: path to data\n - model_name: which pretrained model to use\n - freeze_base_network: Retrain already trained network or not\n - num_epochs: Number of epochs to train for",
"_____no_output_____"
]
],
[
[
"gtf.Default(dataset_path=\"skin_cancer_mnist_dataset/images\",\n path_to_csv=\"skin_cancer_mnist_dataset/train_labels.csv\",\n model_name=\"resnet50\", \n freeze_base_network=True,\n \n \n \n num_epochs=20); #Set number of epochs here\n\n#Read the summary generated once you run this cell. ",
"Dataset Details\n Train path: skin_cancer_mnist_dataset/images\n Val path: None\n CSV train path: skin_cancer_mnist_dataset/train_labels.csv\n CSV val path: None\n\nDataset Params\n Input Size: 224\n Batch Size: 4\n Data Shuffle: True\n Processors: 4\n Train-val split: 0.7\n Delimiter: ,\n\nFound 7011 validated image filenames belonging to 7 classes.\nFound 3004 validated image filenames belonging to 7 classes.\nPre-Composed Train Transforms\n[{'RandomHorizontalFlip': {'p': 0.8}}, {'MeanSubtraction': {'mean': [0.485, 0.456, 0.406]}}, {'RandomHorizontalFlip': {'p': 0.8}}, {'MeanSubtraction': {'mean': [0.485, 0.456, 0.406]}}]\n\nPre-Composed Val Transforms\n[{'RandomHorizontalFlip': {'p': 0.8}}, {'MeanSubtraction': {'mean': [0.485, 0.456, 0.406]}}, {'RandomHorizontalFlip': {'p': 0.8}}, {'MeanSubtraction': {'mean': [0.485, 0.456, 0.406]}}]\n\nDataset Numbers\n Num train images: 7011\n Num val images: 3004\n Num classes: 7\n\nModel Params\n Model name: resnet50\n Use Gpu: True\n Gpu Memory Fraction: 0.6\n Use pretrained: True\n Freeze base network: True\n\nModel Details\n Loading pretrained model\n Model Loaded on device\n Model name: resnet50\n Num layers in model: 108\n Num trainable layers: 2\n\nOptimizer\n Name: sgd\n Learning rate: 0.0001\n Params: {'lr': 0.0001, 'momentum': 0.9, 'weight_decay': 0, 'momentum_dampening_rate': 0, 'clipnorm': 0.0, 'clipvalue': 0.0}\n\n\n\nLearning rate scheduler\n Name: reduceonplateaulr\n Params: {'mode': 'min', 'factor': 0.1, 'patience': 6, 'verbose': True, 'threshold': 0.0001, 'threshold_mode': 'rel', 'cooldown': 0, 'min_lr': 0, 'epsilon': 1e-08}\n\nLoss\n Name: crossentropy\n Params: {'weight': None, 'batch_axis': 0, 'axis_to_sum_over': -1, 'label_as_categories': True, 'label_smoothing': False}\n\nTraining params\n Num Epochs: 20\n\nDisplay params\n Display progress: True\n Display progress realtime: True\n Save Training logs: True\n Save Intermediate models: True\n Intermediate model prefix: intermediate_model_\n\n"
]
],
[
[
"## From summary above\n\n Training params\n Num Epochs: 20",
"_____no_output_____"
],
[
"## Train the classifier",
"_____no_output_____"
]
],
[
[
"#Start Training\ngtf.Train();\n\n#Read the training summary generated once you run the cell and training is completed",
"_____no_output_____"
]
],
[
[
"### Final training loss - \n\n### Final validation loss - \n\n(You may get a different result)",
"_____no_output_____"
],
[
"<a id='4'></a>\n# Compare the experiments",
"_____no_output_____"
]
],
[
[
"# Invoke the comparison class\nfrom compare_prototype import compare",
"_____no_output_____"
]
],
[
[
"### Creating and managing comparison experiments\n - Provide project name",
"_____no_output_____"
]
],
[
[
"# Create a project \ngtf = compare(verbose=1);\ngtf.Comparison(\"Compare-effect-of-num-epochs\");",
"_____no_output_____"
]
],
[
[
"### This creates files and directories as per the following structure\n \n workspace\n |\n |--------comparison\n |\n |\n |-----Compare-effect-of-num-epochs\n |\n |------stats_best_val_acc.png\n |------stats_max_gpu_usage.png\n |------stats_training_time.png\n |------train_accuracy.png\n |------train_loss.png\n |------val_accuracy.png\n |------val_loss.png\n \n |\n |-----comparison.csv (Contains necessary details of all experiments)",
"_____no_output_____"
],
[
"### Add the experiments\n - First argument - Project name\n - Second argument - Experiment name",
"_____no_output_____"
]
],
[
[
"gtf.Add_Experiment(\"Project\", \"Epochs-5\");\ngtf.Add_Experiment(\"Project\", \"Epochs-10\");\ngtf.Add_Experiment(\"Project\", \"Epochs-20\");",
"_____no_output_____"
]
],
[
[
"### Run Analysis",
"_____no_output_____"
]
],
[
[
"gtf.Generate_Statistics();",
"_____no_output_____"
]
],
[
[
"## Visualize and study comparison metrics",
"_____no_output_____"
],
[
"### Training Accuracy Curves",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename=\"workspace/comparison/Compare-effect-of-num-epochs/train_accuracy.png\") ",
"_____no_output_____"
]
],
[
[
"### Training Loss Curves",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename=\"workspace/comparison/Compare-effect-of-num-epochs/train_loss.png\") ",
"_____no_output_____"
]
],
[
[
"### Validation Accuracy Curves",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename=\"workspace/comparison/Compare-effect-of-num-epochs/val_accuracy.png\") ",
"_____no_output_____"
]
],
[
[
"### Validation loss curves",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename=\"workspace/comparison/Compare-effect-of-num-epochs/val_loss.png\") ",
"_____no_output_____"
]
],
[
[
"## Training Accuracies achieved \n\n### With 5 epochs - \n### With 10 epochs - \n### With 20 epochs - \n\n\n## Validation accuracies achieved \n\n### With 5 epochs - \n### With 10 epochs -\n### With 20 epochs - \n\n\n(You may get a different result)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e740d4c5aecb522ad84d33f79665daee15d241d2 | 139,298 | ipynb | Jupyter Notebook | code/CH01/CH01_SEC05_2_OvarianCancer.ipynb | ksang/data-book-python | 08b5d3ae76980628887bc1ab2e17d83ddf07b00c | [
"MIT"
] | 1 | 2022-01-22T03:45:05.000Z | 2022-01-22T03:45:05.000Z | code/CH01/CH01_SEC05_2_OvarianCancer.ipynb | ksang/data-book-python | 08b5d3ae76980628887bc1ab2e17d83ddf07b00c | [
"MIT"
] | null | null | null | code/CH01/CH01_SEC05_2_OvarianCancer.ipynb | ksang/data-book-python | 08b5d3ae76980628887bc1ab2e17d83ddf07b00c | [
"MIT"
] | null | null | null | 1,326.647619 | 111,020 | 0.960294 | [
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom mpl_toolkits.mplot3d import Axes3D\nplt.rcParams['figure.figsize'] = [16, 8]\nplt.rcParams.update({'font.size': 18})\n\n\nobs = np.loadtxt(os.path.join('..','DATA','ovariancancer_obs.csv'),delimiter=',')\n\nf = open(os.path.join('..','DATA','ovariancancer_grp.csv'), \"r\")\ngrp = f.read().split(\"\\n\")\n\nU, S, VT = np.linalg.svd(obs,full_matrices=0)\n\nfig1 = plt.figure()\nax1 = fig1.add_subplot(121)\nax1.semilogy(S,'-o',color='k')\nax2 = fig1.add_subplot(122)\nax2.plot(np.cumsum(S)/np.sum(S),'-o',color='k')\n\nplt.show()",
"_____no_output_____"
],
[
"fig2 = plt.figure()\nax = fig2.add_subplot(111, projection='3d')\n\nfor j in range(obs.shape[0]):\n x = VT[0,:] @ obs[j,:].T\n y = VT[1,:] @ obs[j,:].T\n z = VT[2,:] @ obs[j,:].T\n \n if grp[j] == 'Cancer':\n ax.scatter(x,y,z,marker='x',color='r',s=50)\n else:\n ax.scatter(x,y,z,marker='o',color='b',s=50)\n\nax.view_init(25,20)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e740dad9a331787b93ada353073e771fd3cf226d | 370,678 | ipynb | Jupyter Notebook | .ipynb_checkpoints/Raster_manipulation-checkpoint.ipynb | pavlovc2/goes_r_fire | 0f03359b373330005cff5641bf45f98a1b4810dc | [
"MIT"
] | 2 | 2018-08-10T02:42:09.000Z | 2019-02-27T21:34:25.000Z | .ipynb_checkpoints/Raster_manipulation-checkpoint.ipynb | pavlovc2/goes_r_fire | 0f03359b373330005cff5641bf45f98a1b4810dc | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Raster_manipulation-checkpoint.ipynb | pavlovc2/goes_r_fire | 0f03359b373330005cff5641bf45f98a1b4810dc | [
"MIT"
] | 2 | 2019-07-24T09:31:25.000Z | 2021-03-19T02:18:00.000Z | 842.45 | 275,428 | 0.942103 | [
[
[
"# Examine a sample image\n\nMuch of what is describe here has been borrowed from the following resources\nhttps://github.com/blaylockbk/pyBKB_v2/blob/master/BB_goes16/mapping_GOES16_data.ipynb\nhttp://edc.occ-data.org/goes16/python/\nhttp://www.ceda.ac.uk/static/media/uploads/ncas-reading-2015/10_read_netcdf_python.pdf\n\n## Import the libraries and set working directory",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom netCDF4 import Dataset\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os \nfrom pyproj import Proj\nimport datetime\nfrom mpl_toolkits.basemap import Basemap\nfrom osgeo import gdal\n\nos.chdir(\"/Users/nathan/Documents/Projects/GOES_Fire_Growth/Raw_Data\")",
"_____no_output_____"
]
],
[
[
"## Import dataset and examine dimensions",
"_____no_output_____"
]
],
[
[
"C_file = Dataset(\"OR_ABI-L2-CMIPC-M3C07_G16_s20172830932227_e20172830935012_c20172830935048.nc\", 'r')\nref_ch7 = C_file.variables['CMI'][:]\n#C_file.close()\n#C_file = None\nprint C_file.file_format\ndims = C_file.dimensions.keys()\nprint dims\nfor dim in dims:\n print C_file.dimensions[dim]",
"NETCDF4\n[u'y', u'x', u'number_of_time_bounds', u'band', u'number_of_image_bounds']\n<type 'netCDF4._netCDF4.Dimension'>: name = 'y', size = 1500\n\n<type 'netCDF4._netCDF4.Dimension'>: name = 'x', size = 2500\n\n<type 'netCDF4._netCDF4.Dimension'>: name = 'number_of_time_bounds', size = 2\n\n<type 'netCDF4._netCDF4.Dimension'>: name = 'band', size = 1\n\n<type 'netCDF4._netCDF4.Dimension'>: name = 'number_of_image_bounds', size = 2\n\n"
]
],
[
[
"## Examine Variables",
"_____no_output_____"
]
],
[
[
"print C_file.variables.keys()\nprint C_file.variables[\"goes_imager_projection\"]",
"[u'CMI', u'DQF', u't', u'y', u'x', u'time_bounds', u'goes_imager_projection', u'y_image', u'y_image_bounds', u'x_image', u'x_image_bounds', u'nominal_satellite_subpoint_lat', u'nominal_satellite_subpoint_lon', u'nominal_satellite_height', u'geospatial_lat_lon_extent', u'band_wavelength', u'band_id', u'total_number_of_points', u'valid_pixel_count', u'outlier_pixel_count', u'min_brightness_temperature', u'max_brightness_temperature', u'mean_brightness_temperature', u'std_dev_brightness_temperature', u'esun', u'kappa0', u'planck_fk1', u'planck_fk2', u'planck_bc1', u'planck_bc2', u'algorithm_dynamic_input_data_container', u'percent_uncorrectable_GRB_errors', u'percent_uncorrectable_L0_errors', u'earth_sun_distance_anomaly_in_AU', u'processing_parm_version_container', u'algorithm_product_version_container']\n<type 'netCDF4._netCDF4.Variable'>\nint32 goes_imager_projection()\n long_name: GOES-R ABI fixed grid projection\n grid_mapping_name: geostationary\n perspective_point_height: 35786023.0\n semi_major_axis: 6378137.0\n semi_minor_axis: 6356752.31414\n inverse_flattening: 298.2572221\n latitude_of_projection_origin: 0.0\n longitude_of_projection_origin: -89.5\n sweep_angle_axis: x\nunlimited dimensions: \ncurrent shape = ()\nfilling on, default _FillValue of -2147483647 used\n\n"
]
],
[
[
"## Get time",
"_____no_output_____"
]
],
[
[
"# Data are stored as seconds since 2000-01-01 12:00:00\nsecs = C_file.variables['t'][0]\nimg_date = datetime.datetime(2000, 1, 1, 12) + datetime.timedelta(seconds = secs)",
"_____no_output_____"
]
],
[
[
"## Get image data",
"_____no_output_____"
]
],
[
[
"b = C_file.variables['CMI']\n\n# Plot it\nplt.figure(figsize = [8,8])\nplt.imshow(b)\nplt.title(img_date)",
"_____no_output_____"
],
[
"bt = np.array(b) > 295\n\n# Plot it\nplt.figure(figsize = [8,8])\nplt.imshow(bt)\nplt.title(img_date)",
"_____no_output_____"
]
],
[
[
"## Get projection and location info",
"_____no_output_____"
]
],
[
[
"sh = C_file.variables['goes_imager_projection'].perspective_point_height\nslon = C_file.variables['goes_imager_projection'].longitude_of_projection_origin\nssweep = C_file.variables['goes_imager_projection'].sweep_angle_axis\n\n# Get coordinates\nxcoords = C_file.variables['x'][:] * sh\nycoords = C_file.variables['y'][:] * sh\n\n# Convert to lat lon\np = Proj(proj = \"geos\", h = sh, lon_0 = slon, sweep = ssweep)\nXs, Ys = np.meshgrid(xcoords, ycoords)\nlons, lats = p(Xs, Ys, inverse = True)",
"_____no_output_____"
]
],
[
[
"## Subset to North Bay area",
"_____no_output_____"
]
],
[
[
"nbb = b[325:475, 30:120]\n\n# Plot it\nplt.figure(figsize = [8,8])\nplt.imshow(nbb)\nplt.title(img_date)",
"_____no_output_____"
],
[
"# Histogram\nplt.hist(np.concatenate(nbb))",
"_____no_output_____"
],
[
"nbbt = nbb > 295\n\n# Plot it\nplt.figure(figsize = [8,8])\nplt.imshow(nbbt)\nplt.title(img_date)",
"_____no_output_____"
]
],
[
[
"## Try writing raster",
"_____no_output_____"
]
],
[
[
"driver = gdal.GetDriverByName('GTiff')\nnew_file = driver.Create('test_band1.tif', \n C_file.dimensions['x'].size, # number of columns\n C_file.dimensions['y'].size, # number of rows\n 1, # number of bands\n gdal.GDT_Float32) # datatype\n\nhelp(Proj.srs)\nprint(dir(p))\nnew_file.SetProjection(p.srs)\nnew_band = new_file.GetRasterBand(1)\nnew_band.WriteArray(np.array(b))",
"Help on getset descriptor _proj.Proj.srs:\n\nsrs\n\n['__call__', '__class__', '__delattr__', '__dict__', '__doc__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_fwd', '_inv', 'is_geocent', 'is_latlong', 'proj_version', 'srs', 'to_latlong']\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e740e1f5e1ad6603d9bd3c61760078349433cba9 | 360,618 | ipynb | Jupyter Notebook | jupyter-notebooks/vault/Kinase Sarfari regression dataset.ipynb | cdd/os-models | 443536657a685de9e4e769b11714b7432307af17 | [
"Apache-2.0"
] | 1 | 2019-10-28T09:43:28.000Z | 2019-10-28T09:43:28.000Z | jupyter-notebooks/vault/Kinase Sarfari regression dataset.ipynb | cdd/os-models | 443536657a685de9e4e769b11714b7432307af17 | [
"Apache-2.0"
] | null | null | null | jupyter-notebooks/vault/Kinase Sarfari regression dataset.ipynb | cdd/os-models | 443536657a685de9e4e769b11714b7432307af17 | [
"Apache-2.0"
] | 1 | 2018-12-05T02:39:37.000Z | 2018-12-05T02:39:37.000Z | 91.830405 | 42,390 | 0.716928 | [
[
[
"%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as n\n\nimport sys\nimport os\nsys.path.insert(0, 'python')",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"dirname = os.getcwd()\nregression_file = os.path.abspath(os.path.join(dirname, \n 'regression_kinase_sarfari.csv'))\nregression_df = pd.read_csv(regression_file)\nselect_fields = ['ProtocolName', 'LabelName', 'GroupValues']\nregression_df.sort_values(select_fields)",
"_____no_output_____"
],
[
"regression_df.LabelName.value_counts()\nregression_df.shape\nregression_df[regression_df.Correlation.isnull()]",
"_____no_output_____"
],
[
"regression_df = regression_df[~regression_df.Correlation.isnull()]",
"_____no_output_____"
],
[
"max_indices=regression_df.groupby(select_fields)[\"Correlation\"].idxmax()\nbest_regression_df=regression_df.loc[max_indices]\nbest_regression_df.sort_values(select_fields)",
"_____no_output_____"
],
[
"best_regression_df['Correlation'].describe()",
"_____no_output_____"
],
[
"best_regression_df['RMSE'].describe()",
"_____no_output_____"
],
[
"high_rms_df = best_regression_df[best_regression_df.RMSE > 10]\nprint(high_rms_df.ProtocolName.unique())\nfor _, value in high_rms_df.GroupValues.items():\n print(value)",
"[]\n"
],
[
"fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14, 7))\nsns.distplot(best_regression_df['Correlation'], rug=True, kde=True, ax=axes[0])\nsns.distplot(best_regression_df['RMSE'], rug=False, kde=True, ax=axes[1])",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(7, 7))\nsns.distplot(best_regression_df.Size, rug=True, kde=False, norm_hist=False, ax=axes)",
"_____no_output_____"
],
[
"best_regression_df['Estimator'].value_counts()",
"_____no_output_____"
],
[
"sns.countplot(y='Estimator', data=best_regression_df)",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(14, 7))\nsns.boxplot(y='Estimator', x='Correlation', data=regression_df, ax=axes)\n#for tick in axes.get_xticklabels():\n# tick.set_rotation(80)",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(14, 7))\nsns.boxplot(x='Estimator', y='RMSE', data=regression_df, ax=axes)\nfor tick in axes.get_xticklabels():\n tick.set_rotation(80)",
"_____no_output_____"
],
[
"dirname = os.getcwd()\nclassification_file = os.path.abspath(os.path.join(dirname, \n 'classification_v_1_d_KINASE: GSK Published Kinase Inhibitor Set (PKIS)_p_Kinase Assay.csv'))\nclassification_df = pd.read_csv(classification_file)\nclassification_df.sort_values(['GroupName', 'GroupValue'])\n",
"_____no_output_____"
],
[
"max_indices=classification_df.groupby(['GroupName', 'GroupValue'])[\"ROC_AUC\"].idxmax()\nbest_classification_df=classification_df.loc[max_indices]\nbest_classification_df.sort_values(['GroupName', 'GroupValue'])",
"_____no_output_____"
],
[
"best_classification_df['ROC_AUC'].describe()",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(7, 7))\nsns.distplot(best_classification_df['ROC_AUC'], rug=True, kde=True, ax=axes)",
"_____no_output_____"
],
[
"best_classification_df['Estimator'].value_counts()",
"_____no_output_____"
],
[
"sns.countplot(y='Estimator', data=best_classification_df)",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(14, 7))\nsns.boxplot(y='Estimator', x='ROC_AUC', data=classification_df, ax=axes)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e740fc945a505c9634040f71db6ef51b7ffb943b | 12,513 | ipynb | Jupyter Notebook | notes/02 Pandas Mini-Project/Statistics from Stock Data.ipynb | jedrzejpolaczek/AIPND | 8bfce4e2ca2c54257cfd660dd6f75eb27344a8fb | [
"MIT"
] | null | null | null | notes/02 Pandas Mini-Project/Statistics from Stock Data.ipynb | jedrzejpolaczek/AIPND | 8bfce4e2ca2c54257cfd660dd6f75eb27344a8fb | [
"MIT"
] | null | null | null | notes/02 Pandas Mini-Project/Statistics from Stock Data.ipynb | jedrzejpolaczek/AIPND | 8bfce4e2ca2c54257cfd660dd6f75eb27344a8fb | [
"MIT"
] | null | null | null | 33.818919 | 665 | 0.627507 | [
[
[
"# Statistics from Stock Data\n\nIn this lab we will load stock data into a Pandas Dataframe and calculate some statistics on it. We will be working with stock data from Google, Apple, and Amazon. All the stock data was downloaded from yahoo finance in CSV format. In your workspace you should have a file named GOOG.csv containing the Google stock data, a file named AAPL.csv containing the Apple stock data, and a file named AMZN.csv containing the Amazon stock data. All the files contain 7 columns of data:\n\n**Date Open High Low Close Adj_Close Volume**\n\nWe will start by reading in any of the above CSV files into a DataFrame and see what the data looks like.",
"_____no_output_____"
]
],
[
[
"# We import pandas into Python\nimport pandas as pd\n\n# We read in a stock data data file into a data frame and see what it looks like\ndf = pd.read_csv('./GOOG.csv')\n\n# We display the first 5 rows of the DataFrame\ndf.head()",
"_____no_output_____"
]
],
[
[
"We clearly see that the Dataframe is has automatically labeled the row indices using integers and has labeled the columns of the DataFrame using the names of the columns in the CSV files.\n\n# To Do\n\nYou will now load the stock data from Google, Apple, and Amazon into separte DataFrames. However, for each stock data you will only be interested in loading the `Date` and `Adj Close` columns into the Dataframe. In addtion, you want to use the `Date` column as your row index. Finally, you want the DataFrame to recognize the dates as actual dates (year/month/day) and not as strings. For each stock, you can accomplish all theses things in just one line of code by using the appropiate keywords in the `pd.read_csv()` function. Here are a few hints:\n\n* Use the `index_col` keyword to indicate which column you want to use as an index. For example `index_col = ['Open']`\n\n* Set the `parse_dates` keyword equal to `True` to convert the Dates into real dates of the form year/month/day\n\n* Use the `usecols` keyword to select which columns you want to load into the DataFrame. For example `usecols = ['Open', 'High']`\n\nFill in the code below:",
"_____no_output_____"
]
],
[
[
"# We load the Google stock data into a DataFrame\ngoogle_stock = pd.read_csv('./GOOG.csv', parse_dates=True)\n\n# We load the Apple stock data into a DataFrame\napple_stock = pd.read_csv('./AAPL.csv', parse_dates=True)\n\n# We load the Amazon stock data into a DataFrame\namazon_stock = pd.read_csv('./AMZN.csv', parse_dates=True)",
"_____no_output_____"
]
],
[
[
"You can check that you have loaded the data correctly by displaying the head of the DataFrames.",
"_____no_output_____"
]
],
[
[
"# We display the google_stock DataFrame\ngoogle_stock.head()",
"_____no_output_____"
]
],
[
[
"You will now join the three DataFrames above to create a single new DataFrame that contains all the `Adj Close` for all the stocks. Let's start by creating an empty DataFrame that has as row indices calendar days between `2000-01-01` and `2016-12-31`. We will use the `pd.date_range()` function to create the calendar dates first and then we will create a DataFrame that uses those dates as row indices:",
"_____no_output_____"
]
],
[
[
"# We create calendar dates between '2000-01-01' and '2016-12-31'\ndates = pd.date_range('2000-01-01', '2016-12-31')\n\n# We create and empty DataFrame that uses the above dates as indices\nall_stocks = pd.DataFrame(index = dates)",
"_____no_output_____"
]
],
[
[
"# To Do\n\nYou will now join the the individual DataFrames, `google_stock`, `apple_stock`, and `amazon_stock`, to the `all_stocks` DataFrame. However, before you do this, it is necessary that you change the name of the columns in each of the three dataframes. This is because the column labels in the `all_stocks` dataframe must be unique. Since all the columns in the individual dataframes have the same name, `Adj Close`, we must change them to the stock name before joining them. In the space below change the column label `Adj Close` of each individual dataframe to the name of the corresponding stock. You can do this by using the `pd.DataFrame.rename()` function. ",
"_____no_output_____"
]
],
[
[
"# Change the Adj Close column label to Google\ngoogle_stock = google_stock.rename(columns = {'Adj Close': 'Google'})\n\n# Change the Adj Close column label to Apple\napple_stock = apple_stock.rename(columns = {'Adj Close': 'Apple'})\n\n# Change the Adj Close column label to Amazon\namazon_stock = amazon_stock.rename(columns = {'Adj Close':'Amazon'})",
"_____no_output_____"
]
],
[
[
"You can check that the column labels have been changed correctly by displaying the datadrames",
"_____no_output_____"
]
],
[
[
"# We display the google_stock DataFrame\ngoogle_stock.head()",
"_____no_output_____"
],
[
"# We display the apple_stock DataFrame\napple_stock.head()",
"_____no_output_____"
],
[
"# We display the amazon_stock DataFrame\namazon_stock.head()",
"_____no_output_____"
]
],
[
[
"Now that we have unique column labels, we can join the individual DataFrames to the `all_stocks` DataFrame. For this we will use the `dataframe.join()` function. The function `dataframe1.join(dataframe2)` joins `dataframe1` with `dataframe2`. We will join each dataframe one by one to the `all_stocks` dataframe. Fill in the code below to join the dataframes, the first join has been made for you:",
"_____no_output_____"
]
],
[
[
"# We join the Google stock to all_stocks\nall_stocks = all_stocks.join(google_stock, lsuffix=\"_all_stocks\", rsuffix=\"_google\")\n\n# We join the Apple stock to all_stocks\nall_stocks = all_stocks.join(apple_stock, lsuffix=\"_all_stocks\", rsuffix=\"_google\")\n\n# We join the Amazon stock to all_stocks\nall_stocks =all_stocks.join(amazon_stock, lsuffix=\"_all_stocks\", rsuffix=\"_google\")",
"_____no_output_____"
]
],
[
[
"You can check that the dataframes have been joined correctly by displaying the `all_stocks` dataframe",
"_____no_output_____"
]
],
[
[
"# We display the all_stocks DataFrame\nall_stocks.head()",
"_____no_output_____"
]
],
[
[
"# To Do\n\nBefore we proceed to get some statistics on the stock data, let's first check that we don't have any *NaN* values. In the space below check if there are any *NaN* values in the `all_stocks` dataframe. If there are any, remove any rows that have *NaN* values:",
"_____no_output_____"
]
],
[
[
"# Check if there are any NaN values in the all_stocks dataframe\nall_stocks.isnull().sum().sum()",
"_____no_output_____"
],
[
"# Remove any rows that contain NaN values\nall_stocks.dropna(axis = 0)",
"_____no_output_____"
]
],
[
[
"You can check that the *NaN* values have been eliminated by displaying the `all_stocks` dataframe",
"_____no_output_____"
]
],
[
[
"# Check if there are any NaN values in the all_stocks dataframe\n",
"_____no_output_____"
]
],
[
[
"Display the `all_stocks` dataframe and verify that there are no *NaN* values ",
"_____no_output_____"
]
],
[
[
"# We display the all_stocks DataFrame\nall_stocks.head()",
"_____no_output_____"
]
],
[
[
"Now that you have eliminated any *NaN* values we can now calculate some basic statistics on the stock prices. Fill in the code below",
"_____no_output_____"
]
],
[
[
"# Print the average stock price for each stock\nall_stocks.fillna(all_stocks.mean(), axis = 0)\n\n# Print the median stock price for each stock\nall_stocks.fillna(all_stocks.median(), axis = 0)\n\n# Print the standard deviation of the stock price for each stock \nall_stocks.fillna(all_stocks.std(), axis = 0)\n\n# Print the correlation between stocks\nall_stocks.corr()",
"_____no_output_____"
]
],
[
[
"We will now look at how we can compute some rolling statistics, also known as moving statistics. We can calculate for example the rolling mean (moving average) of the Google stock price by using the Pandas `dataframe.rolling().mean()` method. The `dataframe.rolling(N).mean()` calculates the rolling mean over an `N`-day window. In other words, we can take a look at the average stock price every `N` days using the above method. Fill in the code below to calculate the average stock price every 150 days for Google stock",
"_____no_output_____"
]
],
[
[
"# We compute the rolling mean using a 150-Day window for Google stock\nrollingMean = dataframe.rolling(150).mean()",
"_____no_output_____"
]
],
[
[
"We can also visualize the rolling mean by plotting the data in our dataframe. In the following lessons you will learn how to use **Matplotlib** to visualize data. For now I will just import matplotlib and plot the Google stock data on top of the rolling mean. You can play around by changing the rolling mean window and see how the plot changes. ",
"_____no_output_____"
]
],
[
[
"# This allows plots to be rendered in the notebook\n%matplotlib inline \n\n# We import matplotlib into Python\nimport matplotlib.pyplot as plt\n\n\n# We plot the Google stock data\nplt.plot(all_stocks['Google'])\n\n# We plot the rolling mean ontop of our Google stock data\nplt.plot(rollingMean)\nplt.legend(['Google Stock Price', 'Rolling Mean'])\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7410c4fd27f869770349a6110f9d9e2f2d65c30 | 793,963 | ipynb | Jupyter Notebook | Trademoney_Prediction.ipynb | aomike/Team_Learning_RentMoney_Prediction | fc8f5988f4f84d398565fb6ba0c30222a0b35d0b | [
"MIT"
] | null | null | null | Trademoney_Prediction.ipynb | aomike/Team_Learning_RentMoney_Prediction | fc8f5988f4f84d398565fb6ba0c30222a0b35d0b | [
"MIT"
] | null | null | null | Trademoney_Prediction.ipynb | aomike/Team_Learning_RentMoney_Prediction | fc8f5988f4f84d398565fb6ba0c30222a0b35d0b | [
"MIT"
] | null | null | null | 641.845594 | 745,172 | 0.94223 | [
[
[
"#coding:utf-8\n#导入warnings包,利用过滤器来实现忽略警告语句。\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# GBDT\nfrom sklearn.ensemble import GradientBoostingRegressor\n# XGBoost\nimport xgboost as xgb\n# LightGBM\nimport lightgbm as lgb\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import r2_score\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle\nimport multiprocessing\nfrom sklearn.preprocessing import StandardScaler\nss = StandardScaler() \nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC,LinearRegression,LogisticRegression\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import IsolationForest\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\" ",
"_____no_output_____"
],
[
"#载入数据\ndata_train = pd.read_csv('./train_data.csv')\ndata_test = pd.read_csv('./test_a.csv')\ndata_all = pd.concat([data_train, data_test], ignore_index=True)\nfea_cols = [col for col in data_train.columns]",
"_____no_output_____"
],
[
"data_train.head(5)\ndata_test.head(5)\n# np.equal(data_train.columns, data_test.columns)\n# print(fea_cols)",
"_____no_output_____"
]
],
[
[
"## 异常值处理",
"_____no_output_____"
]
],
[
[
"def dropData(train):\n # 丢弃部分异常值\n train = train[train.area <= 200]\n train = train[(train.tradeMoney <=16000) & (train.tradeMoney >=700)]\n train.drop(train[(train['totalFloor'] == 0)].index, inplace=True)\n# sns.regplot(x=data_train['area'],y=data_train['tradeMoney'])\n# plt.show()\n return train \n#数据集异常值处理\ndata_train = dropData(data_train)\nprint('len(data_train):', len(data_train))",
"_____no_output_____"
]
],
[
[
"## 缺失值处理、数据变换",
"_____no_output_____"
]
],
[
[
"def preprocessingData(data):\n # 填充缺失值\n data['rentType'][data['rentType'] == '--'] = '未知方式'\n \n # 转换object类型数据\n columns = ['houseFloor', 'houseToward', 'houseDecoration', 'communityName', 'plate'] # 'rentType', 'houseType',\n for feature in columns:\n data[feature] = LabelEncoder().fit_transform(data[feature])\n\n # 将buildYear列转换为整型数据\n buildYearmean = pd.DataFrame(data[data['buildYear'] != '暂无信息']['buildYear'].mode())\n data.loc[data[data['buildYear'] == '暂无信息'].index, 'buildYear'] = buildYearmean.iloc[0, 0]\n data['buildYear'] = data['buildYear'].astype('int')\n\n # 处理pv和uv的空值填充为平均值\n data['pv'].fillna(data['pv'].mean(), inplace=True)\n data['uv'].fillna(data['uv'].mean(), inplace=True)\n data['pv'] = data['pv'].astype('int')\n data['uv'] = data['uv'].astype('int')\n\n # 分割交易时间\n def month(x):\n month = int(x.split('/')[1])\n return month\n def day(x):\n day = int(x.split('/')[2])\n return day\n data['month'] = data['tradeTime'].apply(lambda x: month(x))\n data['day'] = data['tradeTime'].apply(lambda x: day(x))\n \n # 去掉部分特征:city=SH, ID唯一, \n data.drop('city', axis=1, inplace=True)\n data.drop('tradeTime', axis=1, inplace=True)\n return data\n\ndata_train = preprocessingData(data_train)\ndrop_ID = data_train.drop('ID', axis=1, inplace=True)\n# data_train\ndata_test = preprocessingData(data_test)\n# data_test",
"_____no_output_____"
],
[
"def cleanData(data):\n data.drop(data[(data['region']=='RG00001') & (data['tradeMoney']<1000)&(data['area']>50)].index,inplace=True)\n data.drop(data[(data['region']=='RG00001') & (data['tradeMoney']>25000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00001') & (data['area']>250)&(data['tradeMoney']<20000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00001') & (data['area']>400)&(data['tradeMoney']>50000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00001') & (data['area']>100)&(data['tradeMoney']<2000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00002') & (data['area']<100)&(data['tradeMoney']>60000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00003') & (data['area']<300)&(data['tradeMoney']>30000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00003') & (data['tradeMoney']<500)&(data['area']<50)].index,inplace=True)\n data.drop(data[(data['region']=='RG00003') & (data['tradeMoney']<1500)&(data['area']>100)].index,inplace=True)\n data.drop(data[(data['region']=='RG00003') & (data['tradeMoney']<2000)&(data['area']>300)].index,inplace=True)\n data.drop(data[(data['region']=='RG00003') & (data['tradeMoney']>5000)&(data['area']<20)].index,inplace=True)\n data.drop(data[(data['region']=='RG00003') & (data['area']>600)&(data['tradeMoney']>40000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00004') & (data['tradeMoney']<1000)&(data['area']>80)].index,inplace=True)\n data.drop(data[(data['region']=='RG00006') & (data['tradeMoney']<200)].index,inplace=True)\n data.drop(data[(data['region']=='RG00005') & (data['tradeMoney']<2000)&(data['area']>180)].index,inplace=True)\n data.drop(data[(data['region']=='RG00005') & (data['tradeMoney']>50000)&(data['area']<200)].index,inplace=True)\n data.drop(data[(data['region']=='RG00006') & (data['area']>200)&(data['tradeMoney']<2000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00007') & 
(data['area']>100)&(data['tradeMoney']<2500)].index,inplace=True)\n data.drop(data[(data['region']=='RG00010') & (data['area']>200)&(data['tradeMoney']>25000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00010') & (data['area']>400)&(data['tradeMoney']<15000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00010') & (data['tradeMoney']<3000)&(data['area']>200)].index,inplace=True)\n data.drop(data[(data['region']=='RG00010') & (data['tradeMoney']>7000)&(data['area']<75)].index,inplace=True)\n data.drop(data[(data['region']=='RG00010') & (data['tradeMoney']>12500)&(data['area']<100)].index,inplace=True)\n data.drop(data[(data['region']=='RG00004') & (data['area']>400)&(data['tradeMoney']>20000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00008') & (data['tradeMoney']<2000)&(data['area']>80)].index,inplace=True)\n data.drop(data[(data['region']=='RG00009') & (data['tradeMoney']>40000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00009') & (data['area']>300)].index,inplace=True)\n data.drop(data[(data['region']=='RG00009') & (data['area']>100)&(data['tradeMoney']<2000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00011') & (data['tradeMoney']<10000)&(data['area']>390)].index,inplace=True)\n data.drop(data[(data['region']=='RG00012') & (data['area']>120)&(data['tradeMoney']<5000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00013') & (data['area']<100)&(data['tradeMoney']>40000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00013') & (data['area']>400)&(data['tradeMoney']>50000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00013') & (data['area']>80)&(data['tradeMoney']<2000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00014') & (data['area']>300)&(data['tradeMoney']>40000)].index,inplace=True)\n data.drop(data[(data['region']=='RG00014') & (data['tradeMoney']<1300)&(data['area']>80)].index,inplace=True)\n data.drop(data[(data['region']=='RG00014') & 
(data['tradeMoney']<8000)&(data['area']>200)].index,inplace=True)\n data.drop(data[(data['region']=='RG00014') & (data['tradeMoney']<1000)&(data['area']>20)].index,inplace=True)\n data.drop(data[(data['region']=='RG00014') & (data['tradeMoney']>25000)&(data['area']>200)].index,inplace=True)\n data.drop(data[(data['region']=='RG00014') & (data['tradeMoney']<20000)&(data['area']>250)].index,inplace=True)\n data.drop(data[(data['region']=='RG00005') & (data['tradeMoney']>30000)&(data['area']<100)].index,inplace=True)\n data.drop(data[(data['region']=='RG00005') & (data['tradeMoney']<50000)&(data['area']>600)].index,inplace=True)\n data.drop(data[(data['region']=='RG00005') & (data['tradeMoney']>50000)&(data['area']>350)].index,inplace=True)\n data.drop(data[(data['region']=='RG00006') & (data['tradeMoney']>4000)&(data['area']<100)].index,inplace=True)\n data.drop(data[(data['region']=='RG00006') & (data['tradeMoney']<600)&(data['area']>100)].index,inplace=True)\n data.drop(data[(data['region']=='RG00006') & (data['area']>165)].index,inplace=True)\n data.drop(data[(data['region']=='RG00012') & (data['tradeMoney']<800)&(data['area']<30)].index,inplace=True)\n data.drop(data[(data['region']=='RG00007') & (data['tradeMoney']<1100)&(data['area']>50)].index,inplace=True)\n data.drop(data[(data['region']=='RG00004') & (data['tradeMoney']>8000)&(data['area']<80)].index,inplace=True)\n data.loc[(data['region']=='RG00002')&(data['area']>50)&(data['rentType']=='合租'),'rentType']='整租'\n data.loc[(data['region']=='RG00014')&(data['rentType']=='合租')&(data['area']>60),'rentType']='整租'\n data.drop(data[(data['region']=='RG00008')&(data['tradeMoney']>15000)&(data['area']<110)].index,inplace=True)\n data.drop(data[(data['region']=='RG00008')&(data['tradeMoney']>20000)&(data['area']>110)].index,inplace=True)\n data.drop(data[(data['region']=='RG00008')&(data['tradeMoney']<1500)&(data['area']<50)].index,inplace=True)\n 
data.drop(data[(data['region']=='RG00008')&(data['rentType']=='合租')&(data['area']>50)].index,inplace=True)\n data.drop(data[(data['region']=='RG00015') ].index,inplace=True)\n data.reset_index(drop=True, inplace=True)\n data['region'] = LabelEncoder().fit_transform(data['region'])\n return data\n\ndata_train = cleanData(data_train)\n# data_train\ndata_test['region'] = LabelEncoder().fit_transform(data_test['region'])\n# data_test",
"_____no_output_____"
],
[
"def houseType(data):\n # 特征拆分\n # 将houseType转为'Room','Hall','Bath'\n def Room(x):\n Room = int(x.split('室')[0])\n return Room\n def Hall(x):\n Hall = int(x.split(\"室\")[1].split(\"厅\")[0])\n return Hall\n def Bath(x):\n Bath = int(x.split(\"室\")[1].split(\"厅\")[1].split(\"卫\")[0])\n return Bath\n\n data['Room'] = data['houseType'].apply(lambda x: Room(x))\n data['Hall'] = data['houseType'].apply(lambda x: Hall(x))\n data['Bath'] = data['houseType'].apply(lambda x: Bath(x))\n# data['transportNum'] = 5 * data['subwayStationNum'] / data['subwayStationNum'].mean() + data['busStationNum'] / data['busStationNum'].mean()\n # 交叉生成特征:特征之间交叉+ - * / \n data['Room_Bath'] = (data['Bath']+1) / (data['Room']+1)\n \n return data\n\ndata_train = houseType(data_train)\n# data_train\ndata_test = houseType(data_test)\n# data_test",
"_____no_output_____"
],
[
"def fill(data):\n # 填充租房类型\n data.loc[(data['rentType'] == '未知方式') & (data['Room'] <= 1), 'rentType'] = '整租'\n # print(data.loc[(data['rentType']=='未知方式')&(data['Room_Bath']>1),'rentType'])\n data.loc[(data['rentType'] == '未知方式') & (data['Room_Bath'] > 1), 'rentType'] = '合租'\n data.loc[(data['rentType'] == '未知方式') & (data['Room'] > 1) & (data['area'] < 50), 'rentType'] = '合租'\n data.loc[(data['rentType'] == '未知方式') & (data['area'] / data['Room'] < 20), 'rentType'] = '合租'\n # data.loc[(data['rentType']=='未知方式')&(data['area']>60),'rentType']='合租'\n data.loc[(data['rentType'] == '未知方式') & (data['area'] <= 50) & (data['Room'] == 2), 'rentType'] = '合租'\n data.loc[(data['rentType'] == '未知方式') & (data['area'] > 60) & (data['Room'] == 2), 'rentType'] = '整租'\n data.loc[(data['rentType'] == '未知方式') & (data['area'] <= 60) & (data['Room'] == 3), 'rentType'] = '合租'\n data.loc[(data['rentType'] == '未知方式') & (data['area'] > 60) & (data['Room'] == 3), 'rentType'] = '整租'\n data.loc[(data['rentType'] == '未知方式') & (data['area'] >= 100) & (data['Room'] > 3), 'rentType'] = '整租'\n return data\n\ndata_train = fill(data_train)\n# data_train",
"_____no_output_____"
],
[
"# 特征合并\ndef newfeature(data):\n # 合并部分配套设施特征\n data['transportNum'] = 5 * data['subwayStationNum'] / data['subwayStationNum'].mean() + data['busStationNum'] / data['busStationNum'].mean()\n data['all_SchoolNum'] = 2 * data['interSchoolNum'] / data['interSchoolNum'].mean() + data['schoolNum'] / data['schoolNum'].mean() \\\n + data['privateSchoolNum'] / data['privateSchoolNum'].mean()\n data['all_hospitalNum'] = 2 * data['hospitalNum'] / data['hospitalNum'].mean() + data['drugStoreNum'] / data['drugStoreNum'].mean()\n data['all_mall'] = data['mallNum'] / data['mallNum'].mean() + data['superMarketNum'] / data['superMarketNum'].mean()\n data['otherNum'] = data['gymNum'] / data['gymNum'].mean() + data['bankNum'] / data['bankNum'].mean() + \\\n data['shopNum'] / data['shopNum'].mean() + 2 * data['parkNum'] / data['parkNum'].mean()\n\n data.drop(['subwayStationNum', 'busStationNum',\n 'interSchoolNum', 'schoolNum', 'privateSchoolNum',\n 'hospitalNum', 'drugStoreNum', 'mallNum', 'superMarketNum', 'gymNum', 'bankNum', 'shopNum', 'parkNum'],\n axis=1, inplace=True)\n # 提升0.0005\n# data['houseType_1sumcsu']=data['Bath'].map(lambda x:str(x))+data['month'].map(lambda x:str(x))\n# data['houseType_2sumcsu']=data['Bath'].map(lambda x:str(x))+data['communityName']\n# data['houseType_3sumcsu']=data['Bath'].map(lambda x:str(x))+data['plate']\n \n data.drop('houseType', axis=1, inplace=True)\n# data.drop('tradeTime', axis=1, inplace=True)\n \n data[\"area\"] = data[\"area\"].astype(int)\n\n # categorical_feats = ['rentType', 'houseFloor', 'houseToward', 'houseDecoration', 'communityName','region', 'plate']\n# categorical_feats = ['rentType', 'houseFloor', 'houseToward', 'houseDecoration', 'region', 'plate','cluster']\n\n return data\n\ndata_train = newfeature(data_train)\n# data_train\ndata_test = newfeature(data_test)\n# data_test",
"_____no_output_____"
],
[
"def encoder(data):\n # 特征编码\n data['rentType'] = LabelEncoder().fit_transform(data['rentType'])\n return data\n\ndata_train = encoder(data_train)\ndata_test = encoder(data_test)\n# data_train\n# data_test",
"_____no_output_____"
],
[
"#聚类\ndef cluster(train, test):\n from sklearn.mixture import GaussianMixture\n\n train['data_type'] = 0\n test['data_type'] = 1\n data = pd.concat([train, test], axis=0, join='outer')\n col = ['totalFloor',\n 'houseDecoration', 'communityName', 'region', 'plate', 'buildYear',\n\n 'tradeMeanPrice', 'tradeSecNum', 'totalNewTradeMoney',\n 'totalNewTradeArea', 'tradeNewMeanPrice', 'tradeNewNum', 'remainNewNum',\n\n 'landTotalPrice', 'landMeanPrice', 'totalWorkers',\n 'newWorkers', 'residentPopulation', 'lookNum',\n 'transportNum',\n 'all_SchoolNum', 'all_hospitalNum', 'all_mall', 'otherNum']\n\n # EM\n gmm = GaussianMixture(n_components=3, covariance_type='full', random_state=0)\n data['cluster']= pd.DataFrame(gmm.fit_predict(data[col]))\n\n\n col1 = ['totalFloor','houseDecoration', 'communityName', 'region', 'plate', 'buildYear']\n col2 = ['tradeMeanPrice', 'tradeSecNum', 'totalNewTradeMoney',\n 'totalNewTradeArea', 'tradeNewMeanPrice', 'tradeNewNum', 'remainNewNum',\n 'landTotalPrice', 'landMeanPrice', 'totalWorkers',\n 'newWorkers', 'residentPopulation', 'lookNum',\n 'transportNum',\n 'all_SchoolNum', 'all_hospitalNum', 'all_mall', 'otherNum']\n for feature1 in col1:\n for feature2 in col2:\n \n temp = data.groupby(['cluster',feature1])[feature2].agg('mean').reset_index(name=feature2+'_'+feature1+'_cluster_mean')\n temp.fillna(0, inplace=True)\n \n data = data.merge(temp, on=['cluster', feature1], how='left')\n \n new_train = data[data['data_type'] == 0]\n new_test = data[data['data_type'] == 1]\n new_train.drop('data_type', axis=1, inplace=True)\n new_test.drop(['data_type'], axis=1, inplace=True)\n \n return new_train, new_test\n\ndata_train, data_test = cluster(data_train, data_test)\ndata_test.drop('tradeMoney', axis=1, inplace=True)\n",
"_____no_output_____"
],
[
"#groupby生成统计特征:mean,std等\ndef groupby(train, test):\n train['data_type'] = 0\n test['data_type'] = 1\n data = pd.concat([train, test], axis=0, join='outer')\n# columns = ['rentType', 'houseFloor', 'houseToward', 'houseDecoration', 'communityName', 'region', 'plate']\n# for feature in columns:\n# data[feature] = LabelEncoder().fit_transform(data[feature])\n\n temp = data.groupby('communityName')['area'].agg({'com_area_mean': 'mean', 'com_area_std': 'std'})\n temp.fillna(0, inplace=True)\n data = data.merge(temp, on='communityName', how='left')\n \n data['price_per_area'] = data.tradeMeanPrice / data.area * 100\n temp = data.groupby('communityName')['price_per_area'].agg({'comm_price_mean': 'mean', 'comm_price_std': 'std'})\n temp.fillna(0, inplace=True)\n data = data.merge(temp, on='communityName', how='left')\n\n temp = data.groupby('plate')['price_per_area'].agg({'plate_price_mean': 'mean', 'plate_price_std': 'std'})\n temp.fillna(0, inplace=True)\n data = data.merge(temp, on='plate', how='left')\n data.drop('price_per_area', axis=1, inplace=True)\n\n temp = data.groupby('plate')['area'].agg({'plate_area_mean': 'mean', 'plate_area_std': 'std'})\n temp.fillna(0, inplace=True)\n data = data.merge(temp, on='plate', how='left')\n \n# temp = data.groupby(['plate'])['buildYear'].agg({'plate_year_mean': 'mean', 'plate_year_std': 'std'})\n# data = data.merge(temp, on='plate', how='left')\n# data.plate_year_mean = data.plate_year_mean.astype('int')\n# data['comm_plate_year_diff'] = data.buildYear - data.plate_year_mean\n# data.drop('plate_year_mean', axis=1, inplace=True)\n\n temp = data.groupby('plate')['transportNum'].agg('sum').reset_index(name='plate_trainsportNum')\n data = data.merge(temp, on='plate', how='left')\n temp = data.groupby(['communityName', 'plate'])['transportNum'].agg('sum').reset_index(name='com_trainsportNum')\n data = data.merge(temp, on=['communityName', 'plate'], how='left')\n data['trainsportNum_ratio'] = list(map(lambda x, y: round(x / y, 3) if 
y != 0 else -1,\n data['com_trainsportNum'], data['plate_trainsportNum']))\n data = data.drop(['com_trainsportNum', 'plate_trainsportNum'], axis=1)\n\n temp = data.groupby('plate')['all_SchoolNum'].agg('sum').reset_index(name='plate_all_SchoolNum')\n data = data.merge(temp, on='plate', how='left')\n temp = data.groupby(['communityName', 'plate'])['all_SchoolNum'].agg('sum').reset_index(name='com_all_SchoolNum')\n data = data.merge(temp, on=['communityName', 'plate'], how='left')\n data = data.drop(['com_all_SchoolNum', 'plate_all_SchoolNum'], axis=1)\n\n temp = data.groupby(['communityName', 'plate'])['all_mall'].agg('sum').reset_index(name='com_all_mall')\n data = data.merge(temp, on=['communityName', 'plate'], how='left')\n\n temp = data.groupby('plate')['otherNum'].agg('sum').reset_index(name='plate_otherNum')\n data = data.merge(temp, on='plate', how='left')\n temp = data.groupby(['communityName', 'plate'])['otherNum'].agg('sum').reset_index(name='com_otherNum')\n data = data.merge(temp, on=['communityName', 'plate'], how='left')\n data['other_ratio'] = list(map(lambda x, y: round(x / y, 3) if y != 0 else -1,\n data['com_otherNum'], data['plate_otherNum']))\n data = data.drop(['com_otherNum', 'plate_otherNum'], axis=1)\n\n temp = data.groupby(['month', 'communityName']).size().reset_index(name='communityName_saleNum')\n data = data.merge(temp, on=['month', 'communityName'], how='left')\n temp = data.groupby(['month', 'plate']).size().reset_index(name='plate_saleNum')\n data = data.merge(temp, on=['month', 'plate'], how='left')\n\n data['sale_ratio'] = round((data.communityName_saleNum + 1) / (data.plate_saleNum + 1), 3)\n data['sale_newworker_differ'] = 3 * data.plate_saleNum - data.newWorkers\n data.drop(['communityName_saleNum', 'plate_saleNum'], axis=1, inplace=True)\n\n new_train = data[data['data_type'] == 0]\n new_test = data[data['data_type'] == 1]\n new_train.drop('data_type', axis=1, inplace=True)\n new_test.drop(['data_type'], axis=1, inplace=True)\n 
return new_train, new_test\n\ndata_train, data_test = groupby(data_train, data_test)\ndata_train.drop('ID', axis=1, inplace=True)\ndata_test.drop('tradeMoney', axis=1, inplace=True)\n# data_train\n# data_test",
"_____no_output_____"
],
[
"# 过大量级值取log平滑(针对线性模型有效)\nbig_num_cols = ['totalTradeMoney','totalTradeArea','tradeMeanPrice','totalNewTradeMoney', 'totalNewTradeArea',\n 'tradeNewMeanPrice','remainNewNum', 'supplyNewNum', 'supplyLandArea',\n 'tradeLandArea','landTotalPrice','landMeanPrice','totalWorkers','newWorkers',\n 'residentPopulation','pv','uv']\nfor col in big_num_cols:\n data_train[col] = data_train[col].map(lambda x: np.log1p(x))\n data_test[col] = data_test[col].map(lambda x: np.log1p(x))\n \n",
"_____no_output_____"
],
[
"target_train = data_train['tradeMoney'].tolist().reshape(1, -1)\n# train = data_train.drop('tradeMoney', axis=1, inplace=True)\ntrain = data_train['pv'].tolist().reshape(1, -1)\ntarget_train\ntrain\ntest = data_test\n#对比特征工程前后线性模型结果情况\ntest = test.fillna(0)\n# Lasso回归\nfrom sklearn.linear_model import Lasso\n\nlasso = Lasso(alpha=0.1)\nlasso.fit(train, target_train)\n#预测测试集和训练集结果\ny_pred_train = lasso.predict(train)\ny_pred_test = lasso.predict(test)\n#对比结果\nfrom sklearn.metrics import r2_score\nscore_train = r2_score(y_pred_train, target_train)\nprint(\"训练集结果:\",score_train)\nscore_test = r2_score(y_pred_test, target_test)\nprint(\"测试集结果:\",score_test)",
"_____no_output_____"
]
],
[
[
"## 特征选择",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestRegressor\nrf = RandomForestRegressor()\n# 训练随机森林模型,并通过feature_importances_属性获取每个特征的重要性分数。rf = RandomForestRegressor()\ntrain = data_train.drop('tradeMoney', axis=1)\ntrain = train.fillna(0)\ny_pred = data_train['tradeMoney']\nrf.fit(train, y_pred)\nprint(\"Features sorted by their score:\")\nprint(sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), data_train.columns), reverse=True))",
"_____no_output_____"
],
[
"def drop_feature(data):\n drop_feature = ['rentType', 'newWorkers', 'tradeLandNum', 'tradeLandArea', 'supplyLandArea', 'region', 'landMeanPrice', 'supplyLandNum', 'landTotalPrice']\n for i in drop_feature:\n data.drop(i, axis=1)\n return data\n\ndata_train = drop_feature(data_train)\ndata_test = drop_feature(data_test)",
"_____no_output_____"
],
[
"data_train.to_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/train_data(featured).csv')\n# y_pred = data['tradeMoney']\n# data.drop('tradeMoney', axis=1, inplace=True)\n# data.to_csv('./train_data(featured_without_tradeMoney).csv')\n\ndata_test.to_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/test_a(cleared).csv')",
"_____no_output_____"
],
[
"train = pd.read_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/train_data(featured).csv', index_col=0)\ntest = pd.read_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/test_a(cleared).csv', index_col=0)\n# test = pd.read_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/test_a.csv')\n# train.columns\n# test.columns",
"_____no_output_____"
],
[
"train_result = train['tradeMoney']\ntrain_data = train.drop(['tradeMoney'],axis=1)\ntest_data = test.drop(['ID'],axis=1)\ntest_id = test.loc[:,'ID']\n\n# xbox\nX_train, X_test, Y_train, Y_test = train_test_split(train_data, train_result, test_size=0.1, random_state=2333)\n\nxgb_val = xgb.DMatrix(X_test, label=Y_test)\nxgb_train = xgb.DMatrix(X_train, label=Y_train)\nxgb_test = xgb.DMatrix(test_data)\n# xgbooster\nparams = {\n 'booster': 'gbtree',\n 'objective': 'reg:linear', # 多分类的问题\n 'n_estimators': 2200,\n 'gamma': 0.0468, # 用于控制是否后剪枝的参数,越大越保守,一般0.1、0.2这样子。\n 'max_depth': 3, # 构建树的深度,越大越容易过拟合\n \"reg_alpha\": 0.4640,\n 'lambda': 7, # 控制模型复杂度的权重值的L2正则化项参数,参数越大,模型越不容易过拟合。\n 'subsample': 0.5213, # 随机采样训练样本\n 'colsample_bytree': 0.4603, # 生成树时进行的列采样\n 'colsample_bylevel': 0.7,\n 'min_child_weight': 2,\n # 这个参数默认是 1,是每个叶子里面 h 的和至少是多少,对正负样本不均衡时的 0-1 分类而言\n # ,假设 h 在 0.01 附近,min_child_weight 为 1 意味着叶子节点中最少需要包含 100 个样本。\n # 这个参数非常影响结果,控制叶子节点中二阶导的和的最小值,该参数值越小,越容易 overfitting。\n 'silent': 1, # 设置成1则没有运行信息输出,最好是设置为0.\n 'eta': 0.05, # 如同学习率 0.007\n 'seed': 7,\n 'reg_lambda': 0.8571\n}\n\nplst = list(params.items())\nnum_rounds = 10000 # 迭代次数\nwatchlist = [(xgb_train, 'train'), (xgb_val, 'val')]\n\n# 训练模型并保存\n# early_stopping_rounds 当设置的迭代次数较大时,early_stopping_rounds 可在一定的迭代次数内准确率没有提升就停止训练\nmodel = xgb.train(plst, xgb_train, num_rounds, watchlist, early_stopping_rounds=300, verbose_eval=50)\nmodel.save_model('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/xgb.pth') # 用于存储训练出的模型\nprint(\"模型训练完成\")\n\nprint(\"训练完毕,开始预测\")\ntest_result = model.predict(xgb_test, ntree_limit=model.best_ntree_limit)\n\ndata_df = pd.DataFrame(test_result)\nfilename = '/home/cc/holdshy/XJQ/数据竞赛(房租预测)/xgb_results.csv'\ndata_df.to_csv(filename, encoding='utf-8')\n",
"_____no_output_____"
],
[
"xgb_reg = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,\n learning_rate=0.05, max_depth=3,\n min_child_weight=2, n_estimators=2200,\n reg_alpha=0.4640, reg_lambda=0.8571,\n subsample=0.5213, silent=True,\n random_state=7, nthread=-1)\nxgb_reg.fit(train_data, train_result)\nY_pred = xgb_reg.predict(test_data)\n\nsub = pd.DataFrame({\n 'id':test_id,\n 'price':Y_pred\n})\nsub.to_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/XGBRegressor.csv',index=False) #baseline:2.554\n",
"_____no_output_____"
]
],
[
[
"<img src=\"https://img-blog.csdnimg.cn/20190712170129921.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzM5NzU2NzE5,size_16,color_FFFFFF,t_70\" width=600 heigth=400 >",
"_____no_output_____"
],
[
"-",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings('ignore')\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\" \nfrom sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom sklearn.metrics import mean_squared_error\nfrom mlxtend.regressor import StackingCVRegressor\nimport xgboost as xgb\nimport lightgbm as lgb\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"train = pd.read_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/train_data(featured).csv', index_col=0)\ntest = pd.read_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/test_a(cleared).csv', index_col=0)\n# train['tradeMoney']",
"_____no_output_____"
],
[
"# =========================model begin =========================================================== #\ny_train = train['tradeMoney']\ntrain = train.drop(['tradeMoney'],axis=1)\ntest_ID = test['ID']\ntest = test.drop(['ID'],axis=1)\n\nn_folds = 5\ndef rmsle_cv(model):\n kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(train)\n mse = np.sqrt(-cross_val_score(model, train, y_train, scoring=\"neg_mean_squared_error\", cv=kf))\n return mse\n\nkfolds = KFold(n_splits=n_folds, shuffle=True, random_state=42)\nalph = [0.01, 0.001, 0.0001, 0.0002, 0.0004, 0.0008, 0.002, 0.004, 0.008, 1, 2, 4, 6, 8, 10, 12]\nalph2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\nlasso = make_pipeline(RobustScaler(), LassoCV(alphas=alph, cv=kfolds, random_state=1))\nENet = make_pipeline(RobustScaler(), ElasticNetCV(alphas=alph, l1_ratio=.9, cv=kfolds, random_state=3))\nridge = make_pipeline(RobustScaler(), RidgeCV(alphas=alph2, cv=kfolds))\nKRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)\n\nGBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,\n max_depth=4, max_features='sqrt',\n min_samples_leaf=15, min_samples_split=10,\n loss='huber', random_state=5)\n\nmodel_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,\n learning_rate=0.05, max_depth=3,\n min_child_weight=2, n_estimators=2200,\n reg_alpha=0.4640, reg_lambda=0.8571,\n subsample=0.5213, silent=True,\n random_state=7, nthread=-1)\n\nmodel_lgb = lgb.LGBMRegressor(objective='regression', num_leaves=5,\n learning_rate=0.05, n_estimators=720,\n max_bin=55, bagging_fraction=0.8,\n bagging_freq=5, feature_fraction=0.2319,\n feature_fraction_seed=9, bagging_seed=9,\n min_data_in_leaf=6, min_sum_hessian_in_leaf=11)\n\nstacked_averaged_models = StackingCVRegressor(regressors=(ENet, GBoost, KRR),\n meta_regressor=lasso,\n use_features_in_secondary=True)\n\n# stacked_averaged_models = StackingCVRegressor(regressors=(ridge, lasso, ENet, GBoost, model_xgb, model_lgb),\n# 
meta_regressor=model_lgb,\n# use_features_in_secondary=True)\n\ndef rmsle(y, y_pred):\n return np.sqrt(mean_squared_error(y, y_pred))\n\n# lasso.fit(train, y_train)\n# lasso_train_pred = lasso.predict(train)\n# lasso_pred = np.expm1(lasso.predict(test))",
"_____no_output_____"
],
[
"# 下一步,更改融合方式\n# model_gboost = GradientBoostingRegressor\nGBoost.fit(train, y_train)\nGBoost_train_pred = GBoost.predict(train)\nGBT_feature = GBoost.feature_importances_\ngbt_out = np.argsort(GBT_feature)\nprint('GBoost RMSLE:')\nprint(rmsle(y_train, GBoost_train_pred))\n\n# model_xgb = xgb.XGBRegressor\nmodel_xgb.fit(train, y_train)\nxgb_train_pred = model_xgb.predict(train)\nxgb_importance = model_xgb.feature_importances_\nxgb_out = np.argsort(xgb_importance)\nprint('XGBoost RMSLE:')\nprint(rmsle(y_train, xgb_train_pred))\n\n# model_lgb = lgb.LGBMRegressor\nmodel_lgb.fit(train, y_train)\nlgb_train_pred = model_lgb.predict(train)\n# save all models feature importance and drop the bad feature\n# 给特征排序,然后删除无用特征\n# lgb.plot_importance(model_lgb, max_num_features=30)\n# plt.show()\nbooster = model_lgb.booster_\nlgb_importance = booster.feature_importance(importance_type='split')\nlgb_out = np.argsort(lgb_importance)\nprint('LightGBM RMSLE:')\nprint(rmsle(y_train, lgb_train_pred))\n",
"GBoost RMSLE:\n636.5137006515322\nXGBoost RMSLE:\n673.1352167575985\nLightGBM RMSLE:\n860.3949053254771\n"
],
[
"# stacked_averaged_models.fit(train.values, y_train)\n# stacked_train_pred = stacked_averaged_models.predict(train.values)\n# stacked_pred = np.expm1(stacked_averaged_models.predict(test.values))\n# print('rmsle(y_train, stacked_train_pred)')\n# print(rmsle(y_train, stacked_train_pred))\n# stacked_pred\n\ndrop_num = 15\nlgb_out = lgb_out[:drop_num]\nxgb_out = xgb_out[:drop_num]\ngbt_out = gbt_out[:drop_num]\n# drop_feature = [val for val in lgb_out if (val in xgb_out and val in gbt_out)]\ndrop_feature = list(set(lgb_out).union(xgb_out).union(gbt_out))\n# print(drop_feature)\ntrain.columns[drop_feature]\nfor i in train.columns[drop_feature]:\n train.drop(i, axis=1, inplace=True)\n test.drop(i, axis=1, inplace=True)\n# train.shape # (40134, 36)\n# test.shape # (2469, 36)",
"_____no_output_____"
],
[
"# ========================================== pred ===================================#\nstacked_averaged_models.fit(train, y_train)\nstacked_train_pred = stacked_averaged_models.predict(train)\nstacked_pred = np.expm1(stacked_averaged_models.predict(test))\nprint('stacked_averaged_models RMSLE:')\nprint(rmsle(y_train, stacked_train_pred))\n\nmodel_xgb.fit(train, y_train)\nmodel_lgb.fit(train, y_train)\nxgb_pred = np.expm1(model_xgb.predict(test))\n# GBoost_pred = np.expm1(GBoost.predict(test))\nlgb_pred = np.expm1(model_lgb.predict(test))\nprint('0.7stacked + 0.15xgb + 0.15lgb RMSLE:')\nprint(rmsle(y_train, stacked_train_pred * 0.7 + xgb_train_pred * 0.15 + lgb_train_pred * 0.15))\n\nensemble = stacked_pred * 0.7 + xgb_pred * 0.15 + lgb_pred * 0.15\nsubmission = pd.DataFrame()\nsubmission['Id'] = test_ID\nsubmission['SalePrice'] = ensemble\nq1 = submission['SalePrice'].quantile(0.005)\nq2 = submission['SalePrice'].quantile(0.995)\nsubmission['SalePrice'] = submission['SalePrice'].apply(lambda x: x if x > q1 else x * 0.85)\nsubmission['SalePrice'] = submission['SalePrice'].apply(lambda x: x if x < q2 else x * 1.1)\nsubmission.to_csv('/home/cc/holdshy/XJQ/数据竞赛(房租预测)/stacking.csv', index=False)",
"_____no_output_____"
],
[
"X_test.shape\nY_test.shape\ntest_id.shape\nY_pred.shape\ndata_train.shape\ndata_test.shape",
"_____no_output_____"
],
[
"# data_test['tradeMoney']\ndata_train.columns#[40:]\ndata_test.columns#[39:]",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7412ab0ade04fce626faa325126e3f05ce28648 | 14,490 | ipynb | Jupyter Notebook | example-notebooks/exploring-elements.ipynb | hussainsultan/vdom | 07505bc9664fe5759fc7db2db43d32d35cf277fd | [
"BSD-3-Clause"
] | 218 | 2017-09-15T00:22:43.000Z | 2021-11-06T20:55:33.000Z | example-notebooks/exploring-elements.ipynb | hussainsultan/vdom | 07505bc9664fe5759fc7db2db43d32d35cf277fd | [
"BSD-3-Clause"
] | 88 | 2017-09-19T21:01:01.000Z | 2021-06-15T16:15:12.000Z | example-notebooks/exploring-elements.ipynb | hussainsultan/vdom | 07505bc9664fe5759fc7db2db43d32d35cf277fd | [
"BSD-3-Clause"
] | 38 | 2017-09-22T04:52:41.000Z | 2021-11-08T16:29:51.000Z | 26.933086 | 159 | 0.401587 | [
[
[
"from vdom.helpers import *",
"_____no_output_____"
]
],
[
[
"You can put video within your notebook using `%%html`, writing literal strings or with `IPython.display.HTML`:",
"_____no_output_____"
]
],
[
[
"%%html\n\n<video width=\"480\" controls\n poster=\"https://archive.org/download/WebmVp8Vorbis/webmvp8.gif\" >\n <source\n src=\"https://archive.org/download/WebmVp8Vorbis/webmvp8.webm\"\n type=\"video/webm\">\n <source\n src=\"https://archive.org/download/WebmVp8Vorbis/webmvp8_512kb.mp4\"\n type=\"video/mp4\">\n <source\n src=\"https://archive.org/download/WebmVp8Vorbis/webmvp8.ogv\"\n type=\"video/ogg\">\n Your browser doesn't support HTML5 video tag.\n</video>",
"_____no_output_____"
]
],
[
[
"With vdom, we can create it declaratively",
"_____no_output_____"
]
],
[
[
"vid = video(source(\n src=\"https://archive.org/download/WebmVp8Vorbis/webmvp8.webm\",\n type=\"video/webm\"),\n source(\n src=\"https://archive.org/download/WebmVp8Vorbis/webmvp8_512kb.mp4\",\n type=\"video/mp4\"),\n source(\n src=\"https://archive.org/download/WebmVp8Vorbis/webmvp8.ogv\",\n type=\"video/ogg\"),\n \"Your browser doesn't support HTML5 video tag.\", width=\"480\", controls=True, poster=\"https://archive.org/download/WebmVp8Vorbis/webmvp8.gif\")",
"_____no_output_____"
]
],
[
[
"and display it when we want",
"_____no_output_____"
]
],
[
[
"vid",
"_____no_output_____"
],
[
"hand = display(vid, display_id=True)",
"_____no_output_____"
]
],
[
[
"Since you can change attributes of the element directly with `display` updates, we can turn controls off",
"_____no_output_____"
]
],
[
[
"attrs = vid.attributes.copy()\nattrs['controls'] = False\nattrs['autoplay'] = False\nhand.update(video(vid.children, **attrs))",
"_____no_output_____"
]
],
[
[
"There are many more elements available",
"_____no_output_____"
]
],
[
[
"em('what')",
"_____no_output_____"
],
[
"strong('bad')",
"_____no_output_____"
],
[
"abbr(\"lol\", title=\"laugh out loud\")",
"_____no_output_____"
],
[
"time(\"10/05/13 at 10 PM\", datetime=\"2013-10-05 22:00\")",
"_____no_output_____"
],
[
"p(\n \"pay attention, because you'll find out that you can\",\n mark(' highlight', style={'backgroundColor': 'blue', 'color': 'white'}),\n span(\" to \", style={ 'backgroundColor': 'yellow' }),\n \" your heart's content\"\n )",
"_____no_output_____"
],
[
"inp = input_(type='color', value=\"#eeeeee\")\nhand = display(inp, display_id=True)",
"_____no_output_____"
],
[
"# Change the value directly from Python\ninp.attributes['value'] = \"#DD55FF\"\n# Then update the version in your notebook\nhand.update(inp)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74143e84e16bd0264f5a5898768627d0d9779db | 27,865 | ipynb | Jupyter Notebook | Sample/Day_16_Sample.ipynb | sueshow/Data_Science_Marathon | 05219bca6c524420f0764e66ecdf51b7da7b2a49 | [
"MIT"
] | null | null | null | Sample/Day_16_Sample.ipynb | sueshow/Data_Science_Marathon | 05219bca6c524420f0764e66ecdf51b7da7b2a49 | [
"MIT"
] | null | null | null | Sample/Day_16_Sample.ipynb | sueshow/Data_Science_Marathon | 05219bca6c524420f0764e66ecdf51b7da7b2a49 | [
"MIT"
] | 2 | 2021-03-31T01:58:39.000Z | 2021-04-06T08:17:43.000Z | 27.132425 | 133 | 0.352665 | [
[
[
"<img width=150 src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/1/1a/NumPy_logo.svg/200px-NumPy_logo.svg.png\"></img>",
"_____no_output_____"
],
[
"* 範例目標:\n 1. 實做 groupby 函式實現資料科學的 Split-Apply-Combine 策略\n* 範例重點:\n 1. Groupby:可以同時針對多個欄位做 Group,並在Group中做運算\n 2. Split:將大的數據集拆成可獨立計算的小數據集\n 3. Apply:獨立計算各個小數據集\n 4. Combine:將小數據集運算結果合併",
"_____no_output_____"
]
],
[
[
"# 載入 NumPy, Pandas 套件\nimport numpy as np\nimport pandas as pd\n\n# 檢查正確載入與版本\nprint(np)\nprint(np.__version__)\nprint(pd)\nprint(pd.__version__)",
"<module 'numpy' from 'D:\\\\anaconda3\\\\lib\\\\site-packages\\\\numpy\\\\__init__.py'>\n1.19.2\n<module 'pandas' from 'D:\\\\anaconda3\\\\lib\\\\site-packages\\\\pandas\\\\__init__.py'>\n1.1.3\n"
]
],
[
[
"# 【基礎16=進階15】",
"_____no_output_____"
]
],
[
[
"score_df = pd.DataFrame([[1,50,80,70,'boy'], \n [2,60,45,50,'boy'],\n [3,98,43,55,'boy'],\n [4,70,69,89,'boy'],\n [5,56,79,60,'girl'],\n [6,60,68,55,'girl'],\n [7,45,70,77,'girl'],\n [8,55,77,76,'girl'],\n [9,25,57,60,'girl'],\n [10,88,40,43,'girl']],columns=['student_id','math_score','english_score','chinese_score','sex'])\nscore_df = score_df.set_index('student_id')\nscore_df",
"_____no_output_____"
]
],
[
[
"### 平均",
"_____no_output_____"
],
[
"* (法一)運用索引將資料分開",
"_____no_output_____"
]
],
[
[
"boy_score_df = score_df.loc[score_df.sex=='boy']\ngirl_score_df = score_df.loc[score_df.sex=='girl']\nprint(boy_score_df.mean())\nprint(girl_score_df.mean())",
"math_score 69.50\nenglish_score 59.25\nchinese_score 66.00\ndtype: float64\nmath_score 54.833333\nenglish_score 65.166667\nchinese_score 61.833333\ndtype: float64\n"
]
],
[
[
"* (法二)運用groupby方法",
"_____no_output_____"
]
],
[
[
"score_df.groupby('sex').mean()",
"_____no_output_____"
]
],
[
[
"* 新增欄位class",
"_____no_output_____"
]
],
[
[
"score_df['class'] = [1,2,1,2,1,2,1,2,1,2]\nscore_df",
"_____no_output_____"
]
],
[
[
"### Group By",
"_____no_output_____"
],
[
"##### 對多個欄位做分析",
"_____no_output_____"
],
[
"* 寫法:你的 dataframe 變數名稱.groupby(['要分析之行的名稱', '可以多個']).運算函數名稱()\n * Split:將大的數據集拆成可獨立計算的小數據集,如:拆成男生、女生資料\n * Apply:獨立計算各個小數據集,如成績取平均\n * Combine:將小數據集運算結果合併",
"_____no_output_____"
]
],
[
[
"score_df.groupby(['sex','class']).mean()",
"_____no_output_____"
]
],
[
[
"##### 對欄位做多個分析",
"_____no_output_____"
],
[
"* 寫法:你的 dataframe 變數名稱.groupby(['要分析之行的名稱']).agg(['運算函數名稱','可以多個運算函數'])",
"_____no_output_____"
]
],
[
[
"score_df.groupby(['sex']).agg(['mean','std'])",
"_____no_output_____"
]
],
[
[
"##### 對多個欄位做多個分析",
"_____no_output_____"
],
[
"* 寫法:你的 dataframe 變數名稱.groupby(['要分析之行的名稱','可以多個']).agg(['運算函數名稱','可以多個運算函數'])",
"_____no_output_____"
]
],
[
[
"score_df.groupby(['sex','class']).agg(['mean','max'])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e741449cb8d73aa0c797982e3c75a95afe801377 | 62,350 | ipynb | Jupyter Notebook | site/en/r2/tutorials/text/text_generation.ipynb | mullikine/tfdocs | 3fb3faf2a0238c0f7ec38012c93e06f4222230c7 | [
"Apache-2.0"
] | 2 | 2021-01-27T08:34:26.000Z | 2021-04-06T00:07:55.000Z | site/en/r2/tutorials/text/text_generation.ipynb | mullikine/tfdocs | 3fb3faf2a0238c0f7ec38012c93e06f4222230c7 | [
"Apache-2.0"
] | 1 | 2019-03-20T11:26:42.000Z | 2019-03-20T11:26:42.000Z | site/en/r2/tutorials/text/text_generation.ipynb | mullikine/tfdocs | 3fb3faf2a0238c0f7ec38012c93e06f4222230c7 | [
"Apache-2.0"
] | null | null | null | 33.57566 | 493 | 0.500609 | [
[
[
"##### Copyright 2019 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Text generation with an RNN\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n<td>\n<a target=\"_blank\" href=\"https://www.tensorflow.org/alpha/tutorials/text/text_generation\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n</td><td>\n<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/text/text_generation.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n</td><td>\n<a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/text/text_generation.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>",
"_____no_output_____"
],
[
"This tutorial demonstrates how to generate text using a character-based RNN. We will work with a dataset of Shakespeare's writing from Andrej Karpathy's [The Unreasonable Effectiveness of Recurrent Neural Networks](http://karpathy.github.io/2015/05/21/rnn-effectiveness/). Given a sequence of characters from this data (\"Shakespear\"), train a model to predict the next character in the sequence (\"e\"). Longer sequences of text can be generated by calling the model repeatedly.\n\nNote: Enable GPU acceleration to execute this notebook faster. In Colab: *Runtime > Change runtime type > Hardware acclerator > GPU*. If running locally make sure TensorFlow version >= 1.11.\n\nThis tutorial includes runnable code implemented using [tf.keras](https://www.tensorflow.org/programmers_guide/keras) and [eager execution](https://www.tensorflow.org/programmers_guide/eager). The following is sample output when the model in this tutorial trained for 30 epochs, and started with the string \"Q\":\n\n<pre>\nQUEENE:\nI had thought thou hadst a Roman; for the oracle,\nThus by All bids the man against the word,\nWhich are so weak of care, by old care done;\nYour children were in your holy love,\nAnd the precipitation through the bleeding throne.\n\nBISHOP OF ELY:\nMarry, and will, my lord, to weep in such a one were prettiest;\nYet now I was adopted heir\nOf the world's lamentable day,\nTo watch the next way with his father with his face?\n\nESCALUS:\nThe cause why then we are all resolved more sons.\n\nVOLUMNIA:\nO, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, no, it is no sin it should be dead,\nAnd love and pale as any will to that word.\n\nQUEEN ELIZABETH:\nBut how long have I heard the soul for this world,\nAnd show his hands of life be proved to stand.\n\nPETRUCHIO:\nI say he look'd on, if I must be content\nTo stay him from the fatal of our country's bliss.\nHis lordship pluck'd from this sentence then for prey,\nAnd then let us twain, being the 
moon,\nwere she such a case as fills m\n</pre>\n\nWhile some of the sentences are grammatical, most do not make sense. The model has not learned the meaning of words, but consider:\n\n* The model is character-based. When training started, the model did not know how to spell an English word, or that words were even a unit of text.\n\n* The structure of the output resembles a play—blocks of text generally begin with a speaker name, in all capital letters similar to the dataset.\n\n* As demonstrated below, the model is trained on small batches of text (100 characters each), and is still able to generate a longer sequence of text with coherent structure.",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
],
[
"### Import TensorFlow and other libraries",
"_____no_output_____"
]
],
[
[
"from __future__ import absolute_import, division, print_function, unicode_literals\n\n!pip install tensorflow-gpu==2.0.0-alpha0\nimport tensorflow as tf\n\nimport numpy as np\nimport os\nimport time",
"Collecting tensorflow-gpu==2.0.0-alpha0\nSuccessfully installed google-pasta-0.1.4 tb-nightly-1.14.0a20190303 tensorflow-estimator-2.0-preview-1.14.0.dev2019030300 tensorflow-gpu==2.0.0-alpha0-2.0.0.dev20190303\n"
]
],
[
[
"### Download the Shakespeare dataset\n\nChange the following line to run this code on your own data.",
"_____no_output_____"
]
],
[
[
"path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')",
"Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt\n1122304/1115394 [==============================] - 0s 0us/step\n"
]
],
[
[
"### Read the data\n\nFirst, look in the text:",
"_____no_output_____"
]
],
[
[
"# Read, then decode for py2 compat.\ntext = open(path_to_file, 'rb').read().decode(encoding='utf-8')\n# length of text is the number of characters in it\nprint ('Length of text: {} characters'.format(len(text)))",
"Length of text: 1115394 characters\n"
],
[
"# Take a look at the first 250 characters in text\nprint(text[:250])",
"First Citizen:\nBefore we proceed any further, hear me speak.\n\nAll:\nSpeak, speak.\n\nFirst Citizen:\nYou are all resolved rather to die than to famish?\n\nAll:\nResolved. resolved.\n\nFirst Citizen:\nFirst, you know Caius Marcius is chief enemy to the people.\n\n"
],
[
"# The unique characters in the file\nvocab = sorted(set(text))\nprint ('{} unique characters'.format(len(vocab)))",
"65 unique characters\n"
]
],
[
[
"## Process the text",
"_____no_output_____"
],
[
"### Vectorize the text\n\nBefore training, we need to map strings to a numerical representation. Create two lookup tables: one mapping characters to numbers, and another for numbers to characters.",
"_____no_output_____"
]
],
[
[
"# Creating a mapping from unique characters to indices\nchar2idx = {u:i for i, u in enumerate(vocab)}\nidx2char = np.array(vocab)\n\ntext_as_int = np.array([char2idx[c] for c in text])",
"_____no_output_____"
]
],
[
[
"Now we have an integer representation for each character. Notice that we mapped the character as indexes from 0 to `len(unique)`.",
"_____no_output_____"
]
],
[
[
"print('{')\nfor char,_ in zip(char2idx, range(20)):\n print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))\nprint(' ...\\n}')",
"{\n '\\n': 0,\n ' ' : 1,\n '!' : 2,\n '$' : 3,\n '&' : 4,\n \"'\" : 5,\n ',' : 6,\n '-' : 7,\n '.' : 8,\n '3' : 9,\n ':' : 10,\n ';' : 11,\n '?' : 12,\n 'A' : 13,\n 'B' : 14,\n 'C' : 15,\n 'D' : 16,\n 'E' : 17,\n 'F' : 18,\n 'G' : 19,\n ...\n}\n"
],
[
"# Show how the first 13 characters from the text are mapped to integers\nprint ('{} ---- characters mapped to int ---- > {}'.format(repr(text[:13]), text_as_int[:13]))",
"'First Citizen' ---- characters mapped to int ---- > [18 47 56 57 58 1 15 47 58 47 64 43 52]\n"
]
],
[
[
"### The prediction task",
"_____no_output_____"
],
[
"Given a character, or a sequence of characters, what is the most probable next character? This is the task we're training the model to perform. The input to the model will be a sequence of characters, and we train the model to predict the output—the following character at each time step.\n\nSince RNNs maintain an internal state that depends on the previously seen elements, given all the characters computed until this moment, what is the next character?\n",
"_____no_output_____"
],
[
"### Create training examples and targets\n\nNext divide the text into example sequences. Each input sequence will contain `seq_length` characters from the text.\n\nFor each input sequence, the corresponding targets contain the same length of text, except shifted one character to the right.\n\nSo break the text into chunks of `seq_length+1`. For example, say `seq_length` is 4 and our text is \"Hello\". The input sequence would be \"Hell\", and the target sequence \"ello\".\n\nTo do this first use the `tf.data.Dataset.from_tensor_slices` function to convert the text vector into a stream of character indices.",
"_____no_output_____"
]
],
[
[
"# The maximum length sentence we want for a single input in characters\nseq_length = 100\nexamples_per_epoch = len(text)//seq_length\n\n# Create training examples / targets\nchar_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)\n\nfor i in char_dataset.take(5):\n print(idx2char[i.numpy()])",
"F\ni\nr\ns\nt\n"
]
],
[
[
"The `batch` method lets us easily convert these individual characters to sequences of the desired size.",
"_____no_output_____"
]
],
[
[
"sequences = char_dataset.batch(seq_length+1, drop_remainder=True)\n\nfor item in sequences.take(5):\n print(repr(''.join(idx2char[item.numpy()])))",
"'First Citizen:\\nBefore we proceed any further, hear me speak.\\n\\nAll:\\nSpeak, speak.\\n\\nFirst Citizen:\\nYou '\n'are all resolved rather to die than to famish?\\n\\nAll:\\nResolved. resolved.\\n\\nFirst Citizen:\\nFirst, you k'\n\"now Caius Marcius is chief enemy to the people.\\n\\nAll:\\nWe know't, we know't.\\n\\nFirst Citizen:\\nLet us ki\"\n\"ll him, and we'll have corn at our own price.\\nIs't a verdict?\\n\\nAll:\\nNo more talking on't; let it be d\"\n'one: away, away!\\n\\nSecond Citizen:\\nOne word, good citizens.\\n\\nFirst Citizen:\\nWe are accounted poor citi'\n"
]
],
[
[
"For each sequence, duplicate and shift it to form the input and target text by using the `map` method to apply a simple function to each batch:",
"_____no_output_____"
]
],
[
[
"def split_input_target(chunk):\n input_text = chunk[:-1]\n target_text = chunk[1:]\n return input_text, target_text\n\ndataset = sequences.map(split_input_target)",
"_____no_output_____"
]
],
[
[
"Print the first examples input and target values:",
"_____no_output_____"
]
],
[
[
"for input_example, target_example in dataset.take(1):\n print ('Input data: ', repr(''.join(idx2char[input_example.numpy()])))\n print ('Target data:', repr(''.join(idx2char[target_example.numpy()])))",
"Input data: 'First Citizen:\\nBefore we proceed any further, hear me speak.\\n\\nAll:\\nSpeak, speak.\\n\\nFirst Citizen:\\nYou'\nTarget data: 'irst Citizen:\\nBefore we proceed any further, hear me speak.\\n\\nAll:\\nSpeak, speak.\\n\\nFirst Citizen:\\nYou '\n"
]
],
[
[
"Each index of these vectors is processed as one time step. For the input at time step 0, the model receives the index for \"F\" and tries to predict the index for \"i\" as the next character. At the next timestep, it does the same thing but the `RNN` considers the previous step context in addition to the current input character.",
"_____no_output_____"
]
],
[
[
"for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):\n print(\"Step {:4d}\".format(i))\n print(\" input: {} ({:s})\".format(input_idx, repr(idx2char[input_idx])))\n print(\" expected output: {} ({:s})\".format(target_idx, repr(idx2char[target_idx])))",
"Step 0\n input: 18 ('F')\n expected output: 47 ('i')\nStep 1\n input: 47 ('i')\n expected output: 56 ('r')\nStep 2\n input: 56 ('r')\n expected output: 57 ('s')\nStep 3\n input: 57 ('s')\n expected output: 58 ('t')\nStep 4\n input: 58 ('t')\n expected output: 1 (' ')\n"
]
],
[
[
"### Create training batches\n\nWe used `tf.data` to split the text into manageable sequences. But before feeding this data into the model, we need to shuffle the data and pack it into batches.",
"_____no_output_____"
]
],
[
[
"# Batch size\nBATCH_SIZE = 64\n\n# Buffer size to shuffle the dataset\n# (TF data is designed to work with possibly infinite sequences,\n# so it doesn't attempt to shuffle the entire sequence in memory. Instead,\n# it maintains a buffer in which it shuffles elements).\nBUFFER_SIZE = 10000\n\ndataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)\n\ndataset",
"_____no_output_____"
]
],
[
[
"## Build The Model",
"_____no_output_____"
],
[
"Use `tf.keras.Sequential` to define the model. For this simple example three layers are used to define our model:\n\n* `tf.keras.layers.Embedding`: The input layer. A trainable lookup table that will map the numbers of each character to a vector with `embedding_dim` dimensions;\n* `tf.keras.layers.GRU`: A type of RNN with size `units=rnn_units` (You can also use a LSTM layer here.)\n* `tf.keras.layers.Dense`: The output layer, with `vocab_size` outputs.",
"_____no_output_____"
]
],
[
[
"# Length of the vocabulary in chars\nvocab_size = len(vocab)\n\n# The embedding dimension\nembedding_dim = 256\n\n# Number of RNN units\nrnn_units = 1024",
"_____no_output_____"
],
[
"def build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim,\n batch_input_shape=[batch_size, None]),\n tf.keras.layers.LSTM(rnn_units,\n return_sequences=True,\n stateful=True,\n recurrent_initializer='glorot_uniform'),\n tf.keras.layers.Dense(vocab_size)\n ])\n return model",
"_____no_output_____"
],
[
"model = build_model(\n vocab_size = len(vocab),\n embedding_dim=embedding_dim,\n rnn_units=rnn_units,\n batch_size=BATCH_SIZE)",
"WARNING: Logging before flag parsing goes to stderr.\nW0304 03:48:46.706135 140067035297664 tf_logging.py:161] <tensorflow.python.keras.layers.recurrent.UnifiedLSTM object at 0x7f637273ccf8>: Note that this layer is not optimized for performance. Please use tf.keras.layers.CuDNNLSTM for better performance on GPU.\n"
]
],
[
[
"For each character the model looks up the embedding, runs the GRU one timestep with the embedding as input, and applies the dense layer to generate logits predicting the log-likelihood of the next character:\n\n",
"_____no_output_____"
],
[
"## Try the model\n\nNow run the model to see that it behaves as expected.\n\nFirst check the shape of the output:",
"_____no_output_____"
]
],
[
[
"for input_example_batch, target_example_batch in dataset.take(1):\n example_batch_predictions = model(input_example_batch)\n print(example_batch_predictions.shape, \"# (batch_size, sequence_length, vocab_size)\")",
"(64, 100, 65) # (batch_size, sequence_length, vocab_size)\n"
]
],
[
[
"In the above example the sequence length of the input is `100` but the model can be run on inputs of any length:",
"_____no_output_____"
]
],
[
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param #\n=================================================================\nembedding (Embedding) (64, None, 256) 16640\n_________________________________________________________________\nunified_lstm (UnifiedLSTM) (64, None, 1024) 5246976\n_________________________________________________________________\ndense (Dense) (64, None, 65) 66625\n=================================================================\nTotal params: 5,330,241\nTrainable params: 5,330,241\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"To get actual predictions from the model we need to sample from the output distribution, to get actual character indices. This distribution is defined by the logits over the character vocabulary.\n\nNote: It is important to _sample_ from this distribution as taking the _argmax_ of the distribution can easily get the model stuck in a loop.\n\nTry it for the first example in the batch:",
"_____no_output_____"
]
],
[
[
"sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)\nsampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()",
"_____no_output_____"
]
],
[
[
"This gives us, at each timestep, a prediction of the next character index:",
"_____no_output_____"
]
],
[
[
"sampled_indices",
"_____no_output_____"
]
],
[
[
"Decode these to see the text predicted by this untrained model:",
"_____no_output_____"
]
],
[
[
"print(\"Input: \\n\", repr(\"\".join(idx2char[input_example_batch[0]])))\nprint()\nprint(\"Next Char Predictions: \\n\", repr(\"\".join(idx2char[sampled_indices ])))",
"Input:\n 'to it far before thy time?\\nWarwick is chancellor and the lord of Calais;\\nStern Falconbridge commands'\n\nNext Char Predictions:\n \"I!tbdTa-FZRtKtY:KDnBe.TkxcoZEXLucZ&OUupVB rqbY&Tfxu :HQ!jYN:Jt'N3KNpehXxs.onKsdv:e;g?PhhCm3r-om! :t\"\n"
]
],
[
[
"## Train the model",
"_____no_output_____"
],
[
"At this point the problem can be treated as a standard classification problem. Given the previous RNN state, and the input this time step, predict the class of the next character.",
"_____no_output_____"
],
[
"### Attach an optimizer, and a loss function",
"_____no_output_____"
],
[
"The standard `tf.keras.losses.sparse_categorical_crossentropy` loss function works in this case because it is applied across the last dimension of the predictions.\n\nBecause our model returns logits, we need to set the `from_logits` flag.\n",
"_____no_output_____"
]
],
[
[
"def loss(labels, logits):\n return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)\n\nexample_batch_loss = loss(target_example_batch, example_batch_predictions)\nprint(\"Prediction shape: \", example_batch_predictions.shape, \" # (batch_size, sequence_length, vocab_size)\")\nprint(\"scalar_loss: \", example_batch_loss.numpy().mean())",
"Prediction shape: (64, 100, 65) # (batch_size, sequence_length, vocab_size)\nscalar_loss: 4.174188\n"
]
],
[
[
"Configure the training procedure using the `tf.keras.Model.compile` method. We'll use `tf.keras.optimizers.Adam` with default arguments and the loss function.",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam', loss=loss)",
"_____no_output_____"
]
],
[
[
"### Configure checkpoints",
"_____no_output_____"
],
[
"Use a `tf.keras.callbacks.ModelCheckpoint` to ensure that checkpoints are saved during training:",
"_____no_output_____"
]
],
[
[
"# Directory where the checkpoints will be saved\ncheckpoint_dir = './training_checkpoints'\n# Name of the checkpoint files\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt_{epoch}\")\n\ncheckpoint_callback=tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_prefix,\n save_weights_only=True)",
"_____no_output_____"
]
],
[
[
"### Execute the training",
"_____no_output_____"
],
[
"To keep training time reasonable, use 10 epochs to train the model. In Colab, set the runtime to GPU for faster training.",
"_____no_output_____"
]
],
[
[
"EPOCHS=10",
"_____no_output_____"
],
[
"history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])",
"Epoch 1/10\n172/172 [==============================] - 31s 183ms/step - loss: 2.7052\nEpoch 2/10\n172/172 [==============================] - 31s 180ms/step - loss: 2.0039\nEpoch 3/10\n172/172 [==============================] - 31s 180ms/step - loss: 1.7375\nEpoch 4/10\n172/172 [==============================] - 31s 179ms/step - loss: 1.5772\nEpoch 5/10\n172/172 [==============================] - 31s 179ms/step - loss: 1.4772\nEpoch 6/10\n172/172 [==============================] - 31s 180ms/step - loss: 1.4087\nEpoch 7/10\n172/172 [==============================] - 31s 179ms/step - loss: 1.3556\nEpoch 8/10\n172/172 [==============================] - 31s 179ms/step - loss: 1.3095\nEpoch 9/10\n172/172 [==============================] - 31s 179ms/step - loss: 1.2671\nEpoch 10/10\n172/172 [==============================] - 31s 180ms/step - loss: 1.2276\n"
]
],
[
[
"## Generate text",
"_____no_output_____"
],
[
"### Restore the latest checkpoint",
"_____no_output_____"
],
[
"To keep this prediction step simple, use a batch size of 1.\n\nBecause of the way the RNN state is passed from timestep to timestep, the model only accepts a fixed batch size once built.\n\nTo run the model with a different `batch_size`, we need to rebuild the model and restore the weights from the checkpoint.\n",
"_____no_output_____"
]
],
[
[
"tf.train.latest_checkpoint(checkpoint_dir)",
"_____no_output_____"
],
[
"model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)\n\nmodel.load_weights(tf.train.latest_checkpoint(checkpoint_dir))\n\nmodel.build(tf.TensorShape([1, None]))",
"W0304 03:54:01.201246 140067035297664 tf_logging.py:161] <tensorflow.python.keras.layers.recurrent.UnifiedLSTM object at 0x7f636183c7f0>: Note that this layer is not optimized for performance. Please use tf.keras.layers.CuDNNLSTM for better performance on GPU.\n"
],
[
"model.summary()",
"Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param #\n=================================================================\nembedding_1 (Embedding) (1, None, 256) 16640\n_________________________________________________________________\nunified_lstm_1 (UnifiedLSTM) (1, None, 1024) 5246976\n_________________________________________________________________\ndense_1 (Dense) (1, None, 65) 66625\n=================================================================\nTotal params: 5,330,241\nTrainable params: 5,330,241\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### The prediction loop\n\nThe following code block generates the text:\n\n* It Starts by choosing a start string, initializing the RNN state and setting the number of characters to generate.\n\n* Get the prediction distribution of the next character using the start string and the RNN state.\n\n* Then, use a categorical distribution to calculate the index of the predicted character. Use this predicted character as our next input to the model.\n\n* The RNN state returned by the model is fed back into the model so that it now has more context, instead than only one word. After predicting the next word, the modified RNN states are again fed back into the model, which is how it learns as it gets more context from the previously predicted words.\n\n\n\n\nLooking at the generated text, you'll see the model knows when to capitalize, make paragraphs and imitates a Shakespeare-like writing vocabulary. With the small number of training epochs, it has not yet learned to form coherent sentences.",
"_____no_output_____"
]
],
[
[
"def generate_text(model, start_string):\n # Evaluation step (generating text using the learned model)\n\n # Number of characters to generate\n num_generate = 1000\n\n # Converting our start string to numbers (vectorizing)\n input_eval = [char2idx[s] for s in start_string]\n input_eval = tf.expand_dims(input_eval, 0)\n\n # Empty string to store our results\n text_generated = []\n\n # Low temperatures results in more predictable text.\n # Higher temperatures results in more surprising text.\n # Experiment to find the best setting.\n temperature = 1.0\n\n # Here batch size == 1\n model.reset_states()\n for i in range(num_generate):\n predictions = model(input_eval)\n # remove the batch dimension\n predictions = tf.squeeze(predictions, 0)\n\n # using a categorical distribution to predict the word returned by the model\n predictions = predictions / temperature\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()\n\n # We pass the predicted word as the next input to the model\n # along with the previous hidden state\n input_eval = tf.expand_dims([predicted_id], 0)\n\n text_generated.append(idx2char[predicted_id])\n\n return (start_string + ''.join(text_generated))",
"_____no_output_____"
],
[
"print(generate_text(model, start_string=u\"ROMEO: \"))",
"ROMEO: now to have weth hearten sonce,\nNo more than the thing stand perfect your self,\nLove way come. Up, this is d so do in friends:\nIf I fear e this, I poisple\nMy gracious lusty, born once for readyus disguised:\nBut that a pry; do it sure, thou wert love his cause;\nMy mind is come too!\n\nPOMPEY:\nServe my master's him: he hath extreme over his hand in the\nwhere they shall not hear they right for me.\n\nPROSSPOLUCETER:\nI pray you, mistress, I shall be construted\nWith one that you shall that we know it, in this gentleasing earls of daiberkers now\nhe is to look upon this face, which leadens from his master as\nyou should not put what you perciploce backzat of cast,\nNor fear it sometime but for a pit\na world of Hantua?\n\nFirst Gentleman:\nThat we can fall of bastards my sperial;\nO, she Go seeming that which I have\nwhat enby oar own best injuring them,\nOr thom I do now, I, in heart is nothing gone,\nLeatt the bark which was done born.\n\nBRUTUS:\nBoth Margaret, he is sword of the house person. If born,\n"
]
],
[
[
"The easiest thing you can do to improve the results it to train it for longer (try `EPOCHS=30`).\n\nYou can also experiment with a different start string, or try adding another RNN layer to improve the model's accuracy, or adjusting the temperature parameter to generate more or less random predictions.",
"_____no_output_____"
],
[
"## Advanced: Customized Training\n\nThe above training procedure is simple, but does not give you much control.\n\nSo now that you've seen how to run the model manually let's unpack the training loop, and implement it ourselves. This gives a starting point if, for example, to implement _curriculum learning_ to help stabilize the model's open-loop output.\n\nWe will use `tf.GradientTape` to track the gradients. You can learn more about this approach by reading the [eager execution guide](https://www.tensorflow.org/guide/eager).\n\nThe procedure works as follows:\n\n* First, initialize the RNN state. We do this by calling the `tf.keras.Model.reset_states` method.\n\n* Next, iterate over the dataset (batch by batch) and calculate the *predictions* associated with each.\n\n* Open a `tf.GradientTape`, and calculate the predictions and loss in that context.\n\n* Calculate the gradients of the loss with respect to the model variables using the `tf.GradientTape.grads` method.\n\n* Finally, take a step downwards by using the optimizer's `tf.train.Optimizer.apply_gradients` method.\n\n",
"_____no_output_____"
]
],
[
[
"model = build_model(\n vocab_size = len(vocab),\n embedding_dim=embedding_dim,\n rnn_units=rnn_units,\n batch_size=BATCH_SIZE)",
"W0304 03:54:08.030432 140067035297664 tf_logging.py:161] <tensorflow.python.keras.layers.recurrent.UnifiedLSTM object at 0x7f63635efe80>: Note that this layer is not optimized for performance. Please use tf.keras.layers.CuDNNLSTM for better performance on GPU.\n"
],
[
"optimizer = tf.keras.optimizers.Adam()",
"_____no_output_____"
],
[
"@tf.function\ndef train_step(inp, target):\n with tf.GradientTape() as tape:\n predictions = model(inp)\n loss = tf.reduce_mean(\n tf.keras.losses.sparse_categorical_crossentropy(\n target, predictions, from_logits=True))\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n return loss",
"_____no_output_____"
],
[
"# Training step\nEPOCHS = 10\n\nfor epoch in range(EPOCHS):\n start = time.time()\n\n # initializing the hidden state at the start of every epoch\n # initally hidden is None\n hidden = model.reset_states()\n\n for (batch_n, (inp, target)) in enumerate(dataset):\n loss = train_step(inp, target)\n\n if batch_n % 100 == 0:\n template = 'Epoch {} Batch {} Loss {}'\n print(template.format(epoch+1, batch_n, loss))\n\n # saving (checkpoint) the model every 5 epochs\n if (epoch + 1) % 5 == 0:\n model.save_weights(checkpoint_prefix.format(epoch=epoch))\n\n print ('Epoch {} Loss {:.4f}'.format(epoch+1, loss))\n print ('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))\n\nmodel.save_weights(checkpoint_prefix.format(epoch=epoch))",
"Epoch 1 Batch 0 Loss 4.174627780914307\nEpoch 1 Batch 100 Loss 2.333711862564087\nEpoch 1 Loss 2.0831\nTime taken for 1 epoch 15.117910146713257 sec\n\nEpoch 2 Batch 0 Loss 2.150496244430542\nEpoch 2 Batch 100 Loss 1.8478351831436157\nEpoch 2 Loss 1.7348\nTime taken for 1 epoch 14.401937007904053 sec\n\nEpoch 3 Batch 0 Loss 1.8013415336608887\nEpoch 3 Batch 100 Loss 1.6072556972503662\nEpoch 3 Loss 1.5668\nTime taken for 1 epoch 14.415359258651733 sec\n\nEpoch 4 Batch 0 Loss 1.6106206178665161\nEpoch 4 Batch 100 Loss 1.478020191192627\nEpoch 4 Loss 1.4673\nTime taken for 1 epoch 14.300090312957764 sec\n\nEpoch 5 Batch 0 Loss 1.5047727823257446\nEpoch 5 Batch 100 Loss 1.3985247611999512\nEpoch 5 Loss 1.3992\nTime taken for 1 epoch 14.128910779953003 sec\n\nEpoch 6 Batch 0 Loss 1.4343167543411255\nEpoch 6 Batch 100 Loss 1.3426867723464966\nEpoch 6 Loss 1.3441\nTime taken for 1 epoch 13.973440170288086 sec\n\nEpoch 7 Batch 0 Loss 1.3767048120498657\nEpoch 7 Batch 100 Loss 1.297922968864441\nEpoch 7 Loss 1.2969\nTime taken for 1 epoch 14.030970573425293 sec\n\nEpoch 8 Batch 0 Loss 1.3249197006225586\nEpoch 8 Batch 100 Loss 1.251737117767334\nEpoch 8 Loss 1.2502\nTime taken for 1 epoch 14.13421106338501 sec\n\nEpoch 9 Batch 0 Loss 1.2790530920028687\nEpoch 9 Batch 100 Loss 1.2029081583023071\nEpoch 9 Loss 1.2074\nTime taken for 1 epoch 14.262096643447876 sec\n\nEpoch 10 Batch 0 Loss 1.2350478172302246\nEpoch 10 Batch 100 Loss 1.1610674858093262\nEpoch 10 Loss 1.1558\nTime taken for 1 epoch 14.261839628219604 sec\n\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7414823cfebae7eaf9e27d5cc77b4a7108ec342 | 26,280 | ipynb | Jupyter Notebook | k1lib/cli/others.ipynb | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | 1 | 2021-08-11T19:10:08.000Z | 2021-08-11T19:10:08.000Z | k1lib/cli/others.ipynb | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | null | null | null | k1lib/cli/others.ipynb | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | null | null | null | 59.727273 | 132 | 0.684665 | [
[
[
"#export\n\"\"\"\nThis is for pretty random clis that's scattered everywhere.\n\"\"\"\n__all__ = [\"crissCross\"]\nfrom typing import Callable, Iterator, Any, Union, List\nfrom k1lib.cli import BaseCli; from k1lib import cli\nimport torch",
"_____no_output_____"
],
[
"#export\ndef crissCross():\n \"\"\"Like the monkey-patched function :meth:`torch.crissCross`.\nExample::\n\n # returns another Tensor\n [torch.randn(3, 3), torch.randn(3)] | crissCross()\"\"\"\n return cli.applyS(lambda x: torch.crissCross(*x))",
"_____no_output_____"
],
[
"a = [torch.randn(3, 3), torch.randn(3)] | crissCross()\nassert len(a) == 6; assert isinstance(a, torch.Tensor)",
"_____no_output_____"
],
[
"#export\n#torch.stack = cli.applyS(torch.stack)\n#torch.stack.__doc__ = \"Stacks tensors together\"",
"_____no_output_____"
],
[
"!../../export.py cli/others",
"Current dir: /home/kelvin/repos/labs/k1lib, ../../export.py\nrm: cannot remove '__pycache__': No such file or directory\nFound existing installation: k1lib 0.11\nUninstalling k1lib-0.11:\n Successfully uninstalled k1lib-0.11\nrunning install\nrunning bdist_egg\nrunning egg_info\ncreating k1lib.egg-info\nwriting k1lib.egg-info/PKG-INFO\nwriting dependency_links to k1lib.egg-info/dependency_links.txt\nwriting requirements to k1lib.egg-info/requires.txt\nwriting top-level names to k1lib.egg-info/top_level.txt\nwriting manifest file 'k1lib.egg-info/SOURCES.txt'\nreading manifest file 'k1lib.egg-info/SOURCES.txt'\nadding license file 'LICENSE'\nwriting manifest file 'k1lib.egg-info/SOURCES.txt'\ninstalling library code to build/bdist.linux-x86_64/egg\nrunning install_lib\nrunning build_py\ncreating build\ncreating build/lib\ncreating build/lib/k1lib\ncopying k1lib/_learner.py -> build/lib/k1lib\ncopying k1lib/fmt.py -> build/lib/k1lib\ncopying k1lib/_context.py -> build/lib/k1lib\ncopying k1lib/selector.py -> build/lib/k1lib\ncopying k1lib/imports.py -> build/lib/k1lib\ncopying k1lib/_baseClasses.py -> build/lib/k1lib\ncopying k1lib/_basics.py -> build/lib/k1lib\ncopying k1lib/viz.py -> build/lib/k1lib\ncopying k1lib/_higher.py -> build/lib/k1lib\ncopying k1lib/__init__.py -> build/lib/k1lib\ncopying k1lib/_monkey.py -> build/lib/k1lib\ncopying k1lib/knn.py -> build/lib/k1lib\ncopying k1lib/graphEqn.py -> build/lib/k1lib\ncopying k1lib/schedule.py -> build/lib/k1lib\ncopying k1lib/_perlin.py -> build/lib/k1lib\ncopying k1lib/kdata.py -> build/lib/k1lib\ncopying k1lib/eqn.py -> build/lib/k1lib\ncreating build/lib/k1lib/_hidden\ncopying k1lib/_hidden/hiddenFile.py -> build/lib/k1lib/_hidden\ncopying k1lib/_hidden/__init__.py -> build/lib/k1lib/_hidden\ncreating build/lib/k1lib/cli\ncopying k1lib/cli/bio.py -> build/lib/k1lib/cli\ncopying k1lib/cli/structural.py -> build/lib/k1lib/cli\ncopying k1lib/cli/modifier.py -> build/lib/k1lib/cli\ncopying k1lib/cli/gb.py -> 
build/lib/k1lib/cli\ncopying k1lib/cli/output.py -> build/lib/k1lib/cli\ncopying k1lib/cli/kxml.py -> build/lib/k1lib/cli\ncopying k1lib/cli/inp.py -> build/lib/k1lib/cli\ncopying k1lib/cli/mgi.py -> build/lib/k1lib/cli\ncopying k1lib/cli/grep.py -> build/lib/k1lib/cli\ncopying k1lib/cli/sam.py -> build/lib/k1lib/cli\ncopying k1lib/cli/trace.py -> build/lib/k1lib/cli\ncopying k1lib/cli/entrez.py -> build/lib/k1lib/cli\ncopying k1lib/cli/__init__.py -> build/lib/k1lib/cli\ncopying k1lib/cli/filt.py -> build/lib/k1lib/cli\ncopying k1lib/cli/utils.py -> build/lib/k1lib/cli\ncopying k1lib/cli/init.py -> build/lib/k1lib/cli\ncopying k1lib/cli/others.py -> build/lib/k1lib/cli\ncopying k1lib/cli/kcsv.py -> build/lib/k1lib/cli\ncreating build/lib/k1lib/callbacks\ncopying k1lib/callbacks/loss_accuracy.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/progress.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/limits.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/hookParam.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/profiler.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/callbacks.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/paramFinder.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/core.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/__init__.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/landscape.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/confusionMatrix.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/recorder.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/shorts.py -> build/lib/k1lib/callbacks\ncopying k1lib/callbacks/hookModule.py -> build/lib/k1lib/callbacks\ncreating build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/time.py -> build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/memory.py -> build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/__init__.py -> 
build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/io.py -> build/lib/k1lib/callbacks/profilers\ncopying k1lib/callbacks/profilers/computation.py -> build/lib/k1lib/callbacks/profilers\ncreating build/lib/k1lib/callbacks/lossFunctions\ncopying k1lib/callbacks/lossFunctions/accuracy.py -> build/lib/k1lib/callbacks/lossFunctions\ncopying k1lib/callbacks/lossFunctions/__init__.py -> build/lib/k1lib/callbacks/lossFunctions\ncopying k1lib/callbacks/lossFunctions/shorts.py -> build/lib/k1lib/callbacks/lossFunctions\ncreating build/lib/k1lib/_mo\ncopying k1lib/_mo/atom.py -> build/lib/k1lib/_mo\ncopying k1lib/_mo/parseM.py -> build/lib/k1lib/_mo\ncopying k1lib/_mo/substance.py -> build/lib/k1lib/_mo\ncopying k1lib/_mo/system.py -> build/lib/k1lib/_mo\ncopying k1lib/_mo/__init__.py -> build/lib/k1lib/_mo\ncreating build/bdist.linux-x86_64\ncreating build/bdist.linux-x86_64/egg\ncreating build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_learner.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/fmt.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_context.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/selector.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/imports.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_baseClasses.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_basics.py -> build/bdist.linux-x86_64/egg/k1lib\ncreating build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/bio.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/structural.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/modifier.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/gb.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/output.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/kxml.py -> 
build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/inp.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/mgi.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/grep.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/sam.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/trace.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/entrez.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/filt.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/utils.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/init.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/others.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/cli/kcsv.py -> build/bdist.linux-x86_64/egg/k1lib/cli\ncopying build/lib/k1lib/viz.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_higher.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/__init__.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/_monkey.py -> build/bdist.linux-x86_64/egg/k1lib\ncreating build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/atom.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/parseM.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/substance.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/system.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/_mo/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/_mo\ncopying build/lib/k1lib/knn.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/graphEqn.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/schedule.py -> build/bdist.linux-x86_64/egg/k1lib\ncreating 
build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/loss_accuracy.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/progress.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/limits.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/hookParam.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/profiler.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/callbacks.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/paramFinder.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/core.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncreating build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/time.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/memory.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/io.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/profilers/computation.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers\ncopying build/lib/k1lib/callbacks/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/landscape.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/confusionMatrix.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/recorder.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/shorts.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks\ncopying build/lib/k1lib/callbacks/hookModule.py -> 
build/bdist.linux-x86_64/egg/k1lib/callbacks\ncreating build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions\ncopying build/lib/k1lib/callbacks/lossFunctions/accuracy.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions\ncopying build/lib/k1lib/callbacks/lossFunctions/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions\ncopying build/lib/k1lib/callbacks/lossFunctions/shorts.py -> build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions\ncopying build/lib/k1lib/_perlin.py -> build/bdist.linux-x86_64/egg/k1lib\ncopying build/lib/k1lib/kdata.py -> build/bdist.linux-x86_64/egg/k1lib\ncreating build/bdist.linux-x86_64/egg/k1lib/_hidden\ncopying build/lib/k1lib/_hidden/hiddenFile.py -> build/bdist.linux-x86_64/egg/k1lib/_hidden\ncopying build/lib/k1lib/_hidden/__init__.py -> build/bdist.linux-x86_64/egg/k1lib/_hidden\ncopying build/lib/k1lib/eqn.py -> build/bdist.linux-x86_64/egg/k1lib\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_learner.py to _learner.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/fmt.py to fmt.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_context.py to _context.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/selector.py to selector.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/imports.py to imports.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_baseClasses.py to _baseClasses.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_basics.py to _basics.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/bio.py to bio.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/structural.py to structural.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/modifier.py to modifier.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/gb.py to gb.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/output.py to 
output.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/kxml.py to kxml.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/inp.py to inp.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/mgi.py to mgi.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/grep.py to grep.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/sam.py to sam.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/trace.py to trace.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/entrez.py to entrez.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/filt.py to filt.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/utils.py to utils.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/init.py to init.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/others.py to others.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/cli/kcsv.py to kcsv.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/viz.py to viz.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_higher.py to _higher.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_monkey.py to _monkey.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/atom.py to atom.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/parseM.py to parseM.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/substance.py to substance.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/system.py to system.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_mo/__init__.py to __init__.cpython-38.pyc\nbyte-compiling 
build/bdist.linux-x86_64/egg/k1lib/knn.py to knn.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/graphEqn.py to graphEqn.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/schedule.py to schedule.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/loss_accuracy.py to loss_accuracy.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/progress.py to progress.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/limits.py to limits.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/hookParam.py to hookParam.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profiler.py to profiler.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/callbacks.py to callbacks.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/paramFinder.py to paramFinder.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/core.py to core.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/time.py to time.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/memory.py to memory.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/io.py to io.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/profilers/computation.py to computation.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/landscape.py to landscape.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/confusionMatrix.py to confusionMatrix.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/recorder.py to 
recorder.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/shorts.py to shorts.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/hookModule.py to hookModule.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions/accuracy.py to accuracy.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/callbacks/lossFunctions/shorts.py to shorts.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_perlin.py to _perlin.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/kdata.py to kdata.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_hidden/hiddenFile.py to hiddenFile.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/_hidden/__init__.py to __init__.cpython-38.pyc\nbyte-compiling build/bdist.linux-x86_64/egg/k1lib/eqn.py to eqn.cpython-38.pyc\ncreating build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\ncopying k1lib.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\nzip_safe flag not set; analyzing archive contents...\ncreating dist\ncreating 'dist/k1lib-0.11-py3.8.egg' and adding 'build/bdist.linux-x86_64/egg' to it\nremoving 'build/bdist.linux-x86_64/egg' (and everything under it)\nProcessing k1lib-0.11-py3.8.egg\nCopying k1lib-0.11-py3.8.egg to /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nAdding k1lib 0.11 to easy-install.pth file\n\nInstalled /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages/k1lib-0.11-py3.8.egg\nProcessing dependencies for k1lib==0.11\nSearching 
for dill==0.3.4\nBest match: dill 0.3.4\nAdding dill 0.3.4 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for matplotlib==3.3.2\nBest match: matplotlib 3.3.2\nAdding matplotlib 3.3.2 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for numpy==1.19.2\nBest match: numpy 1.19.2\nAdding numpy 1.19.2 to easy-install.pth file\nInstalling f2py script to /home/kelvin/anaconda3/envs/torch/bin\nInstalling f2py3 script to /home/kelvin/anaconda3/envs/torch/bin\nInstalling f2py3.8 script to /home/kelvin/anaconda3/envs/torch/bin\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for torch==1.10.0\nBest match: torch 1.10.0\nAdding torch 1.10.0 to easy-install.pth file\nInstalling convert-caffe2-to-onnx script to /home/kelvin/anaconda3/envs/torch/bin\nInstalling convert-onnx-to-caffe2 script to /home/kelvin/anaconda3/envs/torch/bin\nInstalling torchrun script to /home/kelvin/anaconda3/envs/torch/bin\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for certifi==2021.10.8\nBest match: certifi 2021.10.8\nAdding certifi 2021.10.8 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for python-dateutil==2.8.2\nBest match: python-dateutil 2.8.2\nAdding python-dateutil 2.8.2 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for pyparsing==2.4.7\nBest match: pyparsing 2.4.7\nAdding pyparsing 2.4.7 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for Pillow==7.2.0\nBest match: Pillow 7.2.0\nAdding Pillow 7.2.0 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for kiwisolver==1.3.2\nBest match: kiwisolver 1.3.2\nAdding kiwisolver 1.3.2 to easy-install.pth file\n\nUsing 
/home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for cycler==0.10.0\nBest match: cycler 0.10.0\nAdding cycler 0.10.0 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for typing-extensions==3.10.0.2\nBest match: typing-extensions 3.10.0.2\nAdding typing-extensions 3.10.0.2 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nSearching for six==1.16.0\nBest match: six 1.16.0\nAdding six 1.16.0 to easy-install.pth file\n\nUsing /home/kelvin/anaconda3/envs/torch/lib/python3.8/site-packages\nFinished processing dependencies for k1lib==0.11\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7414c9918204a8b320ce7a4b7a8d96e6110a17b | 651,768 | ipynb | Jupyter Notebook | Examples/NoiseColumns/NoiseColumns.ipynb | sdrees/pyvtreat | fed9a653b2524ba04b1e92b1087e58bead25f99a | [
"BSD-3-Clause"
] | 104 | 2019-07-21T06:15:02.000Z | 2022-02-23T19:41:58.000Z | Examples/NoiseColumns/NoiseColumns.ipynb | arita37/pyvtreat | c32e7ce6db11a2ccdd63e545b25028cbec03a3ff | [
"BSD-3-Clause"
] | 15 | 2019-08-12T09:59:40.000Z | 2021-12-09T00:38:47.000Z | Examples/NoiseColumns/NoiseColumns.ipynb | arita37/pyvtreat | c32e7ce6db11a2ccdd63e545b25028cbec03a3ff | [
"BSD-3-Clause"
] | 9 | 2019-08-15T13:29:15.000Z | 2021-03-08T18:04:08.000Z | 162.373692 | 63,464 | 0.818676 | [
[
[
"import pandas\nimport numpy.random\nimport vtreat\nimport sklearn.linear_model\nimport sklearn.metrics\nimport seaborn\nimport matplotlib.pyplot\nimport statsmodels.api\nimport scipy.stats\nimport re\n",
"_____no_output_____"
],
[
"n_rows = 5000\nn_signal_variables = 10\nn_noise_variables = 100\nn_levels = 500",
"_____no_output_____"
],
[
"d = pandas.DataFrame({\"y\":0.01*numpy.random.normal(size = n_rows)})",
"_____no_output_____"
],
[
"def mk_var_values(n_levels):\n values = {}\n for i in range(n_levels):\n values[\"level_\" + str(i)] = numpy.random.uniform(low=-10, high=10, size=1)[0]\n return values",
"_____no_output_____"
],
[
"for i in range(n_signal_variables):\n var_name = \"var_\" + str(i)\n levs = mk_var_values(n_levels)\n keys = [ k for k in levs.keys() ]\n observed = [ keys[i] for i in numpy.random.choice(len(keys), size=n_rows, replace=True)]\n effect = numpy.asarray([ levs[k] for k in observed ])\n d[var_name] = observed\n d[\"y\"] = d[\"y\"] + effect",
"_____no_output_____"
],
[
"for i in range(n_noise_variables):\n var_name = \"noise_\" + str(i)\n levs = mk_var_values(n_levels)\n keys = [ k for k in levs.keys() ]\n observed = [ keys[i] for i in numpy.random.choice(len(keys), size=n_rows, replace=True)]\n d[var_name] = observed",
"_____no_output_____"
],
[
"d.head()",
"_____no_output_____"
],
[
"is_train = numpy.random.uniform(size=n_rows)<=0.5",
"_____no_output_____"
],
[
"d_train = d.loc[is_train,:].copy()\nd_train.reset_index(inplace=True, drop=True)\ny_train = numpy.asarray(d_train[\"y\"])\nd_train.drop([\"y\"], axis=1, inplace=True)\nd_test = d.loc[numpy.logical_not(is_train),:].copy()\nd_test.reset_index(inplace=True, drop=True)\ny_test = numpy.asarray(d_test[\"y\"])\nd_test.drop([\"y\"], axis=1, inplace=True)",
"_____no_output_____"
],
[
"plan = vtreat.NumericOutcomeTreatment(params=vtreat.vtreat_parameters({'filter_to_recommended':False,\n 'coders':['impact_code']}))\ncross_frame = plan.fit_transform(d_train, y_train)\nprepared_test = plan.transform(d_test)\nnaive_train_hierarchical = plan.transform(d_train)",
"_____no_output_____"
],
[
"p2 = vtreat.NumericOutcomeTreatment(params=vtreat.vtreat_parameters({'filter_to_recommended':False,\n 'coders':['impact_code'],\n 'use_hierarchical_estimate':False}))\np2.fit_transform(d_train, y_train)\nnaive_train_empirical = p2.transform(d_train)",
"_____no_output_____"
],
[
"naive_train_empirical.head()",
"_____no_output_____"
],
[
"naive_train_hierarchical.head()",
"_____no_output_____"
],
[
"cross_frame.head()",
"_____no_output_____"
],
[
"all_vars = [vi for vi in plan.score_frame_[\"variable\"]]\ncorr_frame = pandas.DataFrame({\"variable\":[vi for vi in all_vars if re.match(\".*_impact_.*\", vi)]})\ncorr_frame[\"naive_train_empirical_correlation\"] = [ \n scipy.stats.pearsonr(naive_train_empirical[vi], y_train)[0] for vi in corr_frame[\"variable\"]]\ncorr_frame[\"naive_train_hierarchical_correlation\"] = [ \n scipy.stats.pearsonr(naive_train_hierarchical[vi], y_train)[0] for vi in corr_frame[\"variable\"]]\ncorr_frame[\"cross_frame_correlation\"] = [ \n scipy.stats.pearsonr(cross_frame[vi], y_train)[0] for vi in corr_frame[\"variable\"]]\ncorr_frame[\"test_correlation\"] = [ \n scipy.stats.pearsonr(prepared_test[vi], y_test)[0] for vi in corr_frame[\"variable\"]]\ncorr_frame[\"is_noise\"] = [re.match(\"^noise_.*\", vi) is not None for vi in corr_frame[\"variable\"]]\ncorr_frame\n",
"_____no_output_____"
],
[
"print(scipy.stats.pearsonr(corr_frame['naive_train_empirical_correlation'], corr_frame['test_correlation']))\nseaborn.scatterplot(x = \"naive_train_empirical_correlation\", y = \"test_correlation\", data = corr_frame, hue = \"is_noise\")\nmatplotlib.pyplot.plot([-1, 1], [-1, 1], color=\"red\")\nmatplotlib.pyplot.xlim(-.2,1)\nmatplotlib.pyplot.ylim(-.2,1)\nmatplotlib.pyplot.gca().set_aspect('equal', adjustable='box')",
"(0.8381054968500248, 3.418251880794392e-30)\n"
],
[
"print(scipy.stats.pearsonr(corr_frame['naive_train_hierarchical_correlation'], corr_frame['test_correlation']))\nseaborn.scatterplot(x = \"naive_train_hierarchical_correlation\", y = \"test_correlation\", data = corr_frame, hue = \"is_noise\")\nmatplotlib.pyplot.plot([-1, 1], [-1, 1], color=\"red\")\nmatplotlib.pyplot.xlim(-.2,1)\nmatplotlib.pyplot.ylim(-.2,1)\nmatplotlib.pyplot.gca().set_aspect('equal', adjustable='box')",
"(0.8403949406835453, 1.689768908803988e-30)\n"
],
[
"print(scipy.stats.pearsonr(corr_frame['cross_frame_correlation'], corr_frame['test_correlation']))\nseaborn.scatterplot(x = \"cross_frame_correlation\", y = \"test_correlation\", data = corr_frame, hue = \"is_noise\")\nmatplotlib.pyplot.plot([-1, 1], [-1, 1], color=\"red\")\nmatplotlib.pyplot.xlim(-.2,1)\nmatplotlib.pyplot.ylim(-.2,1)\nmatplotlib.pyplot.gca().set_aspect('equal', adjustable='box')",
"(0.8163025838383855, 1.6910007641688934e-27)\n"
],
[
"plan.score_frame_.tail()",
"_____no_output_____"
],
[
"recommended_vars = [vi for vi in plan.score_frame_[\"variable\"][plan.score_frame_[\"recommended\"]]]\nrecommended_vars",
"_____no_output_____"
],
[
"plot_train = pandas.DataFrame({\"y\":y_train})\nplot_test = pandas.DataFrame({\"y\":y_test})",
"_____no_output_____"
],
[
"fitter = sklearn.linear_model.LinearRegression(fit_intercept = True)",
"_____no_output_____"
],
[
"fitter.fit(cross_frame[all_vars], y_train)\nplot_train[\"predict_cross_all_vars\"] = fitter.predict(cross_frame[all_vars])\nplot_test[\"predict_cross_all_vars\"] = fitter.predict(prepared_test[all_vars])",
"_____no_output_____"
],
[
"fitter.fit(cross_frame[recommended_vars], y_train)\nplot_train[\"predict_cross_recommended_vars\"] = fitter.predict(cross_frame[recommended_vars])\nplot_test[\"predict_cross_recommended_vars\"] = fitter.predict(prepared_test[recommended_vars])",
"_____no_output_____"
],
[
"fitter.fit(naive_train_empirical[all_vars], y_train)\nplot_train[\"predict_naive_empirical_all_vars\"] = fitter.predict(naive_train_empirical[all_vars])\nplot_test[\"predict_naive_empirical_all_vars\"] = fitter.predict(prepared_test[all_vars])",
"_____no_output_____"
],
[
"fitter.fit(naive_train_hierarchical[all_vars], y_train)\nplot_train[\"predict_naive_hierarchical_all_vars\"] = fitter.predict(naive_train_hierarchical[all_vars])\nplot_test[\"predict_naive_hierarchical_all_vars\"] = fitter.predict(prepared_test[all_vars])",
"_____no_output_____"
],
[
"plot_test.head()",
"_____no_output_____"
],
[
"def rmse(x, y):\n return numpy.sqrt(numpy.mean((x-y)**2))",
"_____no_output_____"
],
[
"print(rmse(plot_train[\"predict_naive_empirical_all_vars\"], plot_train[\"y\"]))\nseaborn.scatterplot(x=\"predict_naive_empirical_all_vars\", y =\"y\", data = plot_train)\nplt = matplotlib.pyplot.title(\"Naive empirical prediction on train\")",
"3.1922383588073755\n"
],
[
"print(rmse(plot_train[\"predict_naive_hierarchical_all_vars\"], plot_train[\"y\"]))\nseaborn.scatterplot(x=\"predict_naive_hierarchical_all_vars\", y =\"y\", data = plot_train)\nplt = matplotlib.pyplot.title(\"Naive hierarchical prediction on train\")",
"4.6959733853856775\n"
],
[
"print(rmse(plot_train[\"predict_cross_all_vars\"], plot_train[\"y\"]))\nseaborn.scatterplot(x=\"predict_cross_all_vars\", y =\"y\", data = plot_train)\nplt = matplotlib.pyplot.title(\"Cross prediction on (all vars) train\")",
"15.36154462619125\n"
],
[
"print(rmse(plot_train[\"predict_cross_recommended_vars\"], plot_train[\"y\"]))\nseaborn.scatterplot(x=\"predict_cross_recommended_vars\", y =\"y\", data = plot_train)\nplt = matplotlib.pyplot.title(\"Cross prediction (recommended vars) on train\")",
"15.837575479725219\n"
],
[
"print(rmse(plot_test[\"predict_naive_empirical_all_vars\"], plot_test[\"y\"]))\nseaborn.scatterplot(x=\"predict_naive_empirical_all_vars\", y =\"y\", data = plot_test)\nplt = matplotlib.pyplot.title(\"Naive empirical prediction on test\")",
"17.472934842454194\n"
],
[
"print(rmse(plot_test[\"predict_naive_hierarchical_all_vars\"], plot_test[\"y\"]))\nseaborn.scatterplot(x=\"predict_naive_hierarchical_all_vars\", y =\"y\", data = plot_test)\nplt = matplotlib.pyplot.title(\"Naive hierarchical prediction on test\")",
"17.24679666676164\n"
],
[
"print(rmse(plot_test[\"predict_cross_all_vars\"], plot_test[\"y\"]))\nseaborn.scatterplot(x=\"predict_cross_all_vars\", y =\"y\", data = plot_test)\nplt = matplotlib.pyplot.title(\"Cross prediction on (all vars) test\")",
"15.757533201024454\n"
],
[
"print(rmse(plot_test[\"predict_cross_recommended_vars\"], plot_test[\"y\"]))\nseaborn.scatterplot(x=\"predict_cross_recommended_vars\", y =\"y\", data = plot_test)\nplt = matplotlib.pyplot.title(\"Cross prediction (recommended vars) on test\")",
"15.29893570860548\n"
],
[
"smf1 = statsmodels.api.OLS(\n y_train, \n statsmodels.api.add_constant(naive_train_empirical[all_vars])).fit()\nsmf1.summary()",
"/Users/johnmount/anaconda3/envs/aiAcademy/lib/python3.6/site-packages/numpy/core/fromnumeric.py:2389: FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.\n return ptp(axis=axis, out=out, **kwargs)\n"
],
[
"sklearn.metrics.r2_score(plot_train[\"y\"],plot_train[\"predict_naive_empirical_all_vars\"])",
"_____no_output_____"
],
[
"sklearn.metrics.r2_score(plot_test[\"y\"],plot_test[\"predict_naive_empirical_all_vars\"])",
"_____no_output_____"
],
[
"smf2 = statsmodels.api.OLS(\n y_train, \n statsmodels.api.add_constant(cross_frame[all_vars])).fit()\nsmf2.summary()",
"/Users/johnmount/anaconda3/envs/aiAcademy/lib/python3.6/site-packages/numpy/core/fromnumeric.py:2389: FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.\n return ptp(axis=axis, out=out, **kwargs)\n"
],
[
"sklearn.metrics.r2_score(plot_train[\"y\"],plot_train[\"predict_cross_all_vars\"])",
"_____no_output_____"
],
[
"sklearn.metrics.r2_score(plot_test[\"y\"],plot_test[\"predict_cross_all_vars\"])",
"_____no_output_____"
],
[
"smf3 = statsmodels.api.OLS(\n y_train, \n statsmodels.api.add_constant(cross_frame[recommended_vars])).fit()\nsmf3.summary()",
"/Users/johnmount/anaconda3/envs/aiAcademy/lib/python3.6/site-packages/numpy/core/fromnumeric.py:2389: FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.\n return ptp(axis=axis, out=out, **kwargs)\n"
],
[
"sklearn.metrics.r2_score(plot_train[\"y\"],plot_train[\"predict_cross_recommended_vars\"])",
"_____no_output_____"
],
[
"sklearn.metrics.r2_score(plot_test[\"y\"],plot_test[\"predict_cross_recommended_vars\"])",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74153232a3cb19413a19b71a7886144192e7302 | 176,653 | ipynb | Jupyter Notebook | lab7.ipynb | yjoen/ml | 0ac99d35969e2de1c178bc626b0634b7c060af64 | [
"MIT"
] | null | null | null | lab7.ipynb | yjoen/ml | 0ac99d35969e2de1c178bc626b0634b7c060af64 | [
"MIT"
] | null | null | null | lab7.ipynb | yjoen/ml | 0ac99d35969e2de1c178bc626b0634b7c060af64 | [
"MIT"
] | null | null | null | 1,436.203252 | 148,964 | 0.958506 | [
[
[
"### Lab7 - DP\n\nConvergence of iterative policy evaluation on a small gridworld. \n\n",
"_____no_output_____"
],
[
"The left column is the sequence of approximations of the state-value function for the random policy (all actions equally likely). The right column is the sequence of greedy policies corresponding to the value function estimates (arrows are shown for all actions achieving the maximum, and the numbers shown are rounded to two significant digits). The last policy is guaranteed only to be an improvement over the random policy, but in this case it, and all policies after the third iteration, are optimal.\n\n",
"_____no_output_____"
],
[
"### Exercise 4.1 \nIn Example 4.1, if $\\pi$ is the equiprobable random policy, what is $q_{\\pi}(11,down)$? What is $q_{\\pi}(7,down)$?",
"_____no_output_____"
],
[
"### Answer:\nBased on this equation:\n$$q_{\\pi}(s,a) = E[R_{t+1} + \\gamma v_{\\pi}(S_{t+1})|S_t = s, A_t = a]\n\t= \\sum_{s, s’} p(s’, r|s,a)[r + \\gamma v_{\\pi}(s’)] \n$$\n\n$p(11, r|T,down)=0$, $V^{\\pi}(T) = 0$, so $q_{\\pi}(11,down) = p(11, r|T,down) \\times 1 + \\gamma \\times p(11, r|T,down) \\times V^{\\pi}(T) = 0 \\gamma$\n\nIf $\\gamma=1$, $q_{\\pi}(11,down) = 0$ \n\nFor the second case: \n\n$p(11, r|7,down)=-1$, $V^{\\pi}(11) = -14$, so $q_{\\pi}(11,down) = p(11, r|7,down) \\times 1 + \\gamma \\times p(11, r|7,down) \\times V^{\\pi}(11) = -15 \\gamma$\n\nIf $\\gamma=1$, $q_{\\pi}(7,down) = -15$",
"_____no_output_____"
],
[
"### Exercise 4.2 \n\nIn Example 4.1, suppose a new state 15 is added to the gridworld just below state 13, and its actions, left, up, right, and down, take the agent to states 12, 13, 14, and 15, respectively. Assume that the transitions from the original states are unchanged. What, then, is $v^{\\pi}(15)$ for the equiprobable random policy? Now suppose the dynamics of state 13 are also changed, such that action down from state 13 takes the agent to the new state 15. What is $v^{\\pi}(15)$ for the equiprobable random policy in this case?\n\n$$ v_{k+1}(s) = \\sum_a \\pi(a|s) \\sum_{s’, r} p(s’, r|s, a)[r + \\gamma v_k (s’)] $$\n\nWhat is $\\pi(a|s)$ in this example?",
"_____no_output_____"
],
[
"### Solution:\n\n$ V^{\\pi}(15) = \\sum_a \\pi(a|15) \\sum_{s’, r} p(s’, r|15, a)[r + \\gamma v_k (s’)]\n = \\frac{1}{4}(r(left, 15, 12) + \\gamma V(12) + r(up, 15, 13) + \\gamma V(13) \\\\\n + r(right, 15, 14) + \\gamma V(14) + r(down, 15,15) + \\gamma V(15)) $ \\\\\n \n$ V^{\\pi}(15) = \\frac{1}{4}(-1 -22\\gamma -1 -20\\gamma -1 - 14\\gamma -1 + \\gamma V(15)) \\\\\n = -1 -14\\gamma + \\frac{\\gamma}{4} V(15) $\n \n$ V(15) = - \\frac{4(1+14\\gamma)}{4-\\gamma} $, if let $\\gamma = 1$, \n\n$ V(15) = - 20 $",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7415b5edb0098897c0b545c939b15f695259073 | 6,291 | ipynb | Jupyter Notebook | 01_notebooks/02_converting_to_tif.ipynb | Bankimchandrayadav/PythonInGeomatics | 9a826c5f126192875798acca361d63e4ed47d532 | [
"MIT"
] | null | null | null | 01_notebooks/02_converting_to_tif.ipynb | Bankimchandrayadav/PythonInGeomatics | 9a826c5f126192875798acca361d63e4ed47d532 | [
"MIT"
] | null | null | null | 01_notebooks/02_converting_to_tif.ipynb | Bankimchandrayadav/PythonInGeomatics | 9a826c5f126192875798acca361d63e4ed47d532 | [
"MIT"
] | null | null | null | 6,291 | 6,291 | 0.649658 | [
[
[
"# [1] About\n\n<img style=\"float: right;\" src=\"../BCYadav_about.png\">\n\n - This notebook is a part of tutorial series prepared by B. C. Yadav, Research Scholar @ IIT Roorkee. \n - ORCID iD: https://orcid.org/0000-0001-7288-0551\n - Google Scholar: https://scholar.google.com/citations?user=6fJpxxQAAAAJ&hl=en&authuser=1\n - Github: https://github.com/Bankimchandrayadav/PythonInGeomatics\n - Twitter: https://twitter.com/DrBCY\n - **Recent Publication:** https://rmets.onlinelibrary.wiley.com/doi/10.1002/joc.6562\n - This notebook demonstrates the [conversion of netcdf files to tiff files (the most common format in GIS)] \n---",
"_____no_output_____"
],
[
"# [2] First time usage for conda users",
"_____no_output_____"
]
],
[
[
"# !conda install -c conda-forge rioxarray -y",
"_____no_output_____"
]
],
[
[
"# [3] First time usage for pip users",
"_____no_output_____"
]
],
[
[
"# !pip install rioxarray",
"_____no_output_____"
]
],
[
[
"# [4] Importing libraries",
"_____no_output_____"
]
],
[
[
"import rioxarray\nimport os \nimport numpy as np \nfrom tqdm.notebook import tqdm as td\nimport shutil\nimport time \nstart = time.time() # will be used to measure the effectiveness of automation",
"_____no_output_____"
]
],
[
[
"# [5] Creating routine functions",
"_____no_output_____"
]
],
[
[
"def fresh(where):\n if os.path.exists(where):\n shutil.rmtree(where)\n os.mkdir(where)\n else:\n os.mkdir(where) ",
"_____no_output_____"
]
],
[
[
"# [6] Read files",
"_____no_output_____"
],
[
"## [6.1] Specify input directory",
"_____no_output_____"
]
],
[
[
"rootDir = \"../02_data/02_netcdf_multiple\"",
"_____no_output_____"
]
],
[
[
"## [6.2] Read files from input directory ",
"_____no_output_____"
]
],
[
[
"# create an empty list \nrasters = [] \n\n# loop starts here \nfor dirname, subdirnames, filenames in os.walk(rootDir):\n\n # search message\n print('Searched in directory: {}\\n'.format(dirname))\n\n # subloop starts here\n for filename in filenames:\n\n # get complete file name \n filename = os.path.join(dirname, filename)\n\n # add name one by one to the above list (rasters)\n rasters.append(filename)\n\n# print success message\nprint('Files read')",
"Searched in directory: ../02_data/02_netcdf_multiple\n\nFiles read\n"
]
],
[
[
"## [6.3] Check the input data ",
"_____no_output_____"
]
],
[
[
"print('First file in sequence:', rasters[0])",
"First file in sequence: ../02_data/02_netcdf_multiple\\2000-01-01T00.nc\n"
],
[
"print('Last file in sequence:', rasters[-1])",
"Last file in sequence: ../02_data/02_netcdf_multiple\\2000-02-11T15.nc\n"
]
],
[
[
"# [7] Converting to tiff",
"_____no_output_____"
],
[
"## [7.1] Specify output directory:",
"_____no_output_____"
]
],
[
[
"outDir = \"../02_data/03_tiff/\"",
"_____no_output_____"
]
],
[
[
"## [7.2] Delete any existing or old files",
"_____no_output_____"
]
],
[
[
"fresh(where=outDir)",
"_____no_output_____"
]
],
[
[
"## [7.3] Check output directory [optional]",
"_____no_output_____"
]
],
[
[
"# os.startfile(os.path.realpath(outDir))",
"_____no_output_____"
]
],
[
[
"## [7.4] Conversion of netcdf to tiff",
"_____no_output_____"
]
],
[
[
"# loop starts here \nfor i in td(range(0, len(rasters)), desc = 'Converting to tiff'):\n\n # read file from 'rasters' list and remove path name \n fileName = rasters[i].split('\\\\')[1].split('.')[0] \n\n # read the file as a dataset \n ds = rioxarray.open_rasterio(rasters[i])\n\n # set projection 'datum = WGS84', 'proj = geogrpahic'\n ds = ds.rio.write_crs('epsg:4326')\n\n # convert the file to tiff format \n ds['tp'].rio.to_raster(outDir + fileName + \".tif\")",
"_____no_output_____"
]
],
[
[
"# [8] Time elapsed",
"_____no_output_____"
]
],
[
[
"end = time.time()\nprint('Time elapsed:', np.round(end-start ,2), 'secs')",
"Time elapsed: 514.1 secs\n"
]
],
[
[
"# [9] See results [1000 converted tiff files, optional]",
"_____no_output_____"
]
],
[
[
"os.startfile(os.path.realpath(outDir))",
"_____no_output_____"
]
],
[
[
"---\n# End of second tutorial\n---",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e741601738c6ce44afdcdef14062a7a9ada1cfa0 | 151,863 | ipynb | Jupyter Notebook | _notebooks/2021_12_31_Heart_Attack_Analysis_&_Prediction_Dataset.ipynb | JoGyeongDeok/PigDuck | 8c0d7b7785fecdc163170a608f13baa6d1460740 | [
"Apache-2.0"
] | null | null | null | _notebooks/2021_12_31_Heart_Attack_Analysis_&_Prediction_Dataset.ipynb | JoGyeongDeok/PigDuck | 8c0d7b7785fecdc163170a608f13baa6d1460740 | [
"Apache-2.0"
] | 1 | 2021-12-31T12:40:12.000Z | 2021-12-31T12:41:42.000Z | _notebooks/2021_12_31_Heart_Attack_Analysis_&_Prediction_Dataset.ipynb | JoGyeongDeok/PigDuck | 8c0d7b7785fecdc163170a608f13baa6d1460740 | [
"Apache-2.0"
] | null | null | null | 228.365414 | 128,330 | 0.874551 | [
[
[
"<a href=\"https://colab.research.google.com/github/JoGyeongDeok/PigDuck/blob/master/_notebooks/2021_12_31_Heart_Attack_Analysis_%26_Prediction_Dataset.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# \"Heart Attack Analysis & Prediction Dataset\"\n> \"[Kaggle]\"\n\n- toc:true\n- branch: master\n- badges: true\n- comments: true\n- author: Hamel Husain & Jeremy Howard\n- categories: [Colab, Kaggle Code Review]",
"_____no_output_____"
]
],
[
[
"# !mkdir \"/content/drive/MyDrive/Kaggle/Data/Heart Attack Analysis & Prediction Dataset\"\n# !unzip \"/content/drive/MyDrive/Kaggle/Data/Heart Attack Analysis & Prediction Dataset.zip\" -d \"/content/drive/MyDrive/Kaggle/Data/Heart Attack Analysis & Prediction Dataset/\"",
"_____no_output_____"
],
[
"from google.colab import drive\ndrive.mount('/content/drive') ",
"_____no_output_____"
]
],
[
[
"## 1.Library & Data Load",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import cross_val_score\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"heart=pd.read_csv('/content/drive/MyDrive/Kaggle/Data/Heart Attack Analysis & Prediction Dataset/heart.csv')\no2Saturation=pd.read_csv('/content/drive/MyDrive/Kaggle/Data/Heart Attack Analysis & Prediction Dataset/o2Saturation.csv')",
"_____no_output_____"
],
[
"heart.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 303 entries, 0 to 302\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 303 non-null int64 \n 1 sex 303 non-null int64 \n 2 cp 303 non-null int64 \n 3 trtbps 303 non-null int64 \n 4 chol 303 non-null int64 \n 5 fbs 303 non-null int64 \n 6 restecg 303 non-null int64 \n 7 thalachh 303 non-null int64 \n 8 exng 303 non-null int64 \n 9 oldpeak 303 non-null float64\n 10 slp 303 non-null int64 \n 11 caa 303 non-null int64 \n 12 thall 303 non-null int64 \n 13 output 303 non-null int64 \ndtypes: float64(1), int64(13)\nmemory usage: 33.3 KB\n"
],
[
"heart.drop_duplicates(inplace=True)",
"_____no_output_____"
],
[
"heart.describe()",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,8))\nsns.heatmap(heart.corr(),annot=True)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 2. 모델링",
"_____no_output_____"
]
],
[
[
"X = heart.iloc[:, 0: -1]\ny = heart.iloc[:, -1:]",
"_____no_output_____"
],
[
"X_train,X_test, y_train, y_test= train_test_split(X,y,test_size=0.2,random_state=2)",
"_____no_output_____"
]
],
[
[
"https://mkjjo.github.io/python/2019/01/10/scaler.html\n\nhttps://blog.naver.com/chlxogns92/221767376389",
"_____no_output_____"
]
],
[
[
"pipe_svc=make_pipeline(StandardScaler(),\n SVC(random_state=1))\nparam_C_range=[0,1,2,3,4,5,6,7,8,9,10]\nparam_gamma_range=[0.005,0, 0.1, 0.015]\n\nparam_grid=[{'svc__C' : param_C_range,\n 'svc__kernel':['linear']},\n {'svc__C':param_C_range,\n 'svc__gamma':param_gamma_range,\n 'svc__kernel':['rbf'] \n }]\ngs=GridSearchCV(estimator=pipe_svc,\n param_grid=param_grid,\n scoring='accuracy',\n cv=10,\n n_jobs=-1)\n\ngs=gs.fit(X_train,y_train)\nprint(gs.best_score_)\nprint(gs.best_params_)",
"0.8463333333333335\n{'svc__C': 4, 'svc__gamma': 0.015, 'svc__kernel': 'rbf'}\n"
],
[
"clf=gs.best_estimator_\nclf.fit(X_train,y_train)\nprint(clf.score(X_test,y_test) )",
"0.8524590163934426\n"
],
[
"confusion_matrix(y_test,clf.predict(X_test))",
"_____no_output_____"
],
[
"clf.score(X_test,y_test)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7416300aef47302275ef8c22d8d7d5dde88afa1 | 307,912 | ipynb | Jupyter Notebook | results_processed/publication/massbank/ssvm_lib=v2__exp_ver=4/exp_03__stereochemistry.ipynb | aalto-ics-kepaco/lcms2struct_exp | 5baa3edd0e58d24f739efd4086031f6fbdba6ad9 | [
"MIT"
] | null | null | null | results_processed/publication/massbank/ssvm_lib=v2__exp_ver=4/exp_03__stereochemistry.ipynb | aalto-ics-kepaco/lcms2struct_exp | 5baa3edd0e58d24f739efd4086031f6fbdba6ad9 | [
"MIT"
] | null | null | null | results_processed/publication/massbank/ssvm_lib=v2__exp_ver=4/exp_03__stereochemistry.ipynb | aalto-ics-kepaco/lcms2struct_exp | 5baa3edd0e58d24f739efd4086031f6fbdba6ad9 | [
"MIT"
] | null | null | null | 132.151073 | 173,700 | 0.792363 | [
[
[
"%load_ext autoreload\n%autoreload 2\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport numpy as np\n\nfrom typing import Optional, List\n\nfrom ssvm_evaluation.plotting_utils import table__top_k_acc_per_dataset_with_significance\n\n# Plots for the comparison of the different methods \nfrom ssvm_evaluation.plotting_utils import plot__03__a, plot__03__b\n\nfrom ssvm_evaluation.utils import load_topk__publication",
"_____no_output_____"
],
[
"def _check_onlyms(reference: pd.DataFrame, others: List[pd.DataFrame], n_samples: int = 1500, random_state: Optional[int] = None): \n # Ensure input being a list\n if isinstance(others, pd.DataFrame): \n others = [others]\n \n # We only need to ensure, that the Only MS performance is equal\n _ref = reference[reference[\"scoring_method\"] == \"Only MS\"]\n _others = [o[o[\"scoring_method\"] == \"Only MS\"] for o in others if len(o) > 0]\n \n # Go over a random sub-set of rows in the reference data\n n_tests = np.zeros(len(others))\n for _, (x, y, z) in _ref[[\"dataset\", \"eval_indx\", \"k\"]].sample(n=n_samples, random_state=random_state).drop_duplicates().iterrows():\n # Top-k accuracy of the reference\n _acc_ref = _ref[(_ref[\"dataset\"] == x) & (_ref[\"eval_indx\"] == y) & (_ref[\"k\"] == z)][\"top_k_acc\"]\n\n # Top-k accuracy of the others\n _acc_others = [_o[(_o[\"dataset\"] == x) & (_o[\"eval_indx\"] == y) & (_o[\"k\"] == z)][\"top_k_acc\"] for _o in _others]\n \n # The top-k accuracy of the reference must be equal to the one of the others\n for i in range(len(_others)):\n if len(_acc_others[i]) == 0:\n continue\n \n try: \n # assert _acc_ref.item() == _acc_others[i].item()\n assert np.allclose(_acc_ref.item(), _acc_others[i].item())\n except AssertionError:\n print(i, x, y, z, _acc_ref.item(), _acc_others[i].item())\n \n n_tests[i] += 1\n \n print(\"Performed tests:\", n_tests)",
"_____no_output_____"
]
],
[
[
"# Experiment 3: Study of LC-MS$^2$Struct ability to improve the identification of stereoisomers\n\nIn this experiment we output a score (either Only-MS$^2$ or LC-MS$^2$Struct) for each stereoisomer in the candidate set. Candidates are identified (or indexed, or distiguished) by their full InChIKey. Typically, the Only-MS$^2$ scores will be the same for a group of stereoisomers, that means for a group of candidates with the same InChIKey first block. \n\nBy using LC-MS$^2$Struct, we hope to be able to rank the stereoisomers the correct way. To study that, we train two LC-MS$^2$Struct models for each MS$^2$ scorer: One that uses candidate fingerprints that encode chirality (3D); and one that uses fingerprint without chirality encoding (2D). The motiviation behind this experiment is, that we want to see how much rank-improvement is coming from the fact that provide the SSVM with information to distinguish between stereoisomers. \n\nWhen predicting the scores using LC-MS$^2$Struct (2D) we can observe improved ranking. Perhaps not for the stereoisomers, but generally we still capture 2D structure that improves the ranking, e.g. by ranking the correct \"block\" of stereoisomers higher than another \"block\" due to a better fitting with the observed ROs. Using the 3D features, we actually allow the LC-MS$^2$Struct (3D) to predict a different score for each candidates (each candidate now \"looks different\" to the ML model). Comparing the performance of 2D and 3D should give us an isign of how well we actually use the 3D information. ",
"_____no_output_____"
],
[
"## Load raw results for all three MS$^2$ scorers",
"_____no_output_____"
]
],
[
[
"agg_setting = {\n \"marg_agg_fun\": \"average\",\n \"cand_agg_id\": \"inchikey\"\n}",
"_____no_output_____"
]
],
[
[
"### MetFrag\n\nMetFrag performs an in-silico fragmentation for each candidate structure and compares the predicted and observed (from the MS2 spectrum) fragments. ",
"_____no_output_____"
]
],
[
[
"# SSVM (2D)\nsetting = {\"ds\": \"*\", \"mol_feat\": \"FCFP__binary__all__2D\", \"mol_id\": \"cid\", \"ms2scorer\": \"metfrag__norm\", \"ssvm_flavor\": \"default\", \"lloss_mode\": \"mol_feat_fps\"}\nres__ssvm__metfrag__2D = load_topk__publication(\n setting, agg_setting, basedir=os.path.join(\"massbank__with_stereo\"), top_k_method=\"csi\", load_max_model_number=True\n)\n\n# SSVM (3D)\nsetting = {\"ds\": \"*\", \"mol_feat\": \"FCFP__binary__all__3D\", \"mol_id\": \"cid\", \"ms2scorer\": \"metfrag__norm\", \"ssvm_flavor\": \"default\", \"lloss_mode\": \"mol_feat_fps\"}\nres__ssvm__metfrag__3D = load_topk__publication(\n setting, agg_setting, basedir=os.path.join(\"massbank__with_stereo\"), top_k_method=\"csi\", load_max_model_number=True\n)\n\n# Perform some sanity checks\nassert res__ssvm__metfrag__2D[\"scoring_method\"].nunique() == 2\nassert res__ssvm__metfrag__3D[\"scoring_method\"].nunique() == 2\n\n_check_onlyms(res__ssvm__metfrag__2D, [res__ssvm__metfrag__3D])",
"Performed tests: [1500.]\n"
]
],
[
[
"#### Overview result table (LC-MS$^2$Struct)\n\n##### Without chrirality encoding (2D)",
"_____no_output_____"
]
],
[
[
"tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__metfrag__2D, test=\"ttest\", ks=[1, 5, 10, 20])\ntab.pivot(columns=[\"k\", \"scoring_method\"], index=[\"dataset\", \"n_samples\"], values=\"top_k_acc__as_labels\")",
"_____no_output_____"
]
],
[
[
"##### With chirality encoding (3D)",
"_____no_output_____"
]
],
[
[
"tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__metfrag__3D, test=\"ttest\", ks=[1, 5, 10, 20])\ntab.pivot(columns=[\"k\", \"scoring_method\"], index=[\"dataset\", \"n_samples\"], values=\"top_k_acc__as_labels\")",
"_____no_output_____"
]
],
[
[
"## SIRIUS",
"_____no_output_____"
]
],
[
[
"# SSVM (2D)\nsetting = {\"ds\": \"*\", \"mol_feat\": \"FCFP__binary__all__2D\", \"mol_id\": \"cid\", \"ms2scorer\": \"sirius__norm\", \"ssvm_flavor\": \"default\", \"lloss_mode\": \"mol_feat_fps\"}\nres__ssvm__sirius__2D = load_topk__publication(\n setting, agg_setting, basedir=os.path.join(\"massbank__with_stereo\"), top_k_method=\"csi\", load_max_model_number=True\n)\n\n# SSVM (3D)\nsetting = {\"ds\": \"*\", \"mol_feat\": \"FCFP__binary__all__3D\", \"mol_id\": \"cid\", \"ms2scorer\": \"sirius__norm\", \"ssvm_flavor\": \"default\", \"lloss_mode\": \"mol_feat_fps\"}\nres__ssvm__sirius__3D = load_topk__publication(\n setting, agg_setting, basedir=os.path.join(\"massbank__with_stereo\"), top_k_method=\"csi\", load_max_model_number=True\n)\n\n# Perform some sanity checks\nassert res__ssvm__sirius__2D[\"scoring_method\"].nunique() == 2\nassert res__ssvm__sirius__3D[\"scoring_method\"].nunique() == 2\n\n_check_onlyms(res__ssvm__sirius__2D, [res__ssvm__sirius__3D])",
"Performed tests: [1500.]\n"
]
],
[
[
"#### Overview result table (LC-MS$^2$Struct)\n\n##### Without chrirality encoding (2D)",
"_____no_output_____"
]
],
[
[
"tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__sirius__2D, test=\"ttest\", ks=[1, 5, 10, 20])\ntab.pivot(columns=[\"k\", \"scoring_method\"], index=[\"dataset\", \"n_samples\"], values=\"top_k_acc__as_labels\")",
"_____no_output_____"
]
],
[
[
"##### With chirality encoding (3D)",
"_____no_output_____"
]
],
[
[
"tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__sirius__3D, test=\"ttest\", ks=[1, 5, 10, 20])\ntab.pivot(columns=[\"k\", \"scoring_method\"], index=[\"dataset\", \"n_samples\"], values=\"top_k_acc__as_labels\")",
"_____no_output_____"
]
],
[
[
"## CFM-ID",
"_____no_output_____"
]
],
[
[
"# SSVM (2D)\nsetting = {\"ds\": \"*\", \"mol_feat\": \"FCFP__binary__all__2D\", \"mol_id\": \"cid\", \"ms2scorer\": \"cfmid4__norm\", \"ssvm_flavor\": \"default\", \"lloss_mode\": \"mol_feat_fps\"}\nres__ssvm__cfmid4__2D = load_topk__publication(\n setting, agg_setting, basedir=os.path.join(\"massbank__with_stereo\"), top_k_method=\"csi\", load_max_model_number=True\n)\n\n# SSVM (3D)\nsetting = {\"ds\": \"*\", \"mol_feat\": \"FCFP__binary__all__3D\", \"mol_id\": \"cid\", \"ms2scorer\": \"cfmid4__norm\", \"ssvm_flavor\": \"default\", \"lloss_mode\": \"mol_feat_fps\"}\nres__ssvm__cfmid4__3D = load_topk__publication(\n setting, agg_setting, basedir=os.path.join(\"massbank__with_stereo\"), top_k_method=\"csi\", load_max_model_number=True\n)\n\n# Perform some sanity checks\nassert res__ssvm__cfmid4__2D[\"scoring_method\"].nunique() == 2\nassert res__ssvm__cfmid4__3D[\"scoring_method\"].nunique() == 2\n\n_check_onlyms(res__ssvm__cfmid4__2D, [res__ssvm__cfmid4__3D])",
"Performed tests: [1500.]\n"
]
],
[
[
"#### Overview result table (LC-MS$^2$Struct)\n\n##### Without chrirality encoding (2D)",
"_____no_output_____"
]
],
[
[
"tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__cfmid4__2D, test=\"ttest\", ks=[1, 5, 10, 20])\ntab.pivot(columns=[\"k\", \"scoring_method\"], index=[\"dataset\", \"n_samples\"], values=\"top_k_acc__as_labels\")",
"_____no_output_____"
]
],
[
[
"##### With chirality encoding (3D)",
"_____no_output_____"
]
],
[
[
"tab = table__top_k_acc_per_dataset_with_significance(res__ssvm__cfmid4__3D, test=\"ttest\", ks=[1, 5, 10, 20])\ntab.pivot(columns=[\"k\", \"scoring_method\"], index=[\"dataset\", \"n_samples\"], values=\"top_k_acc__as_labels\")",
"_____no_output_____"
]
],
[
[
"## Visualization of the ranking performance\n\nTop-k curve for each MS2-scoring method: CFM-ID, MetFrag and SIRIUS.",
"_____no_output_____"
]
],
[
[
"__tmp__03__a = plot__03__a(\n res__baseline=[\n res__ssvm__cfmid4__2D[(res__ssvm__cfmid4__2D[\"scoring_method\"] == \"Only MS\") & (res__ssvm__cfmid4__2D[\"n_models\"] == 8)].assign(scoring_method=\"Only-MS$^2$\", ms2scorer=\"CFM-ID\"),\n res__ssvm__metfrag__2D[(res__ssvm__metfrag__2D[\"scoring_method\"] == \"Only MS\") & (res__ssvm__metfrag__2D[\"n_models\"] == 8)].assign(scoring_method=\"Only-MS$^2$\", ms2scorer=\"MetFrag\"),\n res__ssvm__sirius__2D[(res__ssvm__sirius__2D[\"scoring_method\"] == \"Only MS\") & (res__ssvm__sirius__2D[\"n_models\"] == 8)].assign(scoring_method=\"Only-MS$^2$\", ms2scorer=\"SIRIUS\")\n ], \n res__ssvm__2D=[\n res__ssvm__cfmid4__2D[(res__ssvm__cfmid4__2D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__cfmid4__2D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (2D)\", ms2scorer=\"CFM-ID\"),\n res__ssvm__metfrag__2D[(res__ssvm__metfrag__2D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__metfrag__2D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (2D)\", ms2scorer=\"MetFrag\"),\n res__ssvm__sirius__2D[(res__ssvm__sirius__2D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__sirius__2D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (2D)\", ms2scorer=\"SIRIUS\")\n ],\n res__ssvm__3D=[\n res__ssvm__cfmid4__3D[(res__ssvm__cfmid4__3D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__cfmid4__3D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (3D)\", ms2scorer=\"CFM-ID\"),\n res__ssvm__metfrag__3D[(res__ssvm__metfrag__3D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__metfrag__3D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (3D)\", ms2scorer=\"MetFrag\"),\n res__ssvm__sirius__3D[(res__ssvm__sirius__3D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__sirius__3D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (3D)\", ms2scorer=\"SIRIUS\")\n ],\n max_k=20,\n weighted_average=False,\n raise_on_missing_results=True,\n aspect=\"landscape\",\n 
verbose=True\n)\n\nfor ext in [\"pdf\", \"svg\"]:\n plt.savefig(os.path.join(\".\", os.extsep.join([\"plot_03__a\", ext])))",
"We expect 4700 result rows\nRows (MS2-scorer='CFM-ID'):\nNumber of samples: 94\nBaseline: 4700\nSSVM (2D): 4700\n\ttop-1: baseline = 3.0%, other = 3.4%, improvement = 0.4%p, gain = 13.8%, n = 0.2\n\ttop-20: baseline = 32.3%, other = 35.8%, improvement = 3.5%p, gain = 10.8%, n = 1.8\nSSVM (3D): 4700\n\ttop-1: baseline = 3.0%, other = 5.6%, improvement = 2.6%p, gain = 87.3%, n = 1.4\n\ttop-20: baseline = 32.3%, other = 41.4%, improvement = 9.2%p, gain = 28.5%, n = 4.7\nRows (MS2-scorer='MetFrag'):\nNumber of samples: 94\nBaseline: 4700\nSSVM (2D): 4700\n\ttop-1: baseline = 4.0%, other = 4.3%, improvement = 0.3%p, gain = 7.7%, n = 0.2\n\ttop-20: baseline = 37.6%, other = 38.7%, improvement = 1.2%p, gain = 3.1%, n = 0.6\nSSVM (3D): 4700\n\ttop-1: baseline = 4.0%, other = 7.8%, improvement = 3.8%p, gain = 95.9%, n = 2.0\n\ttop-20: baseline = 37.6%, other = 44.8%, improvement = 7.2%p, gain = 19.1%, n = 3.7\nRows (MS2-scorer='SIRIUS'):\nNumber of samples: 94\nBaseline: 4700\nSSVM (2D): 4700\n\ttop-1: baseline = 7.3%, other = 7.4%, improvement = 0.1%p, gain = 1.0%, n = 0.0\n\ttop-20: baseline = 54.1%, other = 54.1%, improvement = -0.1%p, gain = -0.1%, n = -0.0\nSSVM (3D): 4700\n\ttop-1: baseline = 7.3%, other = 10.6%, improvement = 3.2%p, gain = 44.3%, n = 1.6\n\ttop-20: baseline = 54.1%, other = 58.7%, improvement = 4.6%p, gain = 8.5%, n = 2.4\n"
],
[
"__tmp__03__b = plot__03__b(\n res__baseline=[\n res__ssvm__cfmid4__2D[(res__ssvm__cfmid4__2D[\"scoring_method\"] == \"Only MS\") & (res__ssvm__cfmid4__2D[\"n_models\"] == 8)].assign(scoring_method=\"Only-MS$^2$\", ms2scorer=\"CFM-ID\"),\n res__ssvm__metfrag__2D[(res__ssvm__metfrag__2D[\"scoring_method\"] == \"Only MS\") & (res__ssvm__metfrag__2D[\"n_models\"] == 8)].assign(scoring_method=\"Only-MS$^2$\", ms2scorer=\"MetFrag\"),\n res__ssvm__sirius__2D[(res__ssvm__sirius__2D[\"scoring_method\"] == \"Only MS\") & (res__ssvm__sirius__2D[\"n_models\"] == 8)].assign(scoring_method=\"Only-MS$^2$\", ms2scorer=\"SIRIUS\")\n ], \n res__ssvm__2D=[\n res__ssvm__cfmid4__2D[(res__ssvm__cfmid4__2D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__cfmid4__2D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (2D)\", ms2scorer=\"CFM-ID\"),\n res__ssvm__metfrag__2D[(res__ssvm__metfrag__2D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__metfrag__2D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (2D)\", ms2scorer=\"MetFrag\"),\n res__ssvm__sirius__2D[(res__ssvm__sirius__2D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__sirius__2D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (2D)\", ms2scorer=\"SIRIUS\")\n ],\n res__ssvm__3D=[\n res__ssvm__cfmid4__3D[(res__ssvm__cfmid4__3D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__cfmid4__3D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (3D)\", ms2scorer=\"CFM-ID\"),\n res__ssvm__metfrag__3D[(res__ssvm__metfrag__3D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__metfrag__3D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (3D)\", ms2scorer=\"MetFrag\"),\n res__ssvm__sirius__3D[(res__ssvm__sirius__3D[\"scoring_method\"] == \"MS + RT\") & (res__ssvm__sirius__3D[\"n_models\"] == 8)].assign(scoring_method=\"LC-MS$^2$Struct (3D)\", ms2scorer=\"SIRIUS\")\n ],\n ks=[1, 20],\n weighted_average=False,\n raise_on_missing_results=True,\n ctype=\"improvement\",\n 
label_format=\".0f\"\n)\n\nfor ext in [\"pdf\", \"svg\"]:\n plt.savefig(os.path.join(\".\", os.extsep.join([\"plot_03__b\", ext])))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74164cad053da3a3f4cba0d098f7ba04720d97f | 21,402 | ipynb | Jupyter Notebook | cnn-karpathy/lec04.ipynb | JasonWayne/course-notes | feff7a0636e7f8f2353c1dea24fe25296a8b33c3 | [
"MIT"
] | null | null | null | cnn-karpathy/lec04.ipynb | JasonWayne/course-notes | feff7a0636e7f8f2353c1dea24fe25296a8b33c3 | [
"MIT"
] | null | null | null | cnn-karpathy/lec04.ipynb | JasonWayne/course-notes | feff7a0636e7f8f2353c1dea24fe25296a8b33c3 | [
"MIT"
] | null | null | null | 16.488444 | 78 | 0.476591 | [
[
[
"##### 1",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 2",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 3",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 4",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 5",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 6",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 7",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 8",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 9",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 10",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 11",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 12",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 13",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 14",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 15",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 16",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 17",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 18",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 19",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 20",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 21",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 22",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 23",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 24",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 25",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 26",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 27",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 28",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 29",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 30",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 31",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 32",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 33",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 34",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 35",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 36",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 37",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 38",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 39",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 40",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 41",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 42",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 43",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 44",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 45",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 46",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 47",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 48",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 49",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 50",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 51",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 52",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 53",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 54",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 55",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 56",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 57",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 58",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 59",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 60",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 61",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 62",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 63",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 64",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 65",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 66",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 67",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 68",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 69",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 70",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 71",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 72",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 73",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 74",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 75",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 76",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 77",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 78",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 79",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 80",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 81",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 82",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 83",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### 84",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e74172b1623e818bc26c7ee171143deed8e56855 | 472,348 | ipynb | Jupyter Notebook | monte-carlo/Monte_Carlo.ipynb | piotrbazan/deep-reinforcement-learning | 165ab29a1d85630b7baa7ccb31a1ab91cd0c6413 | [
"MIT"
] | null | null | null | monte-carlo/Monte_Carlo.ipynb | piotrbazan/deep-reinforcement-learning | 165ab29a1d85630b7baa7ccb31a1ab91cd0c6413 | [
"MIT"
] | null | null | null | monte-carlo/Monte_Carlo.ipynb | piotrbazan/deep-reinforcement-learning | 165ab29a1d85630b7baa7ccb31a1ab91cd0c6413 | [
"MIT"
] | null | null | null | 922.554688 | 222,472 | 0.950464 | [
[
[
"# Monte Carlo Methods\n\nIn this notebook, you will write your own implementations of many Monte Carlo (MC) algorithms. \n\nWhile we have provided some starter code, you are welcome to erase these hints and write your code from scratch.\n\n### Part 0: Explore BlackjackEnv\n\nWe begin by importing the necessary packages.",
"_____no_output_____"
]
],
[
[
"import sys\nimport gym\nimport numpy as np\nfrom collections import defaultdict\n\nfrom plot_utils import plot_blackjack_values, plot_policy",
"_____no_output_____"
]
],
[
[
"Use the code cell below to create an instance of the [Blackjack](https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py) environment.",
"_____no_output_____"
]
],
[
[
"env = gym.make('Blackjack-v1')",
"_____no_output_____"
]
],
[
[
"Each state is a 3-tuple of:\n- the player's current sum $\\in \\{0, 1, \\ldots, 31\\}$,\n- the dealer's face up card $\\in \\{1, \\ldots, 10\\}$, and\n- whether or not the player has a usable ace (`no` $=0$, `yes` $=1$).\n\nThe agent has two potential actions:\n\n```\n STICK = 0\n HIT = 1\n```\nVerify this by running the code cell below.",
"_____no_output_____"
]
],
[
[
"print(env.observation_space)\nprint(env.action_space)",
"Tuple(Discrete(32), Discrete(11), Discrete(2))\nDiscrete(2)\n"
]
],
[
[
"Execute the code cell below to play Blackjack with a random policy. \n\n(_The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to get some experience with the output that is returned as the agent interacts with the environment._)",
"_____no_output_____"
]
],
[
[
"for i_episode in range(3):\n state = env.reset()\n while True:\n prev_state = state\n action = env.action_space.sample()\n state, reward, done, info = env.step(action)\n print(f\"S={prev_state}, A={action}, R={reward}, S'={state}\")\n if done:\n print('End game! Reward: ', reward)\n print('You won :)\\n') if reward > 0 else print('You lost :(\\n')\n break",
"S=(15, 10, False), A=0, R=-1.0, S'=(15, 10, False)\nEnd game! Reward: -1.0\nYou lost :(\n\nS=(11, 8, False), A=0, R=-1.0, S'=(11, 8, False)\nEnd game! Reward: -1.0\nYou lost :(\n\nS=(9, 6, False), A=0, R=1.0, S'=(9, 6, False)\nEnd game! Reward: 1.0\nYou won :)\n\n"
]
],
[
[
"### Part 1: MC Prediction\n\nIn this section, you will write your own implementation of MC prediction (for estimating the action-value function). \n\nWe will begin by investigating a policy where the player _almost_ always sticks if the sum of her cards exceeds 18. In particular, she selects action `STICK` with 80% probability if the sum is greater than 18; and, if the sum is 18 or below, she selects action `HIT` with 80% probability. The function `generate_episode_from_limit_stochastic` samples an episode using this policy. \n\nThe function accepts as **input**:\n- `bj_env`: This is an instance of OpenAI Gym's Blackjack environment.\n\nIt returns as **output**:\n- `episode`: This is a list of (state, action, reward) tuples (of tuples) and corresponds to $(S_0, A_0, R_1, \\ldots, S_{T-1}, A_{T-1}, R_{T})$, where $T$ is the final time step. In particular, `episode[i]` returns $(S_i, A_i, R_{i+1})$, and `episode[i][0]`, `episode[i][1]`, and `episode[i][2]` return $S_i$, $A_i$, and $R_{i+1}$, respectively.",
"_____no_output_____"
]
],
[
[
"def generate_episode_from_limit_stochastic(bj_env):\n episode = []\n state = bj_env.reset()\n while True:\n probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]\n action = np.random.choice(np.arange(2), p=probs)\n next_state, reward, done, info = bj_env.step(action)\n episode.append((state, action, reward))\n state = next_state\n if done:\n break\n return episode",
"_____no_output_____"
]
],
[
[
"Execute the code cell below to play Blackjack with the policy. \n\n(*The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to gain some familiarity with the output of the `generate_episode_from_limit_stochastic` function.*)",
"_____no_output_____"
]
],
[
[
"for i in range(3):\n print(generate_episode_from_limit_stochastic(env))",
"[((18, 1, True), 1, 0.0), ((13, 1, False), 1, 0.0), ((20, 1, False), 0, 0.0)]\n[((12, 10, False), 1, 0.0), ((20, 10, False), 0, 1.0)]\n[((14, 1, False), 0, -1.0)]\n"
]
],
[
[
"Now, you are ready to write your own implementation of MC prediction. Feel free to implement either first-visit or every-visit MC prediction; in the case of the Blackjack environment, the techniques are equivalent.\n\nYour algorithm has three arguments:\n- `env`: This is an instance of an OpenAI Gym environment.\n- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.\n- `generate_episode`: This is a function that returns an episode of interaction.\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as output:\n- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.",
"_____no_output_____"
]
],
[
[
"def mc_prediction_q(env, num_episodes, generate_episode, gamma=1.0, first_visit=True):\n # initialize empty dictionaries of arrays\n returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))\n N = defaultdict(lambda: np.zeros(env.action_space.n))\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n # loop over episodes\n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 1000 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n \n \n ## TODO: complete the function\n episode = generate_episode(env) \n episode = list(reversed(episode))\n visited = dict() # contains index of the first occurance if (state, action)\n if first_visit: \n for i, (state, action, reward) in enumerate(episode):\n visited[(state, action)] = i\n G = 0\n for i, (state, action, reward) in enumerate(episode):\n G += reward\n if not first_visit or visited[(state, action)] == i:\n returns_sum[state][action] += G\n N[state][action] += 1\n G *= gamma\n\n for k in returns_sum:\n Q[k] = returns_sum[k] / N[k] \n return Q",
"_____no_output_____"
]
],
[
[
"Use the cell below to obtain the action-value function estimate $Q$. We have also plotted the corresponding state-value function.\n\nTo check the accuracy of your implementation, compare the plot below to the corresponding plot in the solutions notebook **Monte_Carlo_Solution.ipynb**.",
"_____no_output_____"
]
],
[
[
"# obtain the action-value function\nQ = mc_prediction_q(env, 50000, generate_episode_from_limit_stochastic)\n\n# obtain the corresponding state-value function\nV_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \\\n for k, v in Q.items())\n\n# plot the state-value function\nplot_blackjack_values(V_to_plot)",
"<ipython-input-34-1d29bf91437b>:30: RuntimeWarning: invalid value encountered in true_divide\n Q[k] = returns_sum[k] / N[k]\n"
]
],
[
[
"### Part 2: MC Control\n\nIn this section, you will write your own implementation of constant-$\\alpha$ MC control. \n\nYour algorithm has four arguments:\n- `env`: This is an instance of an OpenAI Gym environment.\n- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.\n- `alpha`: This is the step-size parameter for the update step.\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as output:\n- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.\n- `policy`: This is a dictionary where `policy[s]` returns the action that the agent chooses after observing state `s`.\n\n(_Feel free to define additional functions to help you to organize your code._)",
"_____no_output_____"
]
],
[
[
"import pdb",
"_____no_output_____"
],
[
"def generate_episode_with_policy(env, policy, eps):\n episode = []\n state = env.reset() \n while True:\n if np.random.rand() > eps and state in policy: \n action = policy[state] # greedy\n else:\n action = env.action_space.sample()\n \n new_state, reward, done, info = env.step(action)\n episode.append((state, action, reward))\n if done:\n break\n state = new_state\n return episode \n\ndef mc_control(env, num_episodes, alpha, gamma=1.0):\n nA = env.action_space.n\n # initialize empty dictionary of arrays\n Q = defaultdict(lambda: np.zeros(nA))\n # loop over episodes\n eps = 1\n eps_delta = (1 - .1) / num_episodes\n policy = {}\n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 1000 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n \n ## TODO: complete the function\n episode = generate_episode_with_policy(env, policy, eps)\n states, actions, rewards = zip(*episode)\n discounts = np.array([gamma**i for i in range(len(rewards)+1)]) \n rewards = np.array(rewards)\n for i, (state, action) in enumerate(zip(states, actions)):\n G = sum(rewards[i:] * discounts[:-(i+1)])\n Q[state][action] *= (1 - alpha)\n Q[state][action] += alpha * G\n \n policy= {k:np.argmax(Q[k]) for k in Q}\n eps -= eps_delta\n \n return policy, Q",
"_____no_output_____"
]
],
[
[
"Use the cell below to obtain the estimated optimal policy and action-value function. Note that you should fill in your own values for the `num_episodes` and `alpha` parameters.",
"_____no_output_____"
]
],
[
[
"# obtain the estimated optimal policy and action-value function\npolicy, Q = mc_control(env, 50000, .2)",
"Episode 50000/50000."
]
],
[
[
"Next, we plot the corresponding state-value function.",
"_____no_output_____"
]
],
[
[
"# obtain the corresponding state-value function\nV = dict((k,np.max(v)) for k, v in Q.items())\n\n# plot the state-value function\nplot_blackjack_values(V)",
"_____no_output_____"
]
],
[
[
"Finally, we visualize the policy that is estimated to be optimal.",
"_____no_output_____"
]
],
[
[
"# plot the policy\nplot_policy(policy)",
"_____no_output_____"
]
],
[
[
"The **true** optimal policy $\\pi_*$ can be found in Figure 5.2 of the [textbook](http://go.udacity.com/rl-textbook) (and appears below). Compare your final estimate to the optimal policy - how close are you able to get? If you are not happy with the performance of your algorithm, take the time to tweak the decay rate of $\\epsilon$, change the value of $\\alpha$, and/or run the algorithm for more episodes to attain better results.\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7417e1fa79d0cc93f2c695df5bb97fdc4f888c2 | 2,897 | ipynb | Jupyter Notebook | mlops/mlops-zoomcamp/03-orchestration/notebooks/homework.ipynb | Elkinmt19/data-engineer-dojo | 15857ba5b72681e15c4b170f5a2505513e6d43ec | [
"MIT"
] | 1 | 2022-01-14T03:16:23.000Z | 2022-01-14T03:16:23.000Z | mlops/mlops-zoomcamp/03-orchestration/notebooks/homework.ipynb | Elkinmt19/data-engineer-dojo | 15857ba5b72681e15c4b170f5a2505513e6d43ec | [
"MIT"
] | null | null | null | mlops/mlops-zoomcamp/03-orchestration/notebooks/homework.ipynb | Elkinmt19/data-engineer-dojo | 15857ba5b72681e15c4b170f5a2505513e6d43ec | [
"MIT"
] | null | null | null | 22.284615 | 116 | 0.563341 | [
[
[
"# Homework solution - 03 workflow orchestration\n\nIn this notebook are the answers of the homework of the module 03 of the course (Workflow Orchestration).",
"_____no_output_____"
],
[
"## Q1. Converting the script to a Prefect flow",
"_____no_output_____"
],
[
"The one task that has to be called using the `.result` method inside the flow is `train_model`.\n\nRefer to [training_fhv_model.py](../scripts/training_fhv_model.py)",
"_____no_output_____"
],
[
"## Q2. Parameterizing the flow",
"_____no_output_____"
],
[
"The validation RMSE is `11.637`, this result was got from the following prefect log:\n\n```bash \n<timestamp> | INFO | Task run 'run_model-6559300c-0' - The MSE of validation is: 11.637032699898606\n```\nRefer to [training_fhv_model.py](../scripts/training_fhv_model.py)",
"_____no_output_____"
],
[
"## Q3. Saving the model and artifacts",
"_____no_output_____"
],
[
"The size of the `DictVectorizer` object that was trained at the date 2021-08-15 is `12.88KB` or `13000 bytes`.",
"_____no_output_____"
],
[
"## Q4. Creating a deployment with a CronSchedule",
"_____no_output_____"
],
[
"The Cron expression to run a flow at 9 AM every 15th of the month is `[0 9 15 * *]`. ",
"_____no_output_____"
],
[
"## Q5. Viewing the Deployment",
"_____no_output_____"
],
[
"The number of upcoming runs that i got in the prefect ui dashboard is `3`.",
"_____no_output_____"
],
[
"## Q6. Creating a work-queue",
"_____no_output_____"
],
[
"The prefect command to view the available work-queues is `prefect work-queue ls`.",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e741807fb6dcb4a4195857959218f7aff8f3b804 | 14,121 | ipynb | Jupyter Notebook | docs/examples/custom-model/README.ipynb | RafalSkolasinski/tempo | f3ed81fcd533e2912f03e9c818853ea2499a32f7 | [
"Apache-2.0"
] | null | null | null | docs/examples/custom-model/README.ipynb | RafalSkolasinski/tempo | f3ed81fcd533e2912f03e9c818853ea2499a32f7 | [
"Apache-2.0"
] | null | null | null | docs/examples/custom-model/README.ipynb | RafalSkolasinski/tempo | f3ed81fcd533e2912f03e9c818853ea2499a32f7 | [
"Apache-2.0"
] | null | null | null | 28.017857 | 225 | 0.564762 | [
[
[
"# Serving a Custom Model\n\nThis example walks you through how to deploy a custom model with Tempo.\nIn particular, we will walk you through how to write custom logic to run inference on a [numpyro model](http://num.pyro.ai/en/stable/).\n\nNote that we've picked `numpyro` for this example simply because it's not supported out of the box, but it should be possible to adapt this example easily to any other custom model.",
"_____no_output_____"
],
[
"## Prerequisites\n\nThis notebooks needs to be run in the `tempo-examples` conda environment defined below. Create from project root folder:\n\n```bash\nconda env create --name tempo-examples --file conda/tempo-examples.yaml\n```",
"_____no_output_____"
],
[
"## Project Structure",
"_____no_output_____"
]
],
[
[
"!tree -P \"*.py\" -I \"__init__.py|__pycache__\" -L 2",
"_____no_output_____"
]
],
[
[
"## Training\n\nThe first step will be to train our model.\nThis will be a very simple bayesian regression model, based on an example provided in the [`numpyro` docs](https://nbviewer.jupyter.org/github/pyro-ppl/numpyro/blob/master/notebooks/source/bayesian_regression.ipynb).\n\nSince this is a probabilistic model, during training we will compute an approximation to the posterior distribution of our model using MCMC.",
"_____no_output_____"
]
],
[
[
"# %load src/train.py\n# Original source code and more details can be found in:\n# https://nbviewer.jupyter.org/github/pyro-ppl/numpyro/blob/master/notebooks/source/bayesian_regression.ipynb\n\n\nimport numpy as np\nimport pandas as pd\nfrom jax import random\nfrom numpyro.infer import MCMC, NUTS\nfrom src.tempo import model_function\n\n\ndef train():\n DATASET_URL = \"https://raw.githubusercontent.com/rmcelreath/rethinking/master/data/WaffleDivorce.csv\"\n dset = pd.read_csv(DATASET_URL, sep=\";\")\n\n def standardize(x):\n (x - x.mean()) / x.std()\n\n dset[\"AgeScaled\"] = dset.MedianAgeMarriage.pipe(standardize)\n dset[\"MarriageScaled\"] = dset.Marriage.pipe(standardize)\n dset[\"DivorceScaled\"] = dset.Divorce.pipe(standardize)\n\n # Start from this source of randomness. We will split keys for subsequent operations.\n rng_key = random.PRNGKey(0)\n rng_key, rng_key_ = random.split(rng_key)\n\n num_warmup, num_samples = 1000, 2000\n\n # Run NUTS.\n kernel = NUTS(model_function)\n mcmc = MCMC(kernel, num_warmup, num_samples)\n mcmc.run(rng_key_, marriage=dset.MarriageScaled.values, divorce=dset.DivorceScaled.values)\n mcmc.print_summary()\n return mcmc\n\n\ndef save(mcmc, folder: str):\n import json\n\n samples = mcmc.get_samples()\n serialisable = {}\n for k, v in samples.items():\n serialisable[k] = np.asarray(v).tolist()\n\n model_file_name = f\"{folder}/numpyro-divorce.json\"\n with open(model_file_name, \"w\") as model_file:\n json.dump(serialisable, model_file)\n",
"_____no_output_____"
],
[
"import os\nfrom tempo.utils import logger\nimport logging\nimport numpy as np\nlogger.setLevel(logging.ERROR)\nlogging.basicConfig(level=logging.ERROR)\nARTIFACTS_FOLDER = os.getcwd()+\"/artifacts\"\nfrom src.train import train, save, model_function\nmcmc = train()",
"_____no_output_____"
]
],
[
[
"### Saving trained model\n\nNow that we have _trained_ our model, the next step will be to save it so that it can be loaded afterwards at serving-time.\nNote that, since this is a probabilistic model, we will only need to save the traces that approximate the posterior distribution over latent parameters.\n\nThis will get saved in a `numpyro-divorce.json` file.",
"_____no_output_____"
]
],
[
[
"save(mcmc, ARTIFACTS_FOLDER)",
"_____no_output_____"
]
],
[
[
"## Serving\n\nThe next step will be to serve our model through Tempo. \nFor that, we will implement a custom model to perform inference using our custom `numpyro` model.\nOnce our custom model is defined, we will be able to deploy it on any of the available runtimes using the same environment that we used for training.",
"_____no_output_____"
],
[
"### Custom inference logic \n\nOur custom model will be responsible of:\n\n- Loading the model from the set samples we saved previously.\n- Running inference using our model structure, and the posterior approximated from the samples.\n\nWith Tempo, this can be achieved as:",
"_____no_output_____"
]
],
[
[
"# %load src/tempo.py\nimport os\nimport json\nimport numpy as np\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro.infer import Predictive\nfrom jax import random\nfrom tempo import model, ModelFramework\n\ndef model_function(marriage : np.ndarray = None, age : np.ndarray = None, divorce : np.ndarray = None):\n a = numpyro.sample('a', dist.Normal(0., 0.2))\n M, A = 0., 0.\n if marriage is not None:\n bM = numpyro.sample('bM', dist.Normal(0., 0.5))\n M = bM * marriage\n if age is not None:\n bA = numpyro.sample('bA', dist.Normal(0., 0.5))\n A = bA * age\n sigma = numpyro.sample('sigma', dist.Exponential(1.))\n mu = a + M + A\n numpyro.sample('obs', dist.Normal(mu, sigma), obs=divorce)\n\n\ndef get_tempo_artifact(local_folder: str):\n @model(\n name='numpyro-divorce',\n platform=ModelFramework.Custom,\n local_folder=local_folder,\n uri=\"s3://tempo/divorce\",\n )\n def numpyro_divorce(marriage: np.ndarray, age: np.ndarray) -> np.ndarray:\n rng_key = random.PRNGKey(0)\n predictions = numpyro_divorce.context.predictive_dist(\n rng_key=rng_key,\n marriage=marriage,\n age=age\n )\n\n mean = predictions['obs'].mean(axis=0)\n return np.asarray(mean)\n\n @numpyro_divorce.loadmethod\n def load_numpyro_divorce():\n model_uri = os.path.join(\n numpyro_divorce.details.local_folder,\n \"numpyro-divorce.json\"\n )\n\n with open(model_uri) as model_file:\n raw_samples = json.load(model_file)\n\n samples = {}\n for k, v in raw_samples.items():\n samples[k] = np.array(v)\n\n print(model_function.__module__)\n numpyro_divorce.context.predictive_dist = Predictive(model_function, samples)\n\n return numpyro_divorce\n\n\n",
"_____no_output_____"
],
[
"from src.tempo import get_tempo_artifact\nnumpyro_divorce = get_tempo_artifact(ARTIFACTS_FOLDER)",
"_____no_output_____"
]
],
[
[
"We can now test our custom logic by running inference locally.",
"_____no_output_____"
]
],
[
[
"marriage = np.array([28.0])\nage = np.array([63])\npred = numpyro_divorce(marriage=marriage, age=age)\n\nprint(pred)",
"_____no_output_____"
]
],
[
[
"### Deploy the Model to Docker\n\nFinally, we'll be able to deploy our model using Tempo against one of the available runtimes (i.e. Kubernetes, Docker or Seldon Deploy).\n\nWe'll deploy first to Docker to test.",
"_____no_output_____"
]
],
[
[
"!cat artifacts/conda.yaml",
"_____no_output_____"
],
[
"from tempo.serve.loader import save\nsave(numpyro_divorce)",
"_____no_output_____"
],
[
"from tempo import deploy\nremote_model = deploy(numpyro_divorce)",
"_____no_output_____"
]
],
[
[
"We can now test our model deployed in Docker as:",
"_____no_output_____"
]
],
[
[
"remote_model.predict(marriage=marriage, age=age)",
"_____no_output_____"
],
[
"remote_model.undeploy()",
"_____no_output_____"
]
],
[
[
"## Production Option 1 (Deploy to Kubernetes with Tempo)\n\n * Here we illustrate how to run the final models in \"production\" on Kubernetes by using Tempo to deploy\n \n### Prerequisites\n \nCreate a Kind Kubernetes cluster with Minio and Seldon Core installed using Ansible as described [here](../../overview/quickstart.md#kubernetes-cluster-with-seldon-core).",
"_____no_output_____"
]
],
[
[
"!kubectl apply -f k8s/rbac -n production",
"_____no_output_____"
],
[
"from tempo.examples.minio import create_minio_rclone\nimport os\ncreate_minio_rclone(os.getcwd()+\"/rclone.conf\")",
"_____no_output_____"
],
[
"from tempo.serve.loader import upload\nupload(numpyro_divorce)",
"_____no_output_____"
],
[
"from tempo.serve.metadata import KubernetesOptions\nfrom tempo.seldon.k8s import SeldonCoreOptions\nruntime_options = SeldonCoreOptions(\n k8s_options=KubernetesOptions(\n namespace=\"production\",\n authSecretName=\"minio-secret\"\n )\n )",
"_____no_output_____"
],
[
"from tempo import deploy\nremote_model = deploy(numpyro_divorce, options=runtime_options)",
"_____no_output_____"
],
[
"remote_model.predict(marriage=marriage, age=age)",
"_____no_output_____"
],
[
"remote_model.undeploy()",
"_____no_output_____"
]
],
[
[
"## Production Option 2 (Gitops)\n\n * We create yaml to provide to our DevOps team to deploy to a production cluster\n * We add Kustomize patches to modify the base Kubernetes yaml created by Tempo",
"_____no_output_____"
]
],
[
[
"from tempo.seldon.k8s import SeldonKubernetesRuntime\nfrom tempo.serve.metadata import RuntimeOptions, KubernetesOptions\nruntime_options = RuntimeOptions(\n k8s_options=KubernetesOptions(\n namespace=\"production\",\n authSecretName=\"minio-secret\"\n )\n )\nk8s_runtime = SeldonKubernetesRuntime(runtime_options)\nyaml_str = k8s_runtime.manifest(numpyro_divorce)\nwith open(os.getcwd()+\"/k8s/tempo.yaml\",\"w\") as f:\n f.write(yaml_str)",
"_____no_output_____"
],
[
"!kustomize build k8s",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e741b3a087b080b9d57a8b95d8639c02fe685efe | 73,144 | ipynb | Jupyter Notebook | ch06-classes-objects.ipynb | lkissin2/examples | b1343c7c4061bdf11c250beeffb63c018253d4c8 | [
"CC0-1.0"
] | null | null | null | ch06-classes-objects.ipynb | lkissin2/examples | b1343c7c4061bdf11c250beeffb63c018253d4c8 | [
"CC0-1.0"
] | null | null | null | ch06-classes-objects.ipynb | lkissin2/examples | b1343c7c4061bdf11c250beeffb63c018253d4c8 | [
"CC0-1.0"
] | null | null | null | 32.222026 | 1,591 | 0.481448 | [
[
[
"# Classes and Objects",
"_____no_output_____"
],
[
"Object Orienation is a powerful tool that allows scientists to write larger, more complex simulations. Object orientation organizes data, methods, and functions into classes. Pages 117 and 118 provide a great introduction to this topic.",
"_____no_output_____"
],
[
"## Objects\n\nEverything in python is an object. We will use the help() function to view the docstrings of a simple object. The following exercise tells us that the integer 1 is an object belonging to the class int.",
"_____no_output_____"
]
],
[
[
"a = 1\nhelp(a)",
"Help on int object:\n\nclass int(object)\n | int(x=0) -> integer\n | int(x, base=10) -> integer\n | \n | Convert a number or string to an integer, or return 0 if no arguments\n | are given. If x is a number, return x.__int__(). For floating point\n | numbers, this truncates towards zero.\n | \n | If x is not a number or if base is given, then x must be a string,\n | bytes, or bytearray instance representing an integer literal in the\n | given base. The literal can be preceded by '+' or '-' and be surrounded\n | by whitespace. The base defaults to 10. Valid bases are 0 and 2-36.\n | Base 0 means to interpret the base from the string as an integer literal.\n | >>> int('0b100', base=0)\n | 4\n | \n | Methods defined here:\n | \n | __abs__(self, /)\n | abs(self)\n | \n | __add__(self, value, /)\n | Return self+value.\n | \n | __and__(self, value, /)\n | Return self&value.\n | \n | __bool__(self, /)\n | self != 0\n | \n | __ceil__(...)\n | Ceiling of an Integral returns itself.\n | \n | __divmod__(self, value, /)\n | Return divmod(self, value).\n | \n | __eq__(self, value, /)\n | Return self==value.\n | \n | __float__(self, /)\n | float(self)\n | \n | __floor__(...)\n | Flooring an Integral returns itself.\n | \n | __floordiv__(self, value, /)\n | Return self//value.\n | \n | __format__(...)\n | default object formatter\n | \n | __ge__(self, value, /)\n | Return self>=value.\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __getnewargs__(...)\n | \n | __gt__(self, value, /)\n | Return self>value.\n | \n | __hash__(self, /)\n | Return hash(self).\n | \n | __index__(self, /)\n | Return self converted to an integer, if self is suitable for use as an index into a list.\n | \n | __int__(self, /)\n | int(self)\n | \n | __invert__(self, /)\n | ~self\n | \n | __le__(self, value, /)\n | Return self<=value.\n | \n | __lshift__(self, value, /)\n | Return self<<value.\n | \n | __lt__(self, value, /)\n | Return self<value.\n | \n | __mod__(self, 
value, /)\n | Return self%value.\n | \n | __mul__(self, value, /)\n | Return self*value.\n | \n | __ne__(self, value, /)\n | Return self!=value.\n | \n | __neg__(self, /)\n | -self\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. See help(type) for accurate signature.\n | \n | __or__(self, value, /)\n | Return self|value.\n | \n | __pos__(self, /)\n | +self\n | \n | __pow__(self, value, mod=None, /)\n | Return pow(self, value, mod).\n | \n | __radd__(self, value, /)\n | Return value+self.\n | \n | __rand__(self, value, /)\n | Return value&self.\n | \n | __rdivmod__(self, value, /)\n | Return divmod(value, self).\n | \n | __repr__(self, /)\n | Return repr(self).\n | \n | __rfloordiv__(self, value, /)\n | Return value//self.\n | \n | __rlshift__(self, value, /)\n | Return value<<self.\n | \n | __rmod__(self, value, /)\n | Return value%self.\n | \n | __rmul__(self, value, /)\n | Return value*self.\n | \n | __ror__(self, value, /)\n | Return value|self.\n | \n | __round__(...)\n | Rounding an Integral returns itself.\n | Rounding with an ndigits argument also returns an integer.\n | \n | __rpow__(self, value, mod=None, /)\n | Return pow(value, self, mod).\n | \n | __rrshift__(self, value, /)\n | Return value>>self.\n | \n | __rshift__(self, value, /)\n | Return self>>value.\n | \n | __rsub__(self, value, /)\n | Return value-self.\n | \n | __rtruediv__(self, value, /)\n | Return value/self.\n | \n | __rxor__(self, value, /)\n | Return value^self.\n | \n | __sizeof__(...)\n | Returns size in memory, in bytes\n | \n | __str__(self, /)\n | Return str(self).\n | \n | __sub__(self, value, /)\n | Return self-value.\n | \n | __truediv__(self, value, /)\n | Return self/value.\n | \n | __trunc__(...)\n | Truncating an Integral returns itself.\n | \n | __xor__(self, value, /)\n | Return self^value.\n | \n | bit_length(...)\n | int.bit_length() -> int\n | \n | Number of bits necessary to represent self in binary.\n | >>> bin(37)\n | 
'0b100101'\n | >>> (37).bit_length()\n | 6\n | \n | conjugate(...)\n | Returns self, the complex conjugate of any int.\n | \n | from_bytes(...) from builtins.type\n | int.from_bytes(bytes, byteorder, *, signed=False) -> int\n | \n | Return the integer represented by the given array of bytes.\n | \n | The bytes argument must be a bytes-like object (e.g. bytes or bytearray).\n | \n | The byteorder argument determines the byte order used to represent the\n | integer. If byteorder is 'big', the most significant byte is at the\n | beginning of the byte array. If byteorder is 'little', the most\n | significant byte is at the end of the byte array. To request the native\n | byte order of the host system, use `sys.byteorder' as the byte order value.\n | \n | The signed keyword-only argument indicates whether two's complement is\n | used to represent the integer.\n | \n | to_bytes(...)\n | int.to_bytes(length, byteorder, *, signed=False) -> bytes\n | \n | Return an array of bytes representing an integer.\n | \n | The integer is represented using length bytes. An OverflowError is\n | raised if the integer is not representable with the given number of\n | bytes.\n | \n | The byteorder argument determines the byte order used to represent the\n | integer. If byteorder is 'big', the most significant byte is at the\n | beginning of the byte array. If byteorder is 'little', the most\n | significant byte is at the end of the byte array. To request the native\n | byte order of the host system, use `sys.byteorder' as the byte order value.\n | \n | The signed keyword-only argument determines whether two's complement is\n | used to represent the integer. 
If signed is False and a negative integer\n | is given, an OverflowError is raised.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | denominator\n | the denominator of a rational number in lowest terms\n | \n | imag\n | the imaginary part of a complex number\n | \n | numerator\n | the numerator of a rational number in lowest terms\n | \n | real\n | the real part of a complex number\n\n"
]
],
[
[
"The help() function has told us that the integer is an object. This object must have be associated with rules and behaviors. The dir() function will tell us about some of those behaviors. The dir() function lists all of the attributes and methods associated with the argument. ",
"_____no_output_____"
]
],
[
[
"a = 1\ndir(a)",
"_____no_output_____"
]
],
[
[
"Dir() has told us that objects in the int class have absolute values(\\__abs__), and that they may be added together (\\__add__). These objects also have real (real) and imaginary (imag) components.",
"_____no_output_____"
]
],
[
[
"a = 1\na.__abs__()",
"_____no_output_____"
],
[
"b = -2\nb.__abs__()",
"_____no_output_____"
]
],
[
[
"In the preceding cells, we have directly called the \\__abs__ method on two objects. However, it is almost never a good idea to directly call these double underscore (aka \"dunder\") methods. It is much safer to get the absolute value by just using the abs() function, as follows:",
"_____no_output_____"
]
],
[
[
"a = 1\nabs(a)",
"_____no_output_____"
],
[
"b = -2\nabs(b)",
"_____no_output_____"
]
],
[
[
"The help() and dir() functions are applied to some of the data types from chapter 2 in the following cells. You should try examining every data type you can think of.",
"_____no_output_____"
]
],
[
[
"a = ' '\nhelp(a)\ndir(a)",
"Help on class str in module builtins:\n\nclass str(object)\n | str(object='') -> str\n | str(bytes_or_buffer[, encoding[, errors]]) -> str\n | \n | Create a new string object from the given object. If encoding or\n | errors is specified, then the object must expose a data buffer\n | that will be decoded using the given encoding and error handler.\n | Otherwise, returns the result of object.__str__() (if defined)\n | or repr(object).\n | encoding defaults to sys.getdefaultencoding().\n | errors defaults to 'strict'.\n | \n | Methods defined here:\n | \n | __add__(self, value, /)\n | Return self+value.\n | \n | __contains__(self, key, /)\n | Return key in self.\n | \n | __eq__(self, value, /)\n | Return self==value.\n | \n | __format__(...)\n | S.__format__(format_spec) -> str\n | \n | Return a formatted version of S as described by format_spec.\n | \n | __ge__(self, value, /)\n | Return self>=value.\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __getitem__(self, key, /)\n | Return self[key].\n | \n | __getnewargs__(...)\n | \n | __gt__(self, value, /)\n | Return self>value.\n | \n | __hash__(self, /)\n | Return hash(self).\n | \n | __iter__(self, /)\n | Implement iter(self).\n | \n | __le__(self, value, /)\n | Return self<=value.\n | \n | __len__(self, /)\n | Return len(self).\n | \n | __lt__(self, value, /)\n | Return self<value.\n | \n | __mod__(self, value, /)\n | Return self%value.\n | \n | __mul__(self, value, /)\n | Return self*value.n\n | \n | __ne__(self, value, /)\n | Return self!=value.\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. 
See help(type) for accurate signature.\n | \n | __repr__(self, /)\n | Return repr(self).\n | \n | __rmod__(self, value, /)\n | Return value%self.\n | \n | __rmul__(self, value, /)\n | Return self*value.\n | \n | __sizeof__(...)\n | S.__sizeof__() -> size of S in memory, in bytes\n | \n | __str__(self, /)\n | Return str(self).\n | \n | capitalize(...)\n | S.capitalize() -> str\n | \n | Return a capitalized version of S, i.e. make the first character\n | have upper case and the rest lower case.\n | \n | casefold(...)\n | S.casefold() -> str\n | \n | Return a version of S suitable for caseless comparisons.\n | \n | center(...)\n | S.center(width[, fillchar]) -> str\n | \n | Return S centered in a string of length width. Padding is\n | done using the specified fill character (default is a space)\n | \n | count(...)\n | S.count(sub[, start[, end]]) -> int\n | \n | Return the number of non-overlapping occurrences of substring sub in\n | string S[start:end]. Optional arguments start and end are\n | interpreted as in slice notation.\n | \n | encode(...)\n | S.encode(encoding='utf-8', errors='strict') -> bytes\n | \n | Encode S using the codec registered for encoding. Default encoding\n | is 'utf-8'. errors may be given to set a different error\n | handling scheme. Default is 'strict' meaning that encoding errors raise\n | a UnicodeEncodeError. 
Other possible values are 'ignore', 'replace' and\n | 'xmlcharrefreplace' as well as any other name registered with\n | codecs.register_error that can handle UnicodeEncodeErrors.\n | \n | endswith(...)\n | S.endswith(suffix[, start[, end]]) -> bool\n | \n | Return True if S ends with the specified suffix, False otherwise.\n | With optional start, test S beginning at that position.\n | With optional end, stop comparing S at that position.\n | suffix can also be a tuple of strings to try.\n | \n | expandtabs(...)\n | S.expandtabs(tabsize=8) -> str\n | \n | Return a copy of S where all tab characters are expanded using spaces.\n | If tabsize is not given, a tab size of 8 characters is assumed.\n | \n | find(...)\n | S.find(sub[, start[, end]]) -> int\n | \n | Return the lowest index in S where substring sub is found,\n | such that sub is contained within S[start:end]. Optional\n | arguments start and end are interpreted as in slice notation.\n | \n | Return -1 on failure.\n | \n | format(...)\n | S.format(*args, **kwargs) -> str\n | \n | Return a formatted version of S, using substitutions from args and kwargs.\n | The substitutions are identified by braces ('{' and '}').\n | \n | format_map(...)\n | S.format_map(mapping) -> str\n | \n | Return a formatted version of S, using substitutions from mapping.\n | The substitutions are identified by braces ('{' and '}').\n | \n | index(...)\n | S.index(sub[, start[, end]]) -> int\n | \n | Like S.find() but raise ValueError when the substring is not found.\n | \n | isalnum(...)\n | S.isalnum() -> bool\n | \n | Return True if all characters in S are alphanumeric\n | and there is at least one character in S, False otherwise.\n | \n | isalpha(...)\n | S.isalpha() -> bool\n | \n | Return True if all characters in S are alphabetic\n | and there is at least one character in S, False otherwise.\n | \n | isdecimal(...)\n | S.isdecimal() -> bool\n | \n | Return True if there are only decimal characters in S,\n | False otherwise.\n | 
\n | isdigit(...)\n | S.isdigit() -> bool\n | \n | Return True if all characters in S are digits\n | and there is at least one character in S, False otherwise.\n | \n | isidentifier(...)\n | S.isidentifier() -> bool\n | \n | Return True if S is a valid identifier according\n | to the language definition.\n | \n | Use keyword.iskeyword() to test for reserved identifiers\n | such as \"def\" and \"class\".\n | \n | islower(...)\n | S.islower() -> bool\n | \n | Return True if all cased characters in S are lowercase and there is\n | at least one cased character in S, False otherwise.\n | \n | isnumeric(...)\n | S.isnumeric() -> bool\n | \n | Return True if there are only numeric characters in S,\n | False otherwise.\n | \n | isprintable(...)\n | S.isprintable() -> bool\n | \n | Return True if all characters in S are considered\n | printable in repr() or S is empty, False otherwise.\n | \n | isspace(...)\n | S.isspace() -> bool\n | \n | Return True if all characters in S are whitespace\n | and there is at least one character in S, False otherwise.\n | \n | istitle(...)\n | S.istitle() -> bool\n | \n | Return True if S is a titlecased string and there is at least one\n | character in S, i.e. upper- and titlecase characters may only\n | follow uncased characters and lowercase characters only cased ones.\n | Return False otherwise.\n | \n | isupper(...)\n | S.isupper() -> bool\n | \n | Return True if all cased characters in S are uppercase and there is\n | at least one cased character in S, False otherwise.\n | \n | join(...)\n | S.join(iterable) -> str\n | \n | Return a string which is the concatenation of the strings in the\n | iterable. The separator between elements is S.\n | \n | ljust(...)\n | S.ljust(width[, fillchar]) -> str\n | \n | Return S left-justified in a Unicode string of length width. 
Padding is\n | done using the specified fill character (default is a space).\n | \n | lower(...)\n | S.lower() -> str\n | \n | Return a copy of the string S converted to lowercase.\n | \n | lstrip(...)\n | S.lstrip([chars]) -> str\n | \n | Return a copy of the string S with leading whitespace removed.\n | If chars is given and not None, remove characters in chars instead.\n | \n | partition(...)\n | S.partition(sep) -> (head, sep, tail)\n | \n | Search for the separator sep in S, and return the part before it,\n | the separator itself, and the part after it. If the separator is not\n | found, return S and two empty strings.\n | \n | replace(...)\n | S.replace(old, new[, count]) -> str\n | \n | Return a copy of S with all occurrences of substring\n | old replaced by new. If the optional argument count is\n | given, only the first count occurrences are replaced.\n | \n | rfind(...)\n | S.rfind(sub[, start[, end]]) -> int\n | \n | Return the highest index in S where substring sub is found,\n | such that sub is contained within S[start:end]. Optional\n | arguments start and end are interpreted as in slice notation.\n | \n | Return -1 on failure.\n | \n | rindex(...)\n | S.rindex(sub[, start[, end]]) -> int\n | \n | Like S.rfind() but raise ValueError when the substring is not found.\n | \n | rjust(...)\n | S.rjust(width[, fillchar]) -> str\n | \n | Return S right-justified in a string of length width. Padding is\n | done using the specified fill character (default is a space).\n | \n | rpartition(...)\n | S.rpartition(sep) -> (head, sep, tail)\n | \n | Search for the separator sep in S, starting at the end of S, and return\n | the part before it, the separator itself, and the part after it. 
If the\n | separator is not found, return two empty strings and S.\n | \n | rsplit(...)\n | S.rsplit(sep=None, maxsplit=-1) -> list of strings\n | \n | Return a list of the words in S, using sep as the\n | delimiter string, starting at the end of the string and\n | working to the front. If maxsplit is given, at most maxsplit\n | splits are done. If sep is not specified, any whitespace string\n | is a separator.\n | \n | rstrip(...)\n | S.rstrip([chars]) -> str\n | \n | Return a copy of the string S with trailing whitespace removed.\n | If chars is given and not None, remove characters in chars instead.\n | \n | split(...)\n | S.split(sep=None, maxsplit=-1) -> list of strings\n | \n | Return a list of the words in S, using sep as the\n | delimiter string. If maxsplit is given, at most maxsplit\n | splits are done. If sep is not specified or is None, any\n | whitespace string is a separator and empty strings are\n | removed from the result.\n | \n | splitlines(...)\n | S.splitlines([keepends]) -> list of strings\n | \n | Return a list of the lines in S, breaking at line boundaries.\n | Line breaks are not included in the resulting list unless keepends\n | is given and true.\n | \n | startswith(...)\n | S.startswith(prefix[, start[, end]]) -> bool\n | \n | Return True if S starts with the specified prefix, False otherwise.\n | With optional start, test S beginning at that position.\n | With optional end, stop comparing S at that position.\n | prefix can also be a tuple of strings to try.\n | \n | strip(...)\n | S.strip([chars]) -> str\n | \n | Return a copy of the string S with leading and trailing\n | whitespace removed.\n | If chars is given and not None, remove characters in chars instead.\n | \n | swapcase(...)\n | S.swapcase() -> str\n | \n | Return a copy of S with uppercase characters converted to lowercase\n | and vice versa.\n | \n | title(...)\n | S.title() -> str\n | \n | Return a titlecased version of S, i.e. 
words start with title case\n | characters, all remaining cased characters have lower case.\n | \n | translate(...)\n | S.translate(table) -> str\n | \n | Return a copy of the string S in which each character has been mapped\n | through the given translation table. The table must implement\n | lookup/indexing via __getitem__, for instance a dictionary or list,\n | mapping Unicode ordinals to Unicode ordinals, strings, or None. If\n | this operation raises LookupError, the character is left untouched.\n | Characters mapped to None are deleted.\n | \n | upper(...)\n | S.upper() -> str\n | \n | Return a copy of S converted to uppercase.\n | \n | zfill(...)\n | S.zfill(width) -> str\n | \n | Pad a numeric string S with zeros on the left, to fill a field\n | of the specified width. The string S is never truncated.\n | \n | ----------------------------------------------------------------------\n | Static methods defined here:\n | \n | maketrans(x, y=None, z=None, /)\n | Return a translation table usable for str.translate().\n | \n | If there is only one argument, it must be a dictionary mapping Unicode\n | ordinals (integers) or characters to Unicode ordinals, strings or None.\n | Character keys will be then converted to ordinals.\n | If there are two arguments, they must be strings of equal length, and\n | in the resulting dictionary, each character in x will be mapped to the\n | character at the same position in y. If there is a third argument, it\n | must be a string, whose characters will be mapped to None in the result.\n\n"
],
[
"a = 3.14\nhelp(a)\ndir(a)",
"Help on float object:\n\nclass float(object)\n | float(x) -> floating point number\n | \n | Convert a string or number to a floating point number, if possible.\n | \n | Methods defined here:\n | \n | __abs__(self, /)\n | abs(self)\n | \n | __add__(self, value, /)\n | Return self+value.\n | \n | __bool__(self, /)\n | self != 0\n | \n | __divmod__(self, value, /)\n | Return divmod(self, value).\n | \n | __eq__(self, value, /)\n | Return self==value.\n | \n | __float__(self, /)\n | float(self)\n | \n | __floordiv__(self, value, /)\n | Return self//value.\n | \n | __format__(...)\n | float.__format__(format_spec) -> string\n | \n | Formats the float according to format_spec.\n | \n | __ge__(self, value, /)\n | Return self>=value.\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __getformat__(...) from builtins.type\n | float.__getformat__(typestr) -> string\n | \n | You probably don't want to use this function. It exists mainly to be\n | used in Python's test suite.\n | \n | typestr must be 'double' or 'float'. This function returns whichever of\n | 'unknown', 'IEEE, big-endian' or 'IEEE, little-endian' best describes the\n | format of floating point numbers used by the C type named by typestr.\n | \n | __getnewargs__(...)\n | \n | __gt__(self, value, /)\n | Return self>value.\n | \n | __hash__(self, /)\n | Return hash(self).\n | \n | __int__(self, /)\n | int(self)\n | \n | __le__(self, value, /)\n | Return self<=value.\n | \n | __lt__(self, value, /)\n | Return self<value.\n | \n | __mod__(self, value, /)\n | Return self%value.\n | \n | __mul__(self, value, /)\n | Return self*value.\n | \n | __ne__(self, value, /)\n | Return self!=value.\n | \n | __neg__(self, /)\n | -self\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. 
See help(type) for accurate signature.\n | \n | __pos__(self, /)\n | +self\n | \n | __pow__(self, value, mod=None, /)\n | Return pow(self, value, mod).\n | \n | __radd__(self, value, /)\n | Return value+self.\n | \n | __rdivmod__(self, value, /)\n | Return divmod(value, self).\n | \n | __repr__(self, /)\n | Return repr(self).\n | \n | __rfloordiv__(self, value, /)\n | Return value//self.\n | \n | __rmod__(self, value, /)\n | Return value%self.\n | \n | __rmul__(self, value, /)\n | Return value*self.\n | \n | __round__(...)\n | Return the Integral closest to x, rounding half toward even.\n | When an argument is passed, work like built-in round(x, ndigits).\n | \n | __rpow__(self, value, mod=None, /)\n | Return pow(value, self, mod).\n | \n | __rsub__(self, value, /)\n | Return value-self.\n | \n | __rtruediv__(self, value, /)\n | Return value/self.\n | \n | __setformat__(...) from builtins.type\n | float.__setformat__(typestr, fmt) -> None\n | \n | You probably don't want to use this function. It exists mainly to be\n | used in Python's test suite.\n | \n | typestr must be 'double' or 'float'. 
fmt must be one of 'unknown',\n | 'IEEE, big-endian' or 'IEEE, little-endian', and in addition can only be\n | one of the latter two if it appears to match the underlying C reality.\n | \n | Override the automatic determination of C-level floating point type.\n | This affects how floats are converted to and from binary strings.\n | \n | __str__(self, /)\n | Return str(self).\n | \n | __sub__(self, value, /)\n | Return self-value.\n | \n | __truediv__(self, value, /)\n | Return self/value.\n | \n | __trunc__(...)\n | Return the Integral closest to x between 0 and x.\n | \n | as_integer_ratio(...)\n | float.as_integer_ratio() -> (int, int)\n | \n | Return a pair of integers, whose ratio is exactly equal to the original\n | float and with a positive denominator.\n | Raise OverflowError on infinities and a ValueError on NaNs.\n | \n | >>> (10.0).as_integer_ratio()\n | (10, 1)\n | >>> (0.0).as_integer_ratio()\n | (0, 1)\n | >>> (-.25).as_integer_ratio()\n | (-1, 4)\n | \n | conjugate(...)\n | Return self, the complex conjugate of any float.\n | \n | fromhex(...) from builtins.type\n | float.fromhex(string) -> float\n | \n | Create a floating-point number from a hexadecimal string.\n | >>> float.fromhex('0x1.ffffp10')\n | 2047.984375\n | >>> float.fromhex('-0x1p-1074')\n | -5e-324\n | \n | hex(...)\n | float.hex() -> string\n | \n | Return a hexadecimal representation of a floating-point number.\n | >>> (-0.1).hex()\n | '-0x1.999999999999ap-4'\n | >>> 3.14159.hex()\n | '0x1.921f9f01b866ep+1'\n | \n | is_integer(...)\n | Return True if the float is an integer.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | imag\n | the imaginary part of a complex number\n | \n | real\n | the real part of a complex number\n\n"
]
],
[
[
"Functions are objects too, and we can use the dir() function on them to learn more.",
"_____no_output_____"
]
],
[
[
"import math\ndir(math.sin)",
"_____no_output_____"
]
],
[
[
"We can access the docstring of a function with the \\__doc__ attribute",
"_____no_output_____"
]
],
[
[
"import math \nmath.sin.__doc__",
"_____no_output_____"
]
],
[
[
"The docstring is also an object, check this out:",
"_____no_output_____"
]
],
[
[
"dir(math.sin.__doc__)",
"_____no_output_____"
]
],
[
[
"## Classes \n\nClasses serve many roles. Classes define the collection of attributes that describe a type of object. They also describe how to create that type of object and may inherit attributes from other classes hierarchically.\n\nThe following cells are examples of classes we would create during a particle physics simulation:",
"_____no_output_____"
]
],
[
[
"class Particle(object): # Begins the class definition and names the class particle\n \"\"\"A particle is a constituent unit of the universe.\"\"\" # A docstring for the class \n # class body definition here",
"_____no_output_____"
]
],
[
[
"### Class variables\n\nMany attributes should be included in the class definition. The first of these that we will introduce is the class variable. Class variables are data that are applicable to every object of the class. For example, in our particles class, every object should be able to say \"I am a particle.\" We can then set a class-level attribute equal to the string \"I am a particle\" ",
"_____no_output_____"
]
],
[
[
"# particle.py\nclass Particle(object):\n \"\"\"A particle is a constituent unit of the universe.\"\"\"\n roar = \"I am a particle!\" ",
"_____no_output_____"
],
[
"import os\nimport sys\nsys.path.insert(0, os.path.abspath('obj'))",
"_____no_output_____"
]
],
[
[
"We can access class variables even without declaring an instance of the class, as in the following code:",
"_____no_output_____"
]
],
[
[
"# import the particle module\nimport particle as p\nprint(p.Particle.roar)",
"I am a particle!\n"
],
[
"# import the particle module\nimport particle as p\nhiggs = p.Particle()\nprint(higgs.roar)",
"I am a particle!\n"
]
],
[
[
"### Instance Variables\n\nSome variables should only apply to certain objects in the class. These are called \"instance variables.\" For example, every particle should have its own position vector. A rather clumsy way to assign position vectors to all of the objects in the particle class is demonstrated in the following cell:",
"_____no_output_____"
]
],
[
[
"# import the Particle class from the particle module\nfrom particle import Particle\n\n# create an empty list to hold observed particle data\nobs = []\n\n# append the first particle\nobs.append(Particle())\n\n# assign its position\nobs[0].r = {'x': 100.0, 'y': 38.0, 'z': -42.0}\n\n# append the second particle\nobs.append(Particle())\n\n# assign the position of the second particle\nobs[1].r = {'x': 0.01, 'y': 99.0, 'z': 32.0}\n\n# print the positions of each particle\nprint(obs[0].r)\nprint(obs[1].r)",
"{'x': 100.0, 'z': -42.0, 'y': 38.0}\n{'x': 0.01, 'z': 32.0, 'y': 99.0}\n"
]
],
[
[
"#### Constructors\n\nConstructors allow us to associate data attributes with a specific instance of a class. Whenever an object is created as part of a class, the constructor function, which is always named `\\__init__()`, is executed. \n\nThe next example defines a constructor function that assigns values of charge, mass, and position to the particle.\n\nNote that `\\__init__()` always takes `self` as an argument. The other parameters are assigned with the syntax `self.<var> = <val>`",
"_____no_output_____"
]
],
[
[
"# particle.py\nclass Particle(object):\n \"\"\"A particle is a constituent unit of the universe.\n \n Attributes\n ----------\n c : charge in units of [e]\n m : mass in units of [kg]\n r : position in units of [meters]\n \"\"\"\n\n roar = \"I am a particle!\"\n\n def __init__(self):\n \"\"\"Initializes the particle with default values for \n charge c, mass m, and position r.\n \"\"\"\n self.c = 0\n self.m = 0\n self.r = {'x': 0, 'y': 0, 'z': 0}\n",
"_____no_output_____"
]
],
[
[
"The constructor can be made more powerful by passing arguments to it, as in this example:",
"_____no_output_____"
]
],
[
[
"# particle.py\nclass Particle(object):\n \"\"\"A particle is a constituent unit of the universe.\n \n Attributes\n ----------\n c : charge in units of [e]\n m : mass in units of [kg]\n r : position in units of [meters]\n \"\"\"\n\n roar = \"I am a particle!\"\n\n def __init__(self, charge, mass, position): # Self is the first argument, followed by three positional arguments\n \"\"\"Initializes the particle with supplied values for \n charge c, mass m, and position r.\n \"\"\"\n self.c = charge # C is introduced and assigned to self with the value provided in the method call\n self.m = mass\n self.r = position\n",
"_____no_output_____"
]
],
[
[
"### Methods\n\nWe have discussed methods a bit without formally introducing them. Methods are a special type of function that is associated with a class definition. Methods may be used to operate on data contained by the object, as in these examples:",
"_____no_output_____"
]
],
[
[
"# particle.py\nclass Particle(object):\n \"\"\"A particle is a constituent unit of the universe.\n \n Attributes\n ----------\n c : charge in units of [e]\n m : mass in units of [kg]\n r : position in units of [meters]\n \"\"\"\n\n roar = \"I am a particle!\"\n\n def __init__(self, charge, mass, position): \n \"\"\"Initializes the particle with supplied values for \n charge c, mass m, and position r.\n \"\"\"\n self.c = charge\n self.m = mass\n self.r = position\n\n def hear_me(self): # The object and all of its data are passed to the method as self\n myroar = self.roar + (\n \" My charge is: \" + str(self.c) + # The self argument is used to access the instance variable c\n \" My mass is: \" + str(self.m) +\n \" My x position is: \" + str(self.r['x']) +\n \" My y position is: \" + str(self.r['y']) +\n \" My z position is: \" + str(self.r['z']))\n print(myroar)",
"_____no_output_____"
],
[
"from scipy import constants\n\nimport particle as p\n\nm_p = constants.m_p\nr_p = {'x': 1, 'y': 1, 'z': 53}\na_p = p.Particle(1, m_p, r_p)\na_p.hear_me()",
"I am a particle!\n My mass is: 1.672621898e-27\n My charge is: 1\n My x position is: 1\n My y position is: 1\n My z position is: 53\n"
],
[
"from scipy import constants\n\nimport particle as e\n\nm_e = constants.m_e\nr_e = {'x': 11, 'y': 1, 'z': 53}\na_e = p.Particle(-1, m_e, r_e)\na_e.hear_me()",
"I am a particle!\n My mass is: 9.10938356e-31\n My charge is: -1\n My x position is: 11\n My y position is: 1\n My z position is: 53\n"
]
],
[
[
"The next example creates a `flip` method that changes a quark's flavor while maintaining symmetry",
"_____no_output_____"
]
],
[
[
"def flip(self):\n if self.flavor == \"up\":\n self.flavor = \"down\"\n elif self.flavor == \"down\":\n self.flavor = \"up\"\n elif self.flavor == \"top\":\n self.flavor = \"bottom\"\n elif self.flavor == \"bottom\":\n self.flavor = \"top\"\n elif self.flavor == \"strange\":\n self.flavor = \"charm\"\n elif self.flavor == \"charm\":\n self.flavor = \"strange\"\n else :\n raise AttributeError(\"The quark cannot be flipped, because the \"\n \"flavor is not valid.\")",
"_____no_output_____"
]
],
[
[
"Here is our method in action, changing the attributes of an object after it is called:",
"_____no_output_____"
]
],
[
[
"# import the class\nfrom quark import Quark\n\n# create a Quark object\nt = Quark()\n\n# set the flavor\nt.flavor = \"top\"\n\n# flip the flavor\nt.flip()\n\n# print the flavor\nprint(t.flavor)",
"bottom\n"
]
],
[
[
"Another powerful method that we could add to the `Particle` class uses the Heisenberg uncertainty principle to determine the minimum uncertainty in position, given an uncertainty in momentum",
"_____no_output_____"
]
],
[
[
"from scipy import constants\n\nclass Particle(object):\n \"\"\"A particle is a constituent unit of the universe.\"\"\"\n\n # ... other parts of the class definition ...\n\n def delta_x_min(self, delta_p_x):\n hbar = constants.hbar\n delx_min = hbar / (2.0 * delta_p_x)\n return delx_min",
"_____no_output_____"
]
],
[
[
"### Static Methods\n\nWe can add a method that behaves the same for every instance. Consider a function that lists the possible quark flavors. This list does not depend on the flavor of the quark. Such a function could look like the following:",
"_____no_output_____"
]
],
[
[
"def possible_flavors():\n return [\"up\", \"down\", \"top\", \"bottom\", \"strange\", \"charm\"]",
"_____no_output_____"
]
],
[
[
"We can add this function as a method of a class by using Python's built-in `@staticmethod` decorator so that the method does not take any arguments, and behaves the same for all objects in the class.",
"_____no_output_____"
]
],
[
[
"from scipy import constants\n\ndef possible_flavors():\n return[\"up\",\"down\",\"top\",\"bottom\",\"strange\",\"charm\"]\n\nclass Particle(object):\n \"\"\"A particle is a constituent unit of the universe.\"\"\"\n\n # ... other parts of the class definition ...\n\n def delta_x_min(self, delta_p_x):\n hbar = constants.hbar\n delx_min = hbar / (2.0 * delta_p_x)\n return delx_min\n\n @staticmethod\n def possible_flavors():\n return [\"up\", \"down\", \"top\", \"bottom\", \"strange\", \"charm\"]",
"_____no_output_____"
]
],
[
[
"### Duck Typing\n\nDuck typing was introduced in Chapter 3. We will explore it in more detail here. Duck typing refers to Python's tactic of only checking the attributes of an object that are relevant at the time of use. \nThus, any particles with a valid charge attribute `c` may be used identically, as in this example:",
"_____no_output_____"
]
],
[
[
"def total_charge(particles):\n tot = 0\n for p in particles:\n tot += p.c\n return tot",
"_____no_output_____"
],
[
"p = a_p\ne1 = a_e\ne2 = a_e\n\nparticles = [p, e1, e2]\ntotal_charge(particles)",
"_____no_output_____"
]
],
[
[
"Sometimes duck typing is undesirable. The isinstance() function can be used with an if statement to ensure only objects of a certain type are passed to a method",
"_____no_output_____"
]
],
[
[
"def total_charge(collection):\n tot = 0\n for p in collection:\n if isinstance(p, Particle):\n tot += p.c\n return tot",
"_____no_output_____"
]
],
[
[
"### Polymorphism\n\nPolymorphism occurs when a class inherits the attributes of a parent class. Generally, what works for a parent class should also work for the subclass, but the subclass should be able to execute its own specialized behavior as well. \n\nConsider a subclass of `particles` that describes elementary particles such as electrons, quarks, and muons. Such a class might contain a method that checks a particle's spin, and names the particle as a fermion when it has non-integer spin, and a boson when it has integer spin. This subclass is written below:",
"_____no_output_____"
]
],
[
[
"# elementary.py\nclass ElementaryParticle(Particle):\n\n def __init__(self, spin):\n self.s = spin\n self.is_fermion = bool(spin % 1.0)\n self.is_boson = not self.is_fermion",
"_____no_output_____"
]
],
[
[
"It seems that `ElementaryParticle` takes `Particle` as an argument. This syntax establishes `Particle` as the parent class to `ElementaryParticle`, following the inheritance diagram in figure 6-2 on page 136. \n\nAnother subclass of `Particle` could be `CompositeParticle`. This class may have all the properties of the `Particle` class, as well as a list of its constituents. The only properties it shares with the `ElementaryParticle` class are those inherited from `Particle`.",
"_____no_output_____"
]
],
[
[
"# composite.py\nclass CompositeParticle(Particle):\n\n def __init__(self, parts):\n self.constituents = parts",
"_____no_output_____"
]
],
[
[
"#### Subclasses\n\nObjects in the `ElementaryParticle` class and in the `CompositeParticle` class __are__ in the `Particle` class because those classes inherit from `Particle`. Inheritance has thus allowed us to reuse code without any rewriting. \n\nHowever, the behavior from `Particle` can be overridden, as in the following example:",
"_____no_output_____"
]
],
[
[
"# elementary.py\nclass ElementaryParticle(Particle):\n\n roar = \"I am an Elementary Particle!\"\n\n def __init__(self, spin):\n self.s = spin\n self.is_fermion = bool(spin % 1.0)\n self.is_boson = not self.is_fermion",
"_____no_output_____"
],
[
"from elementary import ElementaryParticle\n\nspin = 1.5\np = ElementaryParticle(spin)\np.s\np.hear_me()",
"_____no_output_____"
]
],
[
[
"#### Superclasses\n\nWhile `ElementaryParticle` is a subclass of `Particle`, it can also be a superclass to other classes, such as `Quark`, which is defined in the next example:",
"_____no_output_____"
]
],
[
[
"import randphys as rp\n\nclass Quark(ElementaryParticle):\n\n def __init__(self):\n phys = rp.RandomPhysics()\n self.color = phys.color()\n self.charge = phys.charge()\n self.color_charge = phys.color_charge()\n self.spin = phys.spin()\n self.flavor = phys.flavor()",
"_____no_output_____"
]
],
[
[
"### Decorators and Metaclasses\n\n_MetaProgramming_ is when the definition of a class or a function is specified outside of that function or class. This practice is more common in other languages such as C++ than it is in Python. Most of our metaprogramming needs are accomplished with decorators. We can define our own decorators and then add the line `@<decorator>` above the function definition, as in these examples:",
"_____no_output_____"
]
],
[
[
"def add_is_particle(cls): # Defines the class decorator, which takes one argument that is the class itself.\n cls.is_particle = True # Modifies the class by adding the is_particle attribute.\n return cls # Returns the class\n\n\n@add_is_particle # Applies the decorator to the class. This line uses the same syntax as a function decorator.\nclass Particle(object):\n \"\"\"A particle is a constituent unit of the universe.\"\"\"\n\n # ... other parts of the class definition ...",
"_____no_output_____"
]
],
[
[
"We can even add methods to a class, as follows:",
"_____no_output_____"
]
],
[
[
"from math import sqrt\n\ndef add_distance(cls):\n def distance(self, other): \n d2 = 0.0\n for axis in ['x', 'y', 'z']:\n d2 += (self.r[axis] - other.r[axis])**2\n d = sqrt(d2)\n return d\n cls.distance = distance\n return cls \n\n\n@add_distance\nclass Particle(object):\n \"\"\"A particle is a constituent unit of the universe.\"\"\"\n\n # ... other parts of the class definition ...",
"_____no_output_____"
]
],
[
[
"Metaclasses also exist, for when decorators are not enough. Learn about them from these examples, if you dare:",
"_____no_output_____"
]
],
[
[
"type(type)",
"_____no_output_____"
],
[
"class IsParticle(type):\n pass",
"_____no_output_____"
],
[
"class Particle(metaclass=IsParticle):\n \"\"\"A particle is a constituent unit of the universe.\"\"\"\n\n # ... other parts of the class definition ...",
"_____no_output_____"
],
[
"isinstance(Particle, IsParticle)\np = Particle()",
"_____no_output_____"
],
[
"isinstance(p, IsParticle)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e741caf4db049f9c5d7e848de19f85b9d0ca8def | 446,133 | ipynb | Jupyter Notebook | Webscraping_Covid19data.ipynb | Kaminibokefode/Webscraping_Covid19data_csvfile | f54a32d7c330179dae04c5f44c00521ccbe19494 | [
"Apache-2.0"
] | null | null | null | Webscraping_Covid19data.ipynb | Kaminibokefode/Webscraping_Covid19data_csvfile | f54a32d7c330179dae04c5f44c00521ccbe19494 | [
"Apache-2.0"
] | null | null | null | Webscraping_Covid19data.ipynb | Kaminibokefode/Webscraping_Covid19data_csvfile | f54a32d7c330179dae04c5f44c00521ccbe19494 | [
"Apache-2.0"
] | null | null | null | 47.673969 | 179 | 0.502749 | [
[
[
"\n\nimport pandas as pd",
"_____no_output_____"
],
[
"import requests\nURL = \"https://www.worldometers.info/coronavirus/#countries\"\npage = requests.get(URL)\nfrom bs4 import BeautifulSoup\nsoup = BeautifulSoup(page.content,\"html.parser\")\nresults = soup.find(id='main_table_countries_today')\ncontent = results.find_all('td')",
"_____no_output_____"
],
[
"content",
"_____no_output_____"
],
[
"con = results.find_all(\"tr\") ",
"_____no_output_____"
],
[
"con",
"_____no_output_____"
],
[
"for td in con:\n print(td.text)\n ",
"\nCountry,Other\nTotalCases\nNewCases\nTotalDeaths\nNewDeaths\nTotalRecovered\nActiveCases\nSerious,Critical\nTot Cases/1M pop\nDeaths/1M pop\nTotalTests\nTests/\n1M pop\n\nContinent\n\n\n\nNorth America\n\n820,749\n+666\n43,369\n+32\n86,885\n690,495\n14,651\n\n\n\n\nNorth America\n\n\n\nEurope\n\n1,097,423\n+8,167\n102,454\n+618\n316,674\n678,295\n25,383\n\n\n\n\nEurope\n\n\n\nAsia\n\n386,077\n+2,920\n14,840\n+54\n181,254\n189,983\n6,403\n\n\n\n\nAsia\n\n\n\nSouth America\n\n82,310\n+58\n3,850\n+1\n36,339\n42,121\n7,533\n\n\n\n\nSouth America\n\n\n\nOceania\n\n8,157\n+17\n83\n\n5,252\n2,822\n54\n\n\n\n\nAustralia/Oceania\n\n\n\nAfrica\n\n22,992\n+26\n1,128\n+3\n5,847\n16,017\n174\n\n\n\n\nAfrica\n\n\n\n\n\n721\n\n15\n\n644\n62\n7\n\n\n\n\n\n\n\nWorld\n2,418,429\n+11,854\n165,739\n+708\n632,895\n1,619,795\n54,205\n310\n21.3\n\n\nAll\n\n\nUSA\n764,265\n\n40,565 \n\n71,012\n652,688\n13,566\n2,309\n123\n3,861,596\n11,666\nNorth America\n\n\nSpain\n200,210\n+1,536\n20,852 \n+399\n80,587\n98,771\n7,371\n4,282\n446\n930,230\n19,896\nEurope\n\n\nItaly\n178,972\n\n23,660 \n\n47,055\n108,257\n2,635\n2,960\n391\n1,356,541\n22,436\nEurope\n\n\nFrance\n152,894\n\n19,718 \n\n36,578\n96,598\n5,744\n2,342\n302\n463,662\n7,103\nEurope\n\n\nGermany\n145,743\n+1\n4,642 \n\n91,500\n49,601\n2,889\n1,740\n55\n1,728,357\n20,629\nEurope\n\n\nUK\n120,067\n\n16,060 \n\nN/A\n103,663\n1,559\n1,769\n237\n482,063\n7,101\nEurope\n\n\nTurkey\n86,306\n\n2,017 \n\n11,976\n72,313\n1,922\n1,023\n24\n634,277\n7,521\nAsia\n\n\nIran\n82,211\n\n5,118 \n\n57,023\n20,070\n3,456\n979\n61\n341,662\n4,068\nAsia\n\n\nRussia\n47,121\n+4,268\n405 \n+44\n3,446\n43,270\n8\n323\n3\n2,050,000\n14,047\nEurope\n\n\nBelgium\n39,983\n+1,487\n5,828 \n+145\n8,895\n25,260\n1,071\n3,450\n503\n153,778\n13,269\nEurope\n\n\nBrazil\n38,654\n\n2,462 \n\n22,130\n14,062\n6,634\n182\n12\n62,985\n296\nSouth America\n\n\nCanada\n35,056\n\n1,587 \n\n11,843\n21,626\n557\n929\n42\n549,349\n14,555\nNorth 
America\n\n\nNetherlands\n32,655\n\n3,684 \n\n250\n28,721\n1,176\n1,906\n215\n154,911\n9,041\nEurope\n\n\nSwitzerland\n27,740\n\n1,393 \n\n17,800\n8,547\n386\n3,205\n161\n221,263\n25,566\nEurope\n\n\nPortugal\n20,206\n\n714 \n\n610\n18,882\n224\n1,982\n70\n235,878\n23,133\nEurope\n\n\nIndia\n17,615\n\n559 \n\n2,854\n14,202\n\n13\n0.4\n401,586\n291\nAsia\n\n\nPeru\n15,628\n\n400 \n\n6,811\n8,417\n167\n474\n12\n143,745\n4,360\nSouth America\n\n\nIreland\n15,251\n\n610 \n\n77\n14,564\n294\n3,089\n124\n90,646\n18,358\nEurope\n\n\nAustria\n14,749\n\n452 \n\n10,501\n3,796\n204\n1,638\n50\n179,243\n19,902\nEurope\n\n\nSweden\n14,385\n\n1,540 \n\n550\n12,295\n450\n1,424\n152\n74,600\n7,387\nEurope\n\n\nIsrael\n13,654\n+163\n173 \n+1\n3,872\n9,609\n150\n1,577\n20\n187,250\n21,634\nAsia\n\n\nJapan\n10,797\n\n236 \n\n1,159\n9,402\n217\n85\n2\n112,816\n892\nAsia\n\n\nS. Korea\n10,674\n+13\n236 \n+2\n8,114\n2,324\n55\n208\n5\n563,035\n10,982\nAsia\n\n\nChile\n10,088\n\n133 \n\n4,338\n5,617\n360\n528\n7\n113,649\n5,945\nSouth America\n\n\nEcuador\n9,468\n\n474 \n\n1,061\n7,933\n124\n537\n27\n31,809\n1,803\nSouth America\n\n\nPoland\n9,453\n+166\n362 \n+2\n1,133\n7,958\n160\n250\n10\n204,246\n5,397\nEurope\n\n\nSaudi Arabia\n9,362\n\n97 \n\n1,398\n7,867\n97\n269\n3\n180,000\n5,170\nAsia\n\n\nRomania\n8,746\n\n451 \n\n1,892\n6,403\n256\n455\n23\n93,611\n4,866\nEurope\n\n\nPakistan\n8,418\n+70\n176 \n+8\n1,970\n6,272\n46\n38\n0.8\n104,302\n472\nAsia\n\n\nMexico\n8,261\n+764\n686 \n+36\n2,627\n4,948\n207\n64\n5\n49,570\n384\nNorth America\n\n\nSingapore\n8,014\n+1,426\n11 \n\n768\n7,235\n22\n1,370\n2\n94,796\n16,203\nAsia\n\n\nDenmark\n7,515\n+131\n355 \n\n4,141\n3,019\n84\n1,297\n61\n96,244\n16,616\nEurope\n\n\nNorway\n7,103\n+25\n165 \n\n32\n6,906\n58\n1,310\n30\n142,166\n26,224\nEurope\n\n\nCzechia\n6,787\n+41\n188 \n+2\n1,311\n5,288\n84\n634\n18\n172,123\n16,073\nEurope\n\n\nUAE\n6,781\n\n41 \n\n1,286\n5,454\n1\n686\n4\n767,000\n77,550\nAsia\n\n\nIndonesia\n6,760\n+185\n590 
\n+8\n747\n5,423\n\n25\n2\n42,219\n154\nAsia\n\n\nAustralia\n6,619\n+7\n71 \n\n4,258\n2,290\n49\n260\n3\n431,734\n16,931\nAustralia/Oceania\n\n\nPhilippines\n6,459\n+200\n428 \n+19\n613\n5,418\n1\n59\n4\n59,928\n547\nAsia\n\n\nSerbia\n6,318\n\n122 \n\n753\n5,443\n120\n723\n14\n38,701\n4,429\nEurope\n\n\nUkraine\n5,710\n+261\n151 \n+10\n359\n5,200\n45\n131\n3\n58,093\n1,328\nEurope\n\n\nQatar\n5,448\n\n8 \n\n518\n4,922\n37\n1,891\n3\n62,538\n21,707\nAsia\n\n\nMalaysia\n5,389\n\n89 \n\n3,197\n2,103\n46\n167\n3\n103,892\n3,210\nAsia\n\n\nBelarus\n4,779\n\n47 \n\n494\n4,238\n65\n506\n5\n98,231\n10,396\nEurope\n\n\nDominican Republic\n4,680\n\n226 \n\n363\n4,091\n114\n431\n21\n15,583\n1,436\nNorth America\n\n\nPanama\n4,467\n+194\n126 \n+6\n165\n4,176\n98\n1,035\n29\n19,091\n4,425\nNorth America\n\n\nFinland\n3,868\n+85\n94 \n\n1,700\n2,074\n68\n698\n17\n58,727\n10,599\nEurope\n\n\nColombia\n3,792\n\n179 \n\n711\n2,902\n98\n75\n4\n62,746\n1,233\nSouth America\n\n\nLuxembourg\n3,550\n\n73 \n\n627\n2,850\n31\n5,671\n117\n33,666\n53,781\nEurope\n\n\nSouth Africa\n3,158\n\n54 \n\n903\n2,201\n36\n53\n0.9\n114,711\n1,934\nAfrica\n\n\nEgypt\n3,144\n\n239 \n\n732\n2,173\n\n31\n2\n55,000\n537\nAfrica\n\n\nBangladesh\n2,948\n+492\n101 \n+10\n85\n2,762\n1\n18\n0.6\n26,604\n162\nAsia\n\n\nArgentina\n2,941\n\n134 \n\n709\n2,098\n123\n65\n3\n32,712\n724\nSouth America\n\n\nMorocco\n2,855\n\n141 \n\n327\n2,387\n1\n77\n4\n15,123\n410\nAfrica\n\n\nThailand\n2,792\n+27\n47 \n\n1,999\n746\n61\n40\n0.7\n142,589\n2,043\nAsia\n\n\nAlgeria\n2,629\n\n375 \n\n1,047\n1,207\n40\n60\n9\n6,500\n148\nAfrica\n\n\nMoldova\n2,472\n\n67 \n\n457\n1,948\n80\n613\n17\n10,488\n2,600\nEurope\n\n\nGreece\n2,235\n\n113 \n\n269\n1,853\n67\n214\n11\n53,290\n5,113\nEurope\n\n\nHungary\n1,984\n+68\n199 \n+10\n267\n1,518\n60\n205\n21\n48,057\n4,975\nEurope\n\n\nKuwait\n1,915\n\n7 \n\n305\n1,603\n38\n448\n2\n\n\nAsia\n\n\nBahrain\n1,881\n\n7 \n\n762\n1,112\n2\n1,105\n4\n86,732\n50,972\nAsia\n\n\nCroatia\n1,871\n\n47 
\n\n709\n1,115\n23\n456\n11\n24,186\n5,891\nEurope\n\n\nIceland\n1,771\n\n9 \n\n1,291\n471\n3\n5,190\n26\n41,091\n120,416\nEurope\n\n\nKazakhstan\n1,757\n+81\n19 \n+2\n417\n1,321\n22\n94\n1\n117,949\n6,282\nAsia\n\n\nUzbekistan\n1,565\n\n5 \n\n225\n1,335\n8\n47\n0.1\n136,000\n4,063\nAsia\n\n\nIraq\n1,539\n\n82 \n\n1,009\n448\n\n38\n2\n56,147\n1,396\nAsia\n\n\nEstonia\n1,535\n+7\n40 \n\n165\n1,330\n9\n1,157\n30\n40,930\n30,855\nEurope\n\n\nNew Zealand\n1,440\n+9\n12 \n\n974\n454\n3\n299\n2\n86,305\n17,897\nAustralia/Oceania\n\n\nOman\n1,410\n+144\n7 \n\n238\n1,165\n3\n276\n1\n\n\nAsia\n\n\nAzerbaijan\n1,398\n\n19 \n\n712\n667\n21\n138\n2\n95,747\n9,443\nAsia\n\n\nArmenia\n1,339\n+48\n22 \n+2\n580\n737\n30\n452\n7\n13,373\n4,513\nAsia\n\n\nSlovenia\n1,335\n+5\n77 \n+3\n193\n1,065\n26\n642\n37\n41,802\n20,107\nEurope\n\n\nLithuania\n1,326\n+28\n37 \n+2\n242\n1,047\n14\n487\n14\n66,352\n24,374\nEurope\n\n\nBosnia and Herzegovina\n1,285\n\n48 \n\n347\n890\n4\n392\n15\n17,818\n5,431\nEurope\n\n\nNorth Macedonia\n1,207\n\n51 \n\n179\n977\n15\n579\n24\n11,870\n5,697\nEurope\n\n\nSlovakia\n1,161\n\n12 \n\n229\n920\n10\n213\n2\n44,278\n8,110\nEurope\n\n\nGhana\n1,042\n\n9 \n\n99\n934\n4\n34\n0.3\n68,591\n2,207\nAfrica\n\n\nCuba\n1,035\n\n34 \n\n255\n746\n16\n91\n3\n26,982\n2,382\nNorth America\n\n\nHong Kong\n1,026\n\n4 \n\n602\n420\n8\n137\n0.5\n131,786\n17,579\nAsia\n\n\nCameroon\n1,017\n\n42 \n\n305\n670\n33\n38\n2\n\n\nAfrica\n\n\nAfghanistan\n996\n\n33 \n\n131\n832\n7\n26\n0.8\n6,422\n165\nAsia\n\n\nBulgaria\n915\n+21\n43 \n+1\n167\n705\n36\n132\n6\n26,417\n3,802\nEurope\n\n\nTunisia\n879\n\n38 \n\n43\n798\n33\n74\n3\n16,098\n1,362\nAfrica\n\n\nIvory Coast\n847\n\n9 \n\n260\n578\n\n32\n0.3\n\n\nAfrica\n\n\nDjibouti\n846\n\n2 \n\n102\n742\n\n856\n2\n8,144\n8,243\nAfrica\n\n\nCyprus\n767\n\n12 \n\n81\n674\n15\n635\n10\n34,087\n28,233\nAsia\n\n\nLatvia\n739\n+12\n5 \n\n88\n646\n5\n392\n3\n36,668\n19,440\nEurope\n\n\nAndorra\n713\n\n36 
\n\n235\n442\n17\n9,228\n466\n1,673\n21,653\nEurope\n\n\nDiamond Princess\n712\n\n13 \n\n644\n55\n7\n\n\n\n\n\n\n\nLebanon\n673\n\n21 \n\n102\n550\n27\n99\n3\n20,929\n3,066\nAsia\n\n\nCosta Rica\n660\n\n5 \n\n112\n543\n14\n130\n1.0\n10,417\n2,045\nNorth America\n\n\nNiger\n648\n\n20 \n\n117\n511\n\n27\n0.8\n4,715\n195\nAfrica\n\n\nNigeria\n627\n\n21 \n\n170\n436\n2\n3\n0.1\n7,153\n35\nAfrica\n\n\nAlbania\n584\n+22\n26 \n\n327\n231\n5\n203\n9\n5,775\n2,007\nEurope\n\n\nGuinea\n579\n\n5 \n\n87\n487\n\n44\n0.4\n\n\nAfrica\n\n\nBurkina Faso\n576\n\n36 \n\n338\n202\n\n28\n2\n\n\nAfrica\n\n\nKyrgyzstan\n568\n+14\n7 \n+2\n201\n360\n5\n87\n1\n26,147\n4,008\nAsia\n\n\nBolivia\n564\n+44\n33 \n+1\n31\n500\n3\n48\n3\n3,569\n306\nSouth America\n\n\nUruguay\n528\n+11\n10 \n\n298\n220\n13\n152\n3\n13,207\n3,802\nSouth America\n\n\nChannel Islands\n488\n\n21 \n\n73\n394\n\n2,807\n121\n3,320\n19,095\nEurope\n\n\nHonduras\n477\n+5\n46 \n\n25\n406\n10\n48\n5\n2,535\n256\nNorth America\n\n\nSan Marino\n461\n\n39 \n\n60\n362\n4\n13,586\n1,149\n1,711\n50,426\nEurope\n\n\nPalestine\n449\n+10\n3 \n\n71\n375\n\n88\n0.6\n17,329\n3,397\nAsia\n\n\nMalta\n427\n\n3 \n\n118\n306\n2\n967\n7\n23,253\n52,663\nEurope\n\n\nTaiwan\n422\n+2\n6 \n\n203\n213\n\n18\n0.3\n53,632\n2,252\nAsia\n\n\nJordan\n417\n\n7 \n\n276\n134\n5\n41\n0.7\n29,000\n2,842\nAsia\n\n\nRéunion\n408\n\n \n\n237\n171\n4\n456\n\n\n\nAfrica\n\n\nGeorgia\n399\n+5\n4 \n\n93\n302\n6\n100\n1\n5,027\n1,260\nAsia\n\n\nSenegal\n367\n\n4 \n+1\n220\n143\n1\n22\n0.2\n\n\nAfrica\n\n\nMauritius\n328\n\n9 \n\n208\n111\n3\n258\n7\n9,755\n7,670\nAfrica\n\n\nDRC\n327\n\n25 \n\n27\n275\n\n4\n0.3\n\n\nAfrica\n\n\nMontenegro\n311\n+3\n5 \n\n55\n251\n7\n495\n8\n3,874\n6,168\nEurope\n\n\nIsle of Man\n298\n\n6 \n\n193\n99\n10\n3,505\n71\n2,465\n28,989\nEurope\n\n\nSri Lanka\n295\n+24\n7 \n\n97\n191\n1\n14\n0.3\n6,463\n302\nAsia\n\n\nGuatemala\n289\n+32\n7 \n\n21\n261\n3\n16\n0.4\n7,200\n402\nNorth America\n\n\nMayotte\n271\n\n4 
\n\n117\n150\n5\n993\n15\n1,850\n6,781\nAfrica\n\n\nKenya\n270\n\n14 \n\n67\n189\n2\n5\n0.3\n13,239\n246\nAfrica\n\n\nVietnam\n268\n\n \n\n202\n66\n8\n3\n\n206,253\n2,119\nAsia\n\n\nVenezuela\n256\n\n9 \n\n117\n130\n4\n9\n0.3\n336,169\n11,822\nSouth America\n\n\nMali\n224\n\n14 \n\n42\n168\n\n11\n0.7\n\n\nAfrica\n\n\nEl Salvador\n218\n+17\n7 \n\n46\n165\n2\n34\n1\n12,210\n1,882\nNorth America\n\n\nParaguay\n208\n+2\n8 \n\n46\n154\n1\n29\n1\n4,950\n694\nSouth America\n\n\nJamaica\n196\n+23\n5 \n\n27\n164\n\n66\n2\n1,809\n611\nNorth America\n\n\nFaeroe Islands\n185\n\n \n\n176\n9\n\n3,786\n\n6,021\n123,222\nEurope\n\n\nTanzania\n170\n\n7 \n\n11\n152\n4\n3\n0.1\n\n\nAfrica\n\n\nSomalia\n164\n\n7 \n\n3\n154\n2\n10\n0.4\n\n\nAfrica\n\n\nMartinique\n163\n\n12 \n\n73\n78\n11\n434\n32\n\n\nNorth America\n\n\nGuadeloupe\n148\n\n8 \n\n73\n67\n13\n370\n20\n\n\nNorth America\n\n\nRwanda\n147\n\n \n\n76\n71\n\n11\n\n6,959\n537\nAfrica\n\n\nCongo\n143\n\n6 \n\n11\n126\n\n26\n1\n\n\nAfrica\n\n\nBrunei \n138\n\n1 \n\n116\n21\n2\n315\n2\n11,633\n26,591\nAsia\n\n\nGibraltar\n132\n\n \n\n120\n12\n1\n3,918\n\n1,912\n56,751\nEurope\n\n\nCambodia\n122\n\n \n\n107\n15\n1\n7\n\n5,768\n345\nAsia\n\n\nMadagascar\n121\n\n \n\n39\n82\n1\n4\n\n2,357\n85\nAfrica\n\n\nTrinidad and Tobago\n114\n\n8 \n\n21\n85\n\n81\n6\n1,335\n954\nNorth America\n\n\nMyanmar\n111\n\n5 \n\n7\n99\n\n2\n0.09\n4,786\n88\nAsia\n\n\nGabon\n109\n\n1 \n\n7\n101\n\n49\n0.4\n572\n257\nAfrica\n\n\nEthiopia\n108\n\n3 \n\n16\n89\n1\n0.9\n0.03\n7,557\n66\nAfrica\n\n\nAruba\n97\n\n2 \n\n49\n46\n4\n909\n19\n1,442\n13,506\nNorth America\n\n\nFrench Guiana\n97\n+1\n \n\n69\n28\n2\n325\n\n\n\nSouth America\n\n\nMonaco\n94\n\n3 \n\n22\n69\n3\n2,395\n76\n\n\nEurope\n\n\nSudan\n92\n+26\n12 \n+2\n8\n72\n\n2\n0.3\n\n\nAfrica\n\n\nLiberia\n91\n\n8 \n\n7\n76\n\n18\n2\n\n\nAfrica\n\n\nBermuda\n86\n\n5 \n\n35\n46\n10\n1,381\n80\n638\n10,244\nNorth America\n\n\nTogo\n84\n\n5 
\n\n52\n27\n\n10\n0.6\n4,295\n519\nAfrica\n\n\nLiechtenstein\n81\n\n1 \n\n55\n25\n\n2,124\n26\n900\n23,605\nEurope\n\n\nEquatorial Guinea\n79\n\n \n\n4\n75\n\n56\n\n854\n609\nAfrica\n\n\nBarbados\n75\n\n5 \n\n19\n51\n4\n261\n17\n1,035\n3,602\nNorth America\n\n\nSint Maarten\n67\n\n10 \n\n12\n45\n6\n1,563\n233\n163\n3,802\nNorth America\n\n\nGuyana\n65\n\n7 \n\n9\n49\n4\n83\n9\n322\n409\nSouth America\n\n\nZambia\n61\n\n3 \n\n33\n25\n1\n3\n0.2\n2,586\n141\nAfrica\n\n\nCabo Verde\n61\n\n1 \n\n1\n59\n\n110\n2\n\n\nAfrica\n\n\nCayman Islands\n61\n\n1 \n\n7\n53\n3\n928\n15\n702\n10,681\nNorth America\n\n\nBahamas\n60\n\n9 \n\n11\n40\n1\n153\n23\n\n\nNorth America\n\n\nFrench Polynesia\n55\n\n \n\n2\n53\n1\n196\n\n1,302\n4,635\nAustralia/Oceania\n\n\nUganda\n55\n\n \n\n28\n27\n\n1\n\n12,565\n275\nAfrica\n\n\nMaldives\n52\n\n \n\n16\n36\n\n96\n\n3,714\n6,871\nAsia\n\n\nLibya\n51\n\n1 \n\n11\n39\n\n7\n0.1\n725\n106\nAfrica\n\n\nGuinea-Bissau\n50\n\n \n\n3\n47\n\n25\n\n1,500\n762\nAfrica\n\n\nHaiti\n47\n\n3 \n\n\n44\n\n4\n0.3\n498\n44\nNorth America\n\n\nMacao\n45\n\n \n\n17\n28\n1\n69\n\n\n\nAsia\n\n\nSyria\n39\n\n3 \n\n5\n31\n\n2\n0.2\n\n\nAsia\n\n\nEritrea\n39\n\n \n\n3\n36\n\n11\n\n\n\nAfrica\n\n\nMozambique\n39\n\n \n\n8\n31\n\n1\n\n1,037\n33\nAfrica\n\n\nSaint Martin\n37\n\n2 \n\n19\n16\n5\n957\n52\n\n\nNorth America\n\n\nBenin\n35\n\n1 \n\n18\n16\n\n3\n0.08\n\n\nAfrica\n\n\nSierra Leone\n35\n\n \n\n6\n29\n\n4\n\n\n\nAfrica\n\n\nChad\n33\n\n \n\n8\n25\n\n2\n\n\n\nAfrica\n\n\nMongolia\n33\n+1\n \n\n7\n26\n\n10\n\n1,554\n474\nAsia\n\n\nNepal\n31\n\n \n\n4\n27\n\n1\n\n29,567\n1,015\nAsia\n\n\nZimbabwe\n25\n\n3 \n\n2\n20\n\n2\n0.2\n2,851\n192\nAfrica\n\n\nAngola\n24\n\n2 \n\n6\n16\n\n0.7\n0.06\n\n\nAfrica\n\n\nAntigua and Barbuda\n23\n\n3 \n\n3\n17\n1\n235\n31\n73\n745\nNorth America\n\n\nEswatini\n22\n\n1 \n\n8\n13\n\n19\n0.9\n714\n615\nAfrica\n\n\nTimor-Leste\n22\n+3\n \n\n1\n21\n\n17\n\n\n\nAsia\n\n\nBotswana\n20\n\n1 
\n\n\n19\n\n9\n0.4\n4,432\n1,885\nAfrica\n\n\nLaos\n19\n\n \n\n2\n17\n\n3\n\n1,349\n185\nAsia\n\n\nBelize\n18\n\n2 \n\n2\n14\n1\n45\n5\n651\n1,637\nNorth America\n\n\nFiji\n18\n+1\n \n\n3\n15\n\n20\n\n\n\nAustralia/Oceania\n\n\nNew Caledonia\n18\n\n \n\n15\n3\n1\n63\n\n3,399\n11,906\nAustralia/Oceania\n\n\nMalawi\n17\n\n2 \n\n3\n12\n1\n0.9\n0.1\n429\n22\nAfrica\n\n\nDominica\n16\n\n \n\n8\n8\n\n222\n\n345\n4,793\nNorth America\n\n\nNamibia\n16\n\n \n\n6\n10\n\n6\n\n524\n206\nAfrica\n\n\nSaint Kitts and Nevis\n15\n+1\n \n\n\n15\n\n282\n\n247\n4,643\nNorth America\n\n\nSaint Lucia\n15\n\n \n\n11\n4\n\n82\n\n344\n1,873\nNorth America\n\n\nCuraçao\n14\n\n1 \n\n11\n2\n\n85\n6\n286\n1,743\nNorth America\n\n\nGrenada\n14\n\n \n\n6\n8\n4\n124\n\n92\n818\nNorth America\n\n\nCAR\n12\n\n \n\n4\n8\n\n2\n\n\n\nAfrica\n\n\nSt. Vincent Grenadines\n12\n\n \n\n1\n11\n\n108\n\n87\n784\nNorth America\n\n\nTurks and Caicos\n11\n\n1 \n\n\n10\n\n284\n26\n83\n2,144\nNorth America\n\n\nFalkland Islands\n11\n\n \n\n3\n8\n\n3,161\n\n319\n91,667\nSouth America\n\n\nGreenland\n11\n\n \n\n11\n0\n\n194\n\n1,043\n18,372\nNorth America\n\n\nMontserrat\n11\n\n \n\n2\n9\n1\n2,204\n\n36\n7,212\nNorth America\n\n\nSeychelles\n11\n\n \n\n5\n6\n\n112\n\n\n\nAfrica\n\n\nNicaragua\n10\n\n2 \n\n6\n2\n\n2\n0.3\n\n\nNorth America\n\n\nGambia\n10\n\n1 \n\n2\n7\n\n4\n0.4\n316\n131\nAfrica\n\n\nSuriname\n10\n\n1 \n\n6\n3\n\n17\n2\n\n\nSouth America\n\n\nMS Zaandam\n9\n\n2 \n\n\n7\n\n\n\n\n\n\n\n\nVatican City\n8\n\n \n\n2\n6\n\n9,988\n\n\n\nEurope\n\n\nMauritania\n7\n\n1 \n\n6\n0\n\n2\n0.2\n913\n196\nAfrica\n\n\nPapua New Guinea\n7\n\n \n\n\n7\n\n0.8\n\n167\n19\nAustralia/Oceania\n\n\nSt. 
Barth\n6\n\n \n\n5\n1\n\n607\n\n\n\nNorth America\n\n\nWestern Sahara\n6\n\n \n\n\n6\n\n10\n\n\n\nAfrica\n\n\nBritish Virgin Islands\n5\n+1\n1 \n\n3\n1\n\n165\n33\n\n\nNorth America\n\n\nBurundi\n5\n\n1 \n\n4\n0\n\n0.4\n0.08\n80\n7\nAfrica\n\n\nBhutan\n5\n\n \n\n2\n3\n\n6\n\n8,107\n10,507\nAsia\n\n\nCaribbean Netherlands\n5\n\n \n\n\n5\n\n191\n\n110\n4,195\nNorth America\n\n\nSao Tome and Principe\n4\n\n \n\n\n4\n\n18\n\n19\n87\nAfrica\n\n\nSouth Sudan\n4\n\n \n\n\n4\n\n0.4\n\n\n\nAfrica\n\n\nAnguilla\n3\n\n \n\n1\n2\n\n200\n\n\n\nNorth America\n\n\nSaint Pierre Miquelon\n1\n\n \n\n\n1\n\n173\n\n\n\nNorth America\n\n\nYemen\n1\n\n \n\n\n1\n\n0.03\n\n\n\nAsia\n\n\nChina\n82,747\n+12\n4,632 \n\n77,084\n1,031\n81\n57\n3\n\n\nAsia\n\n\nTotal:\n820,749\n+666\n43,369\n+32\n86,885\n690,495\n14,651\n\n\n\n\nNorth America\n\n\nTotal:\n1,097,423\n+8,167\n102,454\n+618\n316,674\n678,295\n25,383\n\n\n\n\nEurope\n\n\nTotal:\n386,077\n+2,920\n14,840\n+54\n181,254\n189,983\n6,403\n\n\n\n\nAsia\n\n\nTotal:\n82,310\n+58\n3,850\n+1\n36,339\n42,121\n7,533\n\n\n\n\nSouth America\n\n\nTotal:\n8,157\n+17\n83\n\n5,252\n2,822\n54\n\n\n\n\nAustralia/Oceania\n\n\nTotal:\n22,992\n+26\n1,128\n+3\n5,847\n16,017\n174\n\n\n\n\nAfrica\n\n\nTotal:\n721\n\n15\n\n644\n62\n7\n\n\n\n\n\n\n\nTotal:\n2,418,429\n+11,854\n165,739\n+708\n632,895\n1,619,795\n54,205\n310.3\n21.3\n\n\nAll\n\n"
],
[
"# Print one comma-prefixed line per table row, matching the scraped table layout.\n# (Removed: dead `countrydatasaved` variable and a commented-out accumulator that\n# dropped previous rows — the saving logic lives in the next cell.)\nfor record in soup.findAll('tr'):\n    # Each cell contributes \",\" + its text; empty rows print an empty line.\n    row = \"\".join(\",\" + cell.text for cell in record.findAll('td'))\n    print(row)",
"\n,\nNorth America\n,820,749,+666,43,369,+32,86,885,690,495,14,651,,,,,North America\n,\nEurope\n,1,097,423,+8,167,102,454,+618,316,674,678,295,25,383,,,,,Europe\n,\nAsia\n,386,077,+2,920,14,840,+54,181,254,189,983,6,403,,,,,Asia\n,\nSouth America\n,82,310,+58,3,850,+1,36,339,42,121,7,533,,,,,South America\n,\nOceania\n,8,157,+17,83,,5,252,2,822,54,,,,,Australia/Oceania\n,\nAfrica\n,22,992,+26,1,128,+3,5,847,16,017,174,,,,,Africa\n,\n\n,721,,15,,644,62,7,,,,,\n,World,2,418,429,+11,854,165,739,+708,632,895,1,619,795,54,205,310,21.3,,,All\n,USA,764,265,,40,565 ,,71,012,652,688,13,566,2,309,123,3,861,596,11,666,North America\n,Spain,200,210,+1,536,20,852 ,+399,80,587,98,771,7,371,4,282,446,930,230,19,896,Europe\n,Italy,178,972,,23,660 ,,47,055,108,257,2,635,2,960,391,1,356,541,22,436,Europe\n,France,152,894,,19,718 ,,36,578,96,598,5,744,2,342,302,463,662,7,103,Europe\n,Germany,145,743,+1,4,642 ,,91,500,49,601,2,889,1,740,55,1,728,357,20,629,Europe\n,UK,120,067,,16,060 ,,N/A,103,663,1,559,1,769,237,482,063,7,101,Europe\n,Turkey,86,306,,2,017 ,,11,976,72,313,1,922,1,023,24,634,277,7,521,Asia\n,Iran,82,211,,5,118 ,,57,023,20,070,3,456,979,61,341,662,4,068,Asia\n,Russia,47,121,+4,268,405 ,+44,3,446,43,270,8,323,3,2,050,000,14,047,Europe\n,Belgium,39,983,+1,487,5,828 ,+145,8,895,25,260,1,071,3,450,503,153,778,13,269,Europe\n,Brazil,38,654,,2,462 ,,22,130,14,062,6,634,182,12,62,985,296,South America\n,Canada,35,056,,1,587 ,,11,843,21,626,557,929,42,549,349,14,555,North America\n,Netherlands,32,655,,3,684 ,,250,28,721,1,176,1,906,215,154,911,9,041,Europe\n,Switzerland,27,740,,1,393 ,,17,800,8,547,386,3,205,161,221,263,25,566,Europe\n,Portugal,20,206,,714 ,,610,18,882,224,1,982,70,235,878,23,133,Europe\n,India,17,615,,559 ,,2,854,14,202,,13,0.4,401,586,291,Asia\n,Peru,15,628,,400 ,,6,811,8,417,167,474,12,143,745,4,360,South America\n,Ireland,15,251,,610 ,,77,14,564,294,3,089,124,90,646,18,358,Europe\n,Austria,14,749,,452 
,,10,501,3,796,204,1,638,50,179,243,19,902,Europe\n,Sweden,14,385,,1,540 ,,550,12,295,450,1,424,152,74,600,7,387,Europe\n,Israel,13,654,+163,173 ,+1,3,872,9,609,150,1,577,20,187,250,21,634,Asia\n,Japan,10,797,,236 ,,1,159,9,402,217,85,2,112,816,892,Asia\n,S. Korea,10,674,+13,236 ,+2,8,114,2,324,55,208,5,563,035,10,982,Asia\n,Chile,10,088,,133 ,,4,338,5,617,360,528,7,113,649,5,945,South America\n,Ecuador,9,468,,474 ,,1,061,7,933,124,537,27,31,809,1,803,South America\n,Poland,9,453,+166,362 ,+2,1,133,7,958,160,250,10,204,246,5,397,Europe\n,Saudi Arabia,9,362,,97 ,,1,398,7,867,97,269,3,180,000,5,170,Asia\n,Romania,8,746,,451 ,,1,892,6,403,256,455,23,93,611,4,866,Europe\n,Pakistan,8,418,+70,176 ,+8,1,970,6,272,46,38,0.8,104,302,472,Asia\n,Mexico,8,261,+764,686 ,+36,2,627,4,948,207,64,5,49,570,384,North America\n,Singapore,8,014,+1,426,11 ,,768,7,235,22,1,370,2,94,796,16,203,Asia\n,Denmark,7,515,+131,355 ,,4,141,3,019,84,1,297,61,96,244,16,616,Europe\n,Norway,7,103,+25,165 ,,32,6,906,58,1,310,30,142,166,26,224,Europe\n,Czechia,6,787,+41,188 ,+2,1,311,5,288,84,634,18,172,123,16,073,Europe\n,UAE,6,781,,41 ,,1,286,5,454,1,686,4,767,000,77,550,Asia\n,Indonesia,6,760,+185,590 ,+8,747,5,423,,25,2,42,219,154,Asia\n,Australia,6,619,+7,71 ,,4,258,2,290,49,260,3,431,734,16,931,Australia/Oceania\n,Philippines,6,459,+200,428 ,+19,613,5,418,1,59,4,59,928,547,Asia\n,Serbia,6,318,,122 ,,753,5,443,120,723,14,38,701,4,429,Europe\n,Ukraine,5,710,+261,151 ,+10,359,5,200,45,131,3,58,093,1,328,Europe\n,Qatar,5,448,,8 ,,518,4,922,37,1,891,3,62,538,21,707,Asia\n,Malaysia,5,389,,89 ,,3,197,2,103,46,167,3,103,892,3,210,Asia\n,Belarus,4,779,,47 ,,494,4,238,65,506,5,98,231,10,396,Europe\n,Dominican Republic,4,680,,226 ,,363,4,091,114,431,21,15,583,1,436,North America\n,Panama,4,467,+194,126 ,+6,165,4,176,98,1,035,29,19,091,4,425,North America\n,Finland,3,868,+85,94 ,,1,700,2,074,68,698,17,58,727,10,599,Europe\n,Colombia,3,792,,179 ,,711,2,902,98,75,4,62,746,1,233,South 
America\n,Luxembourg,3,550,,73 ,,627,2,850,31,5,671,117,33,666,53,781,Europe\n,South Africa,3,158,,54 ,,903,2,201,36,53,0.9,114,711,1,934,Africa\n,Egypt,3,144,,239 ,,732,2,173,,31,2,55,000,537,Africa\n,Bangladesh,2,948,+492,101 ,+10,85,2,762,1,18,0.6,26,604,162,Asia\n,Argentina,2,941,,134 ,,709,2,098,123,65,3,32,712,724,South America\n,Morocco,2,855,,141 ,,327,2,387,1,77,4,15,123,410,Africa\n,Thailand,2,792,+27,47 ,,1,999,746,61,40,0.7,142,589,2,043,Asia\n,Algeria,2,629,,375 ,,1,047,1,207,40,60,9,6,500,148,Africa\n,Moldova,2,472,,67 ,,457,1,948,80,613,17,10,488,2,600,Europe\n,Greece,2,235,,113 ,,269,1,853,67,214,11,53,290,5,113,Europe\n,Hungary,1,984,+68,199 ,+10,267,1,518,60,205,21,48,057,4,975,Europe\n,Kuwait,1,915,,7 ,,305,1,603,38,448,2,,,Asia\n,Bahrain,1,881,,7 ,,762,1,112,2,1,105,4,86,732,50,972,Asia\n,Croatia,1,871,,47 ,,709,1,115,23,456,11,24,186,5,891,Europe\n,Iceland,1,771,,9 ,,1,291,471,3,5,190,26,41,091,120,416,Europe\n,Kazakhstan,1,757,+81,19 ,+2,417,1,321,22,94,1,117,949,6,282,Asia\n,Uzbekistan,1,565,,5 ,,225,1,335,8,47,0.1,136,000,4,063,Asia\n,Iraq,1,539,,82 ,,1,009,448,,38,2,56,147,1,396,Asia\n,Estonia,1,535,+7,40 ,,165,1,330,9,1,157,30,40,930,30,855,Europe\n,New Zealand,1,440,+9,12 ,,974,454,3,299,2,86,305,17,897,Australia/Oceania\n,Oman,1,410,+144,7 ,,238,1,165,3,276,1,,,Asia\n,Azerbaijan,1,398,,19 ,,712,667,21,138,2,95,747,9,443,Asia\n,Armenia,1,339,+48,22 ,+2,580,737,30,452,7,13,373,4,513,Asia\n,Slovenia,1,335,+5,77 ,+3,193,1,065,26,642,37,41,802,20,107,Europe\n,Lithuania,1,326,+28,37 ,+2,242,1,047,14,487,14,66,352,24,374,Europe\n,Bosnia and Herzegovina,1,285,,48 ,,347,890,4,392,15,17,818,5,431,Europe\n,North Macedonia,1,207,,51 ,,179,977,15,579,24,11,870,5,697,Europe\n,Slovakia,1,161,,12 ,,229,920,10,213,2,44,278,8,110,Europe\n,Ghana,1,042,,9 ,,99,934,4,34,0.3,68,591,2,207,Africa\n,Cuba,1,035,,34 ,,255,746,16,91,3,26,982,2,382,North America\n,Hong Kong,1,026,,4 ,,602,420,8,137,0.5,131,786,17,579,Asia\n,Cameroon,1,017,,42 
,,305,670,33,38,2,,,Africa\n,Afghanistan,996,,33 ,,131,832,7,26,0.8,6,422,165,Asia\n,Bulgaria,915,+21,43 ,+1,167,705,36,132,6,26,417,3,802,Europe\n,Tunisia,879,,38 ,,43,798,33,74,3,16,098,1,362,Africa\n,Ivory Coast,847,,9 ,,260,578,,32,0.3,,,Africa\n,Djibouti,846,,2 ,,102,742,,856,2,8,144,8,243,Africa\n,Cyprus,767,,12 ,,81,674,15,635,10,34,087,28,233,Asia\n,Latvia,739,+12,5 ,,88,646,5,392,3,36,668,19,440,Europe\n,Andorra,713,,36 ,,235,442,17,9,228,466,1,673,21,653,Europe\n,Diamond Princess,712,,13 ,,644,55,7,,,,,\n,Lebanon,673,,21 ,,102,550,27,99,3,20,929,3,066,Asia\n,Costa Rica,660,,5 ,,112,543,14,130,1.0,10,417,2,045,North America\n,Niger,648,,20 ,,117,511,,27,0.8,4,715,195,Africa\n,Nigeria,627,,21 ,,170,436,2,3,0.1,7,153,35,Africa\n,Albania,584,+22,26 ,,327,231,5,203,9,5,775,2,007,Europe\n,Guinea,579,,5 ,,87,487,,44,0.4,,,Africa\n,Burkina Faso,576,,36 ,,338,202,,28,2,,,Africa\n,Kyrgyzstan,568,+14,7 ,+2,201,360,5,87,1,26,147,4,008,Asia\n,Bolivia,564,+44,33 ,+1,31,500,3,48,3,3,569,306,South America\n,Uruguay,528,+11,10 ,,298,220,13,152,3,13,207,3,802,South America\n,Channel Islands,488,,21 ,,73,394,,2,807,121,3,320,19,095,Europe\n,Honduras,477,+5,46 ,,25,406,10,48,5,2,535,256,North America\n,San Marino,461,,39 ,,60,362,4,13,586,1,149,1,711,50,426,Europe\n,Palestine,449,+10,3 ,,71,375,,88,0.6,17,329,3,397,Asia\n,Malta,427,,3 ,,118,306,2,967,7,23,253,52,663,Europe\n,Taiwan,422,+2,6 ,,203,213,,18,0.3,53,632,2,252,Asia\n,Jordan,417,,7 ,,276,134,5,41,0.7,29,000,2,842,Asia\n,Réunion,408,, ,,237,171,4,456,,,,Africa\n,Georgia,399,+5,4 ,,93,302,6,100,1,5,027,1,260,Asia\n,Senegal,367,,4 ,+1,220,143,1,22,0.2,,,Africa\n,Mauritius,328,,9 ,,208,111,3,258,7,9,755,7,670,Africa\n,DRC,327,,25 ,,27,275,,4,0.3,,,Africa\n,Montenegro,311,+3,5 ,,55,251,7,495,8,3,874,6,168,Europe\n,Isle of Man,298,,6 ,,193,99,10,3,505,71,2,465,28,989,Europe\n,Sri Lanka,295,+24,7 ,,97,191,1,14,0.3,6,463,302,Asia\n,Guatemala,289,+32,7 ,,21,261,3,16,0.4,7,200,402,North America\n,Mayotte,271,,4 
,,117,150,5,993,15,1,850,6,781,Africa\n,Kenya,270,,14 ,,67,189,2,5,0.3,13,239,246,Africa\n,Vietnam,268,, ,,202,66,8,3,,206,253,2,119,Asia\n,Venezuela,256,,9 ,,117,130,4,9,0.3,336,169,11,822,South America\n,Mali,224,,14 ,,42,168,,11,0.7,,,Africa\n,El Salvador,218,+17,7 ,,46,165,2,34,1,12,210,1,882,North America\n,Paraguay,208,+2,8 ,,46,154,1,29,1,4,950,694,South America\n,Jamaica,196,+23,5 ,,27,164,,66,2,1,809,611,North America\n,Faeroe Islands,185,, ,,176,9,,3,786,,6,021,123,222,Europe\n,Tanzania,170,,7 ,,11,152,4,3,0.1,,,Africa\n,Somalia,164,,7 ,,3,154,2,10,0.4,,,Africa\n,Martinique,163,,12 ,,73,78,11,434,32,,,North America\n,Guadeloupe,148,,8 ,,73,67,13,370,20,,,North America\n,Rwanda,147,, ,,76,71,,11,,6,959,537,Africa\n,Congo,143,,6 ,,11,126,,26,1,,,Africa\n,Brunei ,138,,1 ,,116,21,2,315,2,11,633,26,591,Asia\n,Gibraltar,132,, ,,120,12,1,3,918,,1,912,56,751,Europe\n,Cambodia,122,, ,,107,15,1,7,,5,768,345,Asia\n,Madagascar,121,, ,,39,82,1,4,,2,357,85,Africa\n,Trinidad and Tobago,114,,8 ,,21,85,,81,6,1,335,954,North America\n,Myanmar,111,,5 ,,7,99,,2,0.09,4,786,88,Asia\n,Gabon,109,,1 ,,7,101,,49,0.4,572,257,Africa\n,Ethiopia,108,,3 ,,16,89,1,0.9,0.03,7,557,66,Africa\n,Aruba,97,,2 ,,49,46,4,909,19,1,442,13,506,North America\n,French Guiana,97,+1, ,,69,28,2,325,,,,South America\n,Monaco,94,,3 ,,22,69,3,2,395,76,,,Europe\n,Sudan,92,+26,12 ,+2,8,72,,2,0.3,,,Africa\n,Liberia,91,,8 ,,7,76,,18,2,,,Africa\n,Bermuda,86,,5 ,,35,46,10,1,381,80,638,10,244,North America\n,Togo,84,,5 ,,52,27,,10,0.6,4,295,519,Africa\n,Liechtenstein,81,,1 ,,55,25,,2,124,26,900,23,605,Europe\n,Equatorial Guinea,79,, ,,4,75,,56,,854,609,Africa\n,Barbados,75,,5 ,,19,51,4,261,17,1,035,3,602,North America\n,Sint Maarten,67,,10 ,,12,45,6,1,563,233,163,3,802,North America\n,Guyana,65,,7 ,,9,49,4,83,9,322,409,South America\n,Zambia,61,,3 ,,33,25,1,3,0.2,2,586,141,Africa\n,Cabo Verde,61,,1 ,,1,59,,110,2,,,Africa\n,Cayman Islands,61,,1 ,,7,53,3,928,15,702,10,681,North America\n,Bahamas,60,,9 
,,11,40,1,153,23,,,North America\n,French Polynesia,55,, ,,2,53,1,196,,1,302,4,635,Australia/Oceania\n,Uganda,55,, ,,28,27,,1,,12,565,275,Africa\n,Maldives,52,, ,,16,36,,96,,3,714,6,871,Asia\n,Libya,51,,1 ,,11,39,,7,0.1,725,106,Africa\n,Guinea-Bissau,50,, ,,3,47,,25,,1,500,762,Africa\n,Haiti,47,,3 ,,,44,,4,0.3,498,44,North America\n,Macao,45,, ,,17,28,1,69,,,,Asia\n,Syria,39,,3 ,,5,31,,2,0.2,,,Asia\n,Eritrea,39,, ,,3,36,,11,,,,Africa\n,Mozambique,39,, ,,8,31,,1,,1,037,33,Africa\n,Saint Martin,37,,2 ,,19,16,5,957,52,,,North America\n,Benin,35,,1 ,,18,16,,3,0.08,,,Africa\n,Sierra Leone,35,, ,,6,29,,4,,,,Africa\n,Chad,33,, ,,8,25,,2,,,,Africa\n,Mongolia,33,+1, ,,7,26,,10,,1,554,474,Asia\n,Nepal,31,, ,,4,27,,1,,29,567,1,015,Asia\n,Zimbabwe,25,,3 ,,2,20,,2,0.2,2,851,192,Africa\n,Angola,24,,2 ,,6,16,,0.7,0.06,,,Africa\n,Antigua and Barbuda,23,,3 ,,3,17,1,235,31,73,745,North America\n,Eswatini,22,,1 ,,8,13,,19,0.9,714,615,Africa\n,Timor-Leste,22,+3, ,,1,21,,17,,,,Asia\n,Botswana,20,,1 ,,,19,,9,0.4,4,432,1,885,Africa\n,Laos,19,, ,,2,17,,3,,1,349,185,Asia\n,Belize,18,,2 ,,2,14,1,45,5,651,1,637,North America\n,Fiji,18,+1, ,,3,15,,20,,,,Australia/Oceania\n,New Caledonia,18,, ,,15,3,1,63,,3,399,11,906,Australia/Oceania\n,Malawi,17,,2 ,,3,12,1,0.9,0.1,429,22,Africa\n,Dominica,16,, ,,8,8,,222,,345,4,793,North America\n,Namibia,16,, ,,6,10,,6,,524,206,Africa\n,Saint Kitts and Nevis,15,+1, ,,,15,,282,,247,4,643,North America\n,Saint Lucia,15,, ,,11,4,,82,,344,1,873,North America\n,Curaçao,14,,1 ,,11,2,,85,6,286,1,743,North America\n,Grenada,14,, ,,6,8,4,124,,92,818,North America\n,CAR,12,, ,,4,8,,2,,,,Africa\n,St. 
Vincent Grenadines,12,, ,,1,11,,108,,87,784,North America\n,Turks and Caicos,11,,1 ,,,10,,284,26,83,2,144,North America\n,Falkland Islands,11,, ,,3,8,,3,161,,319,91,667,South America\n,Greenland,11,, ,,11,0,,194,,1,043,18,372,North America\n,Montserrat,11,, ,,2,9,1,2,204,,36,7,212,North America\n,Seychelles,11,, ,,5,6,,112,,,,Africa\n,Nicaragua,10,,2 ,,6,2,,2,0.3,,,North America\n,Gambia,10,,1 ,,2,7,,4,0.4,316,131,Africa\n,Suriname,10,,1 ,,6,3,,17,2,,,South America\n,MS Zaandam,9,,2 ,,,7,,,,,,\n,Vatican City,8,, ,,2,6,,9,988,,,,Europe\n,Mauritania,7,,1 ,,6,0,,2,0.2,913,196,Africa\n,Papua New Guinea,7,, ,,,7,,0.8,,167,19,Australia/Oceania\n,St. Barth,6,, ,,5,1,,607,,,,North America\n,Western Sahara,6,, ,,,6,,10,,,,Africa\n,British Virgin Islands,5,+1,1 ,,3,1,,165,33,,,North America\n,Burundi,5,,1 ,,4,0,,0.4,0.08,80,7,Africa\n,Bhutan,5,, ,,2,3,,6,,8,107,10,507,Asia\n,Caribbean Netherlands,5,, ,,,5,,191,,110,4,195,North America\n,Sao Tome and Principe,4,, ,,,4,,18,,19,87,Africa\n,South Sudan,4,, ,,,4,,0.4,,,,Africa\n,Anguilla,3,, ,,1,2,,200,,,,North America\n,Saint Pierre Miquelon,1,, ,,,1,,173,,,,North America\n,Yemen,1,, ,,,1,,0.03,,,,Asia\n,China,82,747,+12,4,632 ,,77,084,1,031,81,57,3,,,Asia\n,Total:,820,749,+666,43,369,+32,86,885,690,495,14,651,,,,,North America\n,Total:,1,097,423,+8,167,102,454,+618,316,674,678,295,25,383,,,,,Europe\n,Total:,386,077,+2,920,14,840,+54,181,254,189,983,6,403,,,,,Asia\n,Total:,82,310,+58,3,850,+1,36,339,42,121,7,533,,,,,South America\n,Total:,8,157,+17,83,,5,252,2,822,54,,,,,Australia/Oceania\n,Total:,22,992,+26,1,128,+3,5,847,16,017,174,,,,,Africa\n,Total:,721,,15,,644,62,7,,,,,\n,Total:,2,418,429,+11,854,165,739,+708,632,895,1,619,795,54,205,310.3,21.3,,,All\n\n,\nAsia\n,383,157,+12,373,14,786,+385,180,611,187,760,6,403,,,,,Asia\n,\nNorth America\n,820,083,+28,674,43,337,+1,800,87,020,689,726,14,644,,,,,North America\n,\nEurope\n,1,089,256,+29,218,101,836,+2,558,309,495,677,925,25,395,,,,,Europe\n,\nSouth 
America\n,82,252,+4,279,3,849,+196,36,329,42,074,7,534,,,,,South America\n,\nOceania\n,8,140,+35,83,+2,5,159,2,898,68,,,,,Australia/Oceania\n,\nAfrica\n,22,966,+1,225,1,125,+43,5,840,16,001,174,,,,,Africa\n,\n\n,721,,15,,644,62,7,,,,,\n,World,2,406,575,+75,804,165,031,+4,984,625,098,1,616,446,54,225,309,21.2,,,All\n,China,82,735,+16,4,632 ,,77,062,1,041,85,57,3,,,Asia\n,USA,764,636,+25,844,40,575 ,+1,561,71,187,652,874,13,566,2,310,123,3,861,549,11,666,North America\n,Spain,198,674,+4,258,20,453 ,+410,77,357,100,864,7,371,4,249,437,930,230,19,896,Europe\n,Italy,178,972,+3,047,23,660 ,+433,47,055,108,257,2,635,2,960,391,1,356,541,22,436,Europe\n,France,152,894,+1,101,19,718 ,+395,36,578,96,598,5,744,2,342,302,463,662,7,103,Europe\n,Germany,145,742,+2,018,4,642 ,+104,88,000,53,100,2,889,1,739,55,1,728,357,20,629,Europe\n,UK,120,067,+5,850,16,060 ,+596,N/A,103,663,1,559,1,769,237,482,063,7,101,Europe\n,Turkey,86,306,+3,977,2,017 ,+127,11,976,72,313,1,922,1,023,24,634,277,7,521,Asia\n,Iran,82,211,+1,343,5,118 ,+87,57,023,20,070,3,456,979,61,341,662,4,068,Asia\n,Russia,42,853,+6,060,361 ,+48,3,291,39,201,8,294,2,1,940,000,13,294,Europe\n,Brazil,38,654,+1,932,2,462 ,+101,22,130,14,062,6,634,182,12,62,985,296,South America\n,Belgium,38,496,+1,313,5,683 ,+230,8,757,24,056,1,081,3,322,490,153,778,13,269,Europe\n,Canada,35,056,+1,673,1,587 ,+117,11,843,21,626,557,929,42,536,062,14,203,North America\n,Netherlands,32,655,+1,066,3,684 ,+83,250,28,721,1,176,1,906,215,154,911,9,041,Europe\n,Switzerland,27,740,+336,1,393 ,+25,17,800,8,547,386,3,205,161,221,263,25,566,Europe\n,Portugal,20,206,+521,714 ,+27,610,18,882,224,1,982,70,235,878,23,133,Europe\n,India,17,615,+1,250,559 ,+38,2,854,14,202,,13,0.4,401,586,291,Asia\n,Peru,15,628,+1,208,400 ,+52,6,811,8,417,167,474,12,143,745,4,360,South America\n,Ireland,15,251,+493,610 ,+39,77,14,564,294,3,089,124,90,646,18,358,Europe\n,Austria,14,749,+78,452 ,+9,10,501,3,796,204,1,638,50,179,243,19,902,Europe\n,Sweden,14,385,+563,1,540 
,+29,550,12,295,450,1,424,152,74,600,7,387,Europe\n,Israel,13,491,+226,172 ,+8,3,754,9,565,146,1,559,20,187,250,21,634,Asia\n,Japan,10,797,+501,236 ,+14,1,159,9,402,217,85,2,112,816,892,Asia\n,S. Korea,10,661,+8,234 ,+2,8,042,2,385,55,208,5,559,109,10,905,Asia\n,Chile,10,088,+358,133 ,+7,4,338,5,617,360,528,7,113,649,5,945,South America\n,Ecuador,9,468,+446,474 ,+18,1,061,7,933,124,537,27,31,809,1,803,South America\n,Saudi Arabia,9,362,+1,088,97 ,+5,1,398,7,867,97,269,3,180,000,5,170,Asia\n,Poland,9,287,+545,360 ,+13,1,040,7,887,160,245,10,204,246,5,397,Europe\n,Romania,8,746,+328,451 ,+30,1,892,6,403,256,455,23,93,611,4,866,Europe\n,Pakistan,8,348,+710,168 ,+25,1,868,6,312,46,38,0.8,98,522,446,Asia\n,Mexico,7,497,+622,650 ,+104,2,627,4,220,207,58,5,49,167,381,North America\n,Denmark,7,384,+142,355 ,+9,4,141,2,888,84,1,275,61,94,277,16,277,Europe\n,Norway,7,078,+42,165 ,+1,32,6,881,58,1,306,30,142,166,26,224,Europe\n,UAE,6,781,+479,41 ,+4,1,286,5,454,1,686,4,767,000,77,550,Asia\n,Czechia,6,746,+140,186 ,+5,1,298,5,262,84,630,17,168,096,15,697,Europe\n,Australia,6,612,+26,71 ,+1,4,230,2,311,48,259,3,420,996,16,510,Australia/Oceania\n,Singapore,6,588,+596,11 ,,768,5,809,22,1,126,2,94,796,16,203,Asia\n,Indonesia,6,575,+327,582 ,+47,686,5,307,,24,2,42,219,154,Asia\n,Serbia,6,318,+324,122 ,+5,753,5,443,120,723,14,38,701,4,429,Europe\n,Philippines,6,259,+172,409 ,+12,572,5,278,1,57,4,59,928,547,Asia\n,Ukraine,5,449,+343,141 ,+8,347,4,961,45,125,3,57,111,1,306,Europe\n,Qatar,5,448,+440,8 ,,518,4,922,37,1,891,3,62,538,21,707,Asia\n,Malaysia,5,389,+84,89 ,+1,3,197,2,103,46,167,3,103,892,3,210,Asia\n,Belarus,4,779,,47 ,+2,494,4,238,65,506,5,98,231,10,396,Europe\n,Dominican Republic,4,680,+345,226 ,+9,363,4,091,114,431,21,15,583,1,436,North America\n,Panama,4,273,+63,120 ,+4,140,4,013,96,990,28,19,091,4,425,North America\n,Colombia,3,792,+171,179 ,+13,711,2,902,98,75,4,62,746,1,233,South America\n,Finland,3,783,+102,94 
,+4,1,700,1,989,68,683,17,57,300,10,342,Europe\n,Luxembourg,3,550,+13,73 ,+1,627,2,850,31,5,671,117,33,666,53,781,Europe\n,South Africa,3,158,+124,54 ,+2,903,2,201,36,53,0.9,114,711,1,934,Africa\n,Egypt,3,144,+112,239 ,+15,732,2,173,,31,2,55,000,537,Africa\n,Argentina,2,941,+102,134 ,+2,709,2,098,123,65,3,32,712,724,South America\n,Morocco,2,855,+170,141 ,+4,327,2,387,1,77,4,15,123,410,Africa\n,Thailand,2,765,+32,47 ,,1,928,790,61,40,0.7,100,498,1,440,Asia\n,Algeria,2,629,+95,375 ,+8,1,047,1,207,40,60,9,6,500,148,Africa\n,Moldova,2,472,+121,67 ,+10,457,1,948,80,613,17,10,488,2,600,Europe\n,Bangladesh,2,456,+312,91 ,+7,75,2,290,1,15,0.6,23,941,145,Asia\n,Greece,2,235,,113 ,+3,269,1,853,67,214,11,53,290,5,113,Europe\n,Hungary,1,916,+82,189 ,+17,250,1,477,61,198,20,46,353,4,798,Europe\n,Kuwait,1,915,+164,7 ,+1,305,1,603,38,448,2,,,Asia\n,Bahrain,1,881,+108,7 ,,759,1,115,2,1,105,4,85,126,50,028,Asia\n,Croatia,1,871,+39,47 ,+8,709,1,115,23,456,11,24,186,5,891,Europe\n,Iceland,1,771,+11,9 ,,1,291,471,3,5,190,26,41,091,120,416,Europe\n,Kazakhstan,1,676,+61,17 ,,400,1,259,22,89,0.9,100,878,5,373,Asia\n,Uzbekistan,1,565,+75,5 ,,225,1,335,8,47,0.1,136,000,4,063,Asia\n,Iraq,1,539,+26,82 ,,1,009,448,,38,2,56,147,1,396,Asia\n,Estonia,1,528,+16,40 ,+2,164,1,324,10,1,152,30,40,333,30,405,Europe\n,New Zealand,1,431,+9,12 ,+1,912,507,18,297,2,83,224,17,258,Australia/Oceania\n,Azerbaijan,1,398,+25,19 ,+1,712,667,21,138,2,95,747,9,443,Asia\n,Slovenia,1,330,+13,74 ,+4,192,1,064,26,640,36,41,265,19,849,Europe\n,Lithuania,1,298,+59,35 ,+2,242,1,021,14,477,13,64,035,23,522,Europe\n,Armenia,1,291,+43,20 ,,545,726,30,436,7,12,680,4,279,Asia\n,Bosnia and Herzegovina,1,285,+17,48 ,+1,347,890,4,392,15,17,818,5,431,Europe\n,Oman,1,266,+86,7 ,+1,233,1,026,3,248,1,,,Asia\n,North Macedonia,1,207,+37,51 ,+2,179,977,15,579,24,11,870,5,697,Europe\n,Slovakia,1,161,+72,12 ,+1,229,920,10,213,2,44,278,8,110,Europe\n,Ghana,1,042,+208,9 ,,99,934,4,34,0.3,68,591,2,207,Africa\n,Cuba,1,035,+49,34 
,+2,255,746,16,91,3,26,982,2,382,North America\n,Hong Kong,1,026,+2,4 ,,602,420,8,137,0.5,131,786,17,579,Asia\n,Cameroon,1,017,,42 ,,305,670,33,38,2,,,Africa\n,Afghanistan,996,+63,33 ,+3,131,832,7,26,0.8,6,422,165,Asia\n,Bulgaria,894,+16,42 ,+1,161,691,36,129,6,26,417,3,802,Europe\n,Tunisia,879,+13,38 ,+1,43,798,33,74,3,16,098,1,362,Africa\n,Ivory Coast,847,+46,9 ,+1,260,578,,32,0.3,,,Africa\n,Djibouti,846,+114,2 ,,102,742,,856,2,8,144,8,243,Africa\n,Cyprus,767,+6,12 ,,81,674,15,635,10,34,087,28,233,Asia\n,Latvia,727,+15,5 ,,88,634,5,385,3,35,881,19,023,Europe\n,Andorra,713,+9,36 ,+1,235,442,17,9,228,466,1,673,21,653,Europe\n,Diamond Princess,712,,13 ,,644,55,7,,,,,\n,Lebanon,673,+1,21 ,,102,550,27,99,3,20,929,3,066,Asia\n,Costa Rica,660,+5,5 ,+1,112,543,10,130,1.0,10,417,2,045,North America\n,Niger,648,+9,20 ,+1,117,511,,27,0.8,4,715,195,Africa\n,Nigeria,627,+85,21 ,+2,170,436,2,3,0.1,7,153,35,Africa\n,Guinea,579,+61,5 ,+2,87,487,,44,0.4,,,Africa\n,Burkina Faso,576,+11,36 ,,338,202,,28,2,,,Africa\n,Albania,562,+14,26 ,,314,222,5,195,9,5,542,1,926,Europe\n,Kyrgyzstan,554,+48,5 ,,133,416,5,85,0.8,26,147,4,008,Asia\n,Bolivia,520,+27,32 ,+1,31,457,3,45,3,3,569,306,South America\n,Uruguay,517,,10 ,+1,298,209,14,149,3,13,207,3,802,South America\n,Channel Islands,488,+4,21 ,,73,394,,2,807,121,3,320,19,095,Europe\n,Honduras,472,+15,46 ,,15,411,10,48,5,2,535,256,North America\n,San Marino,461,+6,39 ,,60,362,4,13,586,1,149,1,711,50,426,Europe\n,Palestine,439,+21,3 ,+1,71,365,,86,0.6,17,329,3,397,Asia\n,Malta,427,+1,3 ,,118,306,2,967,7,23,253,52,663,Europe\n,Taiwan,420,+22,6 ,,189,225,,18,0.3,53,005,2,226,Asia\n,Jordan,417,+4,7 ,,276,134,5,41,0.7,29,000,2,842,Asia\n,Réunion,408,+1, ,,237,171,4,456,,,,Africa\n,Georgia,394,+6,4 ,,93,297,6,99,1,5,027,1,260,Asia\n,Senegal,367,+17,3 ,,220,144,1,22,0.2,,,Africa\n,Mauritius,328,+3,9 ,,208,111,3,258,7,9,755,7,670,Africa\n,DRC,327,+20,25 ,,26,276,,4,0.3,,,Africa\n,Montenegro,308,+1,5 ,,55,248,7,490,8,3,874,6,168,Europe\n,Isle of 
Man,298,+1,6 ,,193,99,10,3,505,71,2,465,28,989,Europe\n,Sri Lanka,271,+17,7 ,,96,168,1,13,0.3,6,463,302,Asia\n,Mayotte,271,+17,4 ,,117,150,5,993,15,1,850,6,781,Africa\n,Kenya,270,+8,14 ,+2,67,189,2,5,0.3,13,239,246,Africa\n,Vietnam,268,, ,,202,66,8,3,,206,253,2,119,Asia\n,Guatemala,257,+22,7 ,,21,229,3,14,0.4,7,200,402,North America\n,Venezuela,256,+29,9 ,,117,130,4,9,0.3,299,714,10,540,South America\n,Mali,224,+8,14 ,+1,42,168,,11,0.7,,,Africa\n,Paraguay,206,+4,8 ,,41,157,1,29,1,4,950,694,South America\n,El Salvador,201,+11,7 ,,44,150,1,31,1,11,160,1,721,North America\n,Faeroe Islands,185,+1, ,,176,9,,3,786,,6,021,123,222,Europe\n,Jamaica,173,+10,5 ,,27,141,,58,2,1,605,542,North America\n,Tanzania,170,+23,7 ,+2,11,152,4,3,0.1,,,Africa\n,Somalia,164,+29,7 ,,3,154,2,10,0.4,,,Africa\n,Martinique,163,,12 ,,73,78,11,434,32,,,North America\n,Guadeloupe,148,,8 ,,73,67,13,370,20,,,North America\n,Rwanda,147,+3, ,,76,71,,11,,6,959,537,Africa\n,Congo,143,,6 ,,11,126,,26,1,,,Africa\n,Brunei ,138,+1,1 ,,115,22,2,315,2,11,472,26,223,Asia\n,Gibraltar,132,, ,,120,12,1,3,918,,1,912,56,751,Europe\n,Cambodia,122,, ,,105,17,1,7,,5,768,345,Asia\n,Madagascar,121,+1, ,,39,82,1,4,,2,357,85,Africa\n,Trinidad and Tobago,114,,8 ,,21,85,,81,6,1,335,954,North America\n,Myanmar,111,+13,5 ,,7,99,,2,0.09,4,605,85,Asia\n,Gabon,109,+1,1 ,,7,101,,49,0.4,572,257,Africa\n,Ethiopia,108,+3,3 ,,16,89,1,0.9,0.03,7,557,66,Africa\n,Aruba,97,+1,2 ,,49,46,4,909,19,1,442,13,506,North America\n,French Guiana,96,, ,,64,32,2,321,,,,South America\n,Monaco,94,,3 ,,22,69,3,2,395,76,,,Europe\n,Liberia,91,+15,8 ,+1,7,76,,18,2,,,Africa\n,Bermuda,86,+3,5 ,,35,46,10,1,381,80,638,10,244,North America\n,Togo,84,,5 ,,52,27,,10,0.6,4,295,519,Africa\n,Liechtenstein,81,,1 ,,55,25,,2,124,26,900,23,605,Europe\n,Equatorial Guinea,79,, ,,4,75,,56,,854,609,Africa\n,Barbados,75,,5 ,,17,53,4,261,17,1,000,3,480,North America\n,Sint Maarten,67,+3,10 ,+1,12,45,6,1,563,233,163,3,802,North America\n,Sudan,66,,10 
,,6,50,,2,0.2,,,Africa\n,Guyana,65,+2,7 ,+1,9,49,4,83,9,322,409,South America\n,Zambia,61,+4,3 ,+1,33,25,1,3,0.2,2,586,141,Africa\n,Cabo Verde,61,+3,1 ,,1,59,,110,2,,,Africa\n,Cayman Islands,61,,1 ,,7,53,3,928,15,702,10,681,North America\n,Bahamas,60,+2,9 ,,11,40,1,153,23,,,North America\n,French Polynesia,55,, ,,2,53,1,196,,1,302,4,635,Australia/Oceania\n,Uganda,55,, ,,28,27,,1,,11,101,243,Africa\n,Maldives,52,+17, ,,16,36,,96,,3,714,6,871,Asia\n,Libya,51,+2,1 ,,11,39,,7,0.1,725,106,Africa\n,Guinea-Bissau,50,+4, ,,3,47,,25,,1,500,762,Africa\n,Haiti,47,+3,3 ,,,44,,4,0.3,498,44,North America\n,Macao,45,, ,,17,28,1,69,,,,Asia\n,Syria,39,+1,3 ,+1,5,31,,2,0.2,,,Asia\n,Eritrea,39,, ,,3,36,,11,,,,Africa\n,Mozambique,39,+4, ,,8,31,,1,,1,037,33,Africa\n,Saint Martin,37,,2 ,,19,16,5,957,52,,,North America\n,Benin,35,,1 ,,18,16,,3,0.08,,,Africa\n,Sierra Leone,35,+5, ,,6,29,,4,,,,Africa\n,Chad,33,, ,,8,25,,2,,,,Africa\n,Mongolia,32,+1, ,,7,25,,10,,1,554,474,Asia\n,Nepal,31,, ,,4,27,,1,,29,567,1,015,Asia\n,Zimbabwe,25,,3 ,,2,20,,2,0.2,2,851,192,Africa\n,Angola,24,,2 ,,6,16,,0.7,0.06,,,Africa\n,Antigua and Barbuda,23,,3 ,,3,17,1,235,31,73,745,North America\n,Eswatini,22,,1 ,,8,13,,19,0.9,714,615,Africa\n,Botswana,20,+5,1 ,,,19,,9,0.4,4,432,1,885,Africa\n,Laos,19,, ,,2,17,,3,,1,333,183,Asia\n,Timor-Leste,19,+1, ,,1,18,,14,,,,Asia\n,Belize,18,,2 ,,2,14,1,45,5,651,1,637,North America\n,New Caledonia,18,, ,,15,3,1,63,,3,399,11,906,Australia/Oceania\n,Malawi,17,,2 ,,3,12,1,0.9,0.1,429,22,Africa\n,Fiji,17,, ,,,17,,19,,,,Australia/Oceania\n,Dominica,16,, ,,8,8,,222,,345,4,793,North America\n,Namibia,16,, ,,6,10,,6,,524,206,Africa\n,Saint Lucia,15,, ,,11,4,,82,,344,1,873,North America\n,Curaçao,14,,1 ,,11,2,,85,6,286,1,743,North America\n,Grenada,14,, ,,6,8,4,124,,92,818,North America\n,Saint Kitts and Nevis,14,, ,,,14,,263,,234,4,399,North America\n,CAR,12,, ,,4,8,,2,,,,Africa\n,St. 
Vincent Grenadines,12,, ,,1,11,,108,,87,784,North America\n,Turks and Caicos,11,,1 ,,,10,,284,26,83,2,144,North America\n,Falkland Islands,11,, ,,3,8,,3,161,,319,91,667,South America\n,Greenland,11,, ,,11,0,,194,,1,043,18,372,North America\n,Montserrat,11,, ,,2,9,1,2,204,,36,7,212,North America\n,Seychelles,11,, ,,5,6,,112,,,,Africa\n,Nicaragua,10,+1,2 ,,6,2,,2,0.3,,,North America\n,Gambia,10,+1,1 ,,2,7,,4,0.4,316,131,Africa\n,Suriname,10,,1 ,,6,3,,17,2,,,South America\n,MS Zaandam,9,,2 ,,,7,,,,,,\n,Vatican City,8,, ,,2,6,,9,988,,,,Europe\n,Mauritania,7,,1 ,,6,0,,2,0.2,913,196,Africa\n,Papua New Guinea,7,, ,,,7,,0.8,,167,19,Australia/Oceania\n,St. Barth,6,, ,,5,1,,607,,,,North America\n,Western Sahara,6,, ,,,6,,10,,,,Africa\n,Burundi,5,,1 ,,,4,,0.4,0.08,80,7,Africa\n,Bhutan,5,, ,,2,3,,6,,8,107,10,507,Asia\n,Caribbean Netherlands,5,+2, ,,,5,,191,,110,4,195,North America\n,British Virgin Islands,4,,1 ,+1,2,1,,132,33,,,North America\n,Sao Tome and Principe,4,, ,,,4,,18,,19,87,Africa\n,South Sudan,4,, ,,,4,,0.4,,,,Africa\n,Anguilla,3,, ,,1,2,,200,,,,North America\n,Saint Pierre Miquelon,1,, ,,,1,,173,,,,North America\n,Yemen,1,, ,,,1,,0.03,,,,Asia\n,Total:,383,157,+12,373,14,786,+385,180,611,187,760,6,403,,,,,Asia\n,Total:,820,083,+28,674,43,337,+1,800,87,020,689,726,14,644,,,,,North America\n,Total:,1,089,256,+29,218,101,836,+2,558,309,495,677,925,25,395,,,,,Europe\n,Total:,82,252,+4,279,3,849,+196,36,329,42,074,7,534,,,,,South America\n,Total:,8,140,+35,83,+2,5,159,2,898,68,,,,,Australia/Oceania\n,Total:,22,966,+1,225,1,125,+43,5,840,16,001,174,,,,,Africa\n,Total:,721,,15,,644,62,7,,,,,\n,Total:,2,406,575,+75,804,165,031,+4,984,625,098,1,616,446,54,225,308.7,21.2,,,All\n"
],
[
"import os\ncountrydatasaved =\"\"\nfor record in soup.findAll('tr'):\n countrydata=\"\"\n for data in record.findAll('td'):\n countrydata =countrydata+\"|\"+data.text\n countrydatasaved = countrydatasaved +\"\\n\"+countrydata[1:]\n \n \nfile = open(os.path.expanduser(\"country.csv\"),\"wb\")\nfile.write(bytes(countrydatasaved,encoding=\"ascii\",errors='ignore'))\nprint(countrydatasaved)",
"\n\n\nNorth America\n|820,749|+666|43,369|+32|86,885|690,495|14,651|||||North America\n\nEurope\n|1,097,423|+8,167|102,454|+618|316,674|678,295|25,383|||||Europe\n\nAsia\n|386,077|+2,920|14,840|+54|181,254|189,983|6,403|||||Asia\n\nSouth America\n|82,310|+58|3,850|+1|36,339|42,121|7,533|||||South America\n\nOceania\n|8,157|+17|83||5,252|2,822|54|||||Australia/Oceania\n\nAfrica\n|22,992|+26|1,128|+3|5,847|16,017|174|||||Africa\n\n\n|721||15||644|62|7|||||\nWorld|2,418,429|+11,854|165,739|+708|632,895|1,619,795|54,205|310|21.3|||All\nUSA|764,265||40,565 ||71,012|652,688|13,566|2,309|123|3,861,596|11,666|North America\nSpain|200,210|+1,536|20,852 |+399|80,587|98,771|7,371|4,282|446|930,230|19,896|Europe\nItaly|178,972||23,660 ||47,055|108,257|2,635|2,960|391|1,356,541|22,436|Europe\nFrance|152,894||19,718 ||36,578|96,598|5,744|2,342|302|463,662|7,103|Europe\nGermany|145,743|+1|4,642 ||91,500|49,601|2,889|1,740|55|1,728,357|20,629|Europe\nUK|120,067||16,060 ||N/A|103,663|1,559|1,769|237|482,063|7,101|Europe\nTurkey|86,306||2,017 ||11,976|72,313|1,922|1,023|24|634,277|7,521|Asia\nIran|82,211||5,118 ||57,023|20,070|3,456|979|61|341,662|4,068|Asia\nRussia|47,121|+4,268|405 |+44|3,446|43,270|8|323|3|2,050,000|14,047|Europe\nBelgium|39,983|+1,487|5,828 |+145|8,895|25,260|1,071|3,450|503|153,778|13,269|Europe\nBrazil|38,654||2,462 ||22,130|14,062|6,634|182|12|62,985|296|South America\nCanada|35,056||1,587 ||11,843|21,626|557|929|42|549,349|14,555|North America\nNetherlands|32,655||3,684 ||250|28,721|1,176|1,906|215|154,911|9,041|Europe\nSwitzerland|27,740||1,393 ||17,800|8,547|386|3,205|161|221,263|25,566|Europe\nPortugal|20,206||714 ||610|18,882|224|1,982|70|235,878|23,133|Europe\nIndia|17,615||559 ||2,854|14,202||13|0.4|401,586|291|Asia\nPeru|15,628||400 ||6,811|8,417|167|474|12|143,745|4,360|South America\nIreland|15,251||610 ||77|14,564|294|3,089|124|90,646|18,358|Europe\nAustria|14,749||452 ||10,501|3,796|204|1,638|50|179,243|19,902|Europe\nSweden|14,385||1,540 
||550|12,295|450|1,424|152|74,600|7,387|Europe\nIsrael|13,654|+163|173 |+1|3,872|9,609|150|1,577|20|187,250|21,634|Asia\nJapan|10,797||236 ||1,159|9,402|217|85|2|112,816|892|Asia\nS. Korea|10,674|+13|236 |+2|8,114|2,324|55|208|5|563,035|10,982|Asia\nChile|10,088||133 ||4,338|5,617|360|528|7|113,649|5,945|South America\nEcuador|9,468||474 ||1,061|7,933|124|537|27|31,809|1,803|South America\nPoland|9,453|+166|362 |+2|1,133|7,958|160|250|10|204,246|5,397|Europe\nSaudi Arabia|9,362||97 ||1,398|7,867|97|269|3|180,000|5,170|Asia\nRomania|8,746||451 ||1,892|6,403|256|455|23|93,611|4,866|Europe\nPakistan|8,418|+70|176 |+8|1,970|6,272|46|38|0.8|104,302|472|Asia\nMexico|8,261|+764|686 |+36|2,627|4,948|207|64|5|49,570|384|North America\nSingapore|8,014|+1,426|11 ||768|7,235|22|1,370|2|94,796|16,203|Asia\nDenmark|7,515|+131|355 ||4,141|3,019|84|1,297|61|96,244|16,616|Europe\nNorway|7,103|+25|165 ||32|6,906|58|1,310|30|142,166|26,224|Europe\nCzechia|6,787|+41|188 |+2|1,311|5,288|84|634|18|172,123|16,073|Europe\nUAE|6,781||41 ||1,286|5,454|1|686|4|767,000|77,550|Asia\nIndonesia|6,760|+185|590 |+8|747|5,423||25|2|42,219|154|Asia\nAustralia|6,619|+7|71 ||4,258|2,290|49|260|3|431,734|16,931|Australia/Oceania\nPhilippines|6,459|+200|428 |+19|613|5,418|1|59|4|59,928|547|Asia\nSerbia|6,318||122 ||753|5,443|120|723|14|38,701|4,429|Europe\nUkraine|5,710|+261|151 |+10|359|5,200|45|131|3|58,093|1,328|Europe\nQatar|5,448||8 ||518|4,922|37|1,891|3|62,538|21,707|Asia\nMalaysia|5,389||89 ||3,197|2,103|46|167|3|103,892|3,210|Asia\nBelarus|4,779||47 ||494|4,238|65|506|5|98,231|10,396|Europe\nDominican Republic|4,680||226 ||363|4,091|114|431|21|15,583|1,436|North America\nPanama|4,467|+194|126 |+6|165|4,176|98|1,035|29|19,091|4,425|North America\nFinland|3,868|+85|94 ||1,700|2,074|68|698|17|58,727|10,599|Europe\nColombia|3,792||179 ||711|2,902|98|75|4|62,746|1,233|South America\nLuxembourg|3,550||73 ||627|2,850|31|5,671|117|33,666|53,781|Europe\nSouth Africa|3,158||54 
||903|2,201|36|53|0.9|114,711|1,934|Africa\nEgypt|3,144||239 ||732|2,173||31|2|55,000|537|Africa\nBangladesh|2,948|+492|101 |+10|85|2,762|1|18|0.6|26,604|162|Asia\nArgentina|2,941||134 ||709|2,098|123|65|3|32,712|724|South America\nMorocco|2,855||141 ||327|2,387|1|77|4|15,123|410|Africa\nThailand|2,792|+27|47 ||1,999|746|61|40|0.7|142,589|2,043|Asia\nAlgeria|2,629||375 ||1,047|1,207|40|60|9|6,500|148|Africa\nMoldova|2,472||67 ||457|1,948|80|613|17|10,488|2,600|Europe\nGreece|2,235||113 ||269|1,853|67|214|11|53,290|5,113|Europe\nHungary|1,984|+68|199 |+10|267|1,518|60|205|21|48,057|4,975|Europe\nKuwait|1,915||7 ||305|1,603|38|448|2|||Asia\nBahrain|1,881||7 ||762|1,112|2|1,105|4|86,732|50,972|Asia\nCroatia|1,871||47 ||709|1,115|23|456|11|24,186|5,891|Europe\nIceland|1,771||9 ||1,291|471|3|5,190|26|41,091|120,416|Europe\nKazakhstan|1,757|+81|19 |+2|417|1,321|22|94|1|117,949|6,282|Asia\nUzbekistan|1,565||5 ||225|1,335|8|47|0.1|136,000|4,063|Asia\nIraq|1,539||82 ||1,009|448||38|2|56,147|1,396|Asia\nEstonia|1,535|+7|40 ||165|1,330|9|1,157|30|40,930|30,855|Europe\nNew Zealand|1,440|+9|12 ||974|454|3|299|2|86,305|17,897|Australia/Oceania\nOman|1,410|+144|7 ||238|1,165|3|276|1|||Asia\nAzerbaijan|1,398||19 ||712|667|21|138|2|95,747|9,443|Asia\nArmenia|1,339|+48|22 |+2|580|737|30|452|7|13,373|4,513|Asia\nSlovenia|1,335|+5|77 |+3|193|1,065|26|642|37|41,802|20,107|Europe\nLithuania|1,326|+28|37 |+2|242|1,047|14|487|14|66,352|24,374|Europe\nBosnia and Herzegovina|1,285||48 ||347|890|4|392|15|17,818|5,431|Europe\nNorth Macedonia|1,207||51 ||179|977|15|579|24|11,870|5,697|Europe\nSlovakia|1,161||12 ||229|920|10|213|2|44,278|8,110|Europe\nGhana|1,042||9 ||99|934|4|34|0.3|68,591|2,207|Africa\nCuba|1,035||34 ||255|746|16|91|3|26,982|2,382|North America\nHong Kong|1,026||4 ||602|420|8|137|0.5|131,786|17,579|Asia\nCameroon|1,017||42 ||305|670|33|38|2|||Africa\nAfghanistan|996||33 ||131|832|7|26|0.8|6,422|165|Asia\nBulgaria|915|+21|43 
|+1|167|705|36|132|6|26,417|3,802|Europe\nTunisia|879||38 ||43|798|33|74|3|16,098|1,362|Africa\nIvory Coast|847||9 ||260|578||32|0.3|||Africa\nDjibouti|846||2 ||102|742||856|2|8,144|8,243|Africa\nCyprus|767||12 ||81|674|15|635|10|34,087|28,233|Asia\nLatvia|739|+12|5 ||88|646|5|392|3|36,668|19,440|Europe\nAndorra|713||36 ||235|442|17|9,228|466|1,673|21,653|Europe\nDiamond Princess|712||13 ||644|55|7|||||\nLebanon|673||21 ||102|550|27|99|3|20,929|3,066|Asia\nCosta Rica|660||5 ||112|543|14|130|1.0|10,417|2,045|North America\nNiger|648||20 ||117|511||27|0.8|4,715|195|Africa\nNigeria|627||21 ||170|436|2|3|0.1|7,153|35|Africa\nAlbania|584|+22|26 ||327|231|5|203|9|5,775|2,007|Europe\nGuinea|579||5 ||87|487||44|0.4|||Africa\nBurkina Faso|576||36 ||338|202||28|2|||Africa\nKyrgyzstan|568|+14|7 |+2|201|360|5|87|1|26,147|4,008|Asia\nBolivia|564|+44|33 |+1|31|500|3|48|3|3,569|306|South America\nUruguay|528|+11|10 ||298|220|13|152|3|13,207|3,802|South America\nChannel Islands|488||21 ||73|394||2,807|121|3,320|19,095|Europe\nHonduras|477|+5|46 ||25|406|10|48|5|2,535|256|North America\nSan Marino|461||39 ||60|362|4|13,586|1,149|1,711|50,426|Europe\nPalestine|449|+10|3 ||71|375||88|0.6|17,329|3,397|Asia\nMalta|427||3 ||118|306|2|967|7|23,253|52,663|Europe\nTaiwan|422|+2|6 ||203|213||18|0.3|53,632|2,252|Asia\nJordan|417||7 ||276|134|5|41|0.7|29,000|2,842|Asia\nRéunion|408|| ||237|171|4|456||||Africa\nGeorgia|399|+5|4 ||93|302|6|100|1|5,027|1,260|Asia\nSenegal|367||4 |+1|220|143|1|22|0.2|||Africa\nMauritius|328||9 ||208|111|3|258|7|9,755|7,670|Africa\nDRC|327||25 ||27|275||4|0.3|||Africa\nMontenegro|311|+3|5 ||55|251|7|495|8|3,874|6,168|Europe\nIsle of Man|298||6 ||193|99|10|3,505|71|2,465|28,989|Europe\nSri Lanka|295|+24|7 ||97|191|1|14|0.3|6,463|302|Asia\nGuatemala|289|+32|7 ||21|261|3|16|0.4|7,200|402|North America\nMayotte|271||4 ||117|150|5|993|15|1,850|6,781|Africa\nKenya|270||14 ||67|189|2|5|0.3|13,239|246|Africa\nVietnam|268|| 
||202|66|8|3||206,253|2,119|Asia\nVenezuela|256||9 ||117|130|4|9|0.3|336,169|11,822|South America\nMali|224||14 ||42|168||11|0.7|||Africa\nEl Salvador|218|+17|7 ||46|165|2|34|1|12,210|1,882|North America\nParaguay|208|+2|8 ||46|154|1|29|1|4,950|694|South America\nJamaica|196|+23|5 ||27|164||66|2|1,809|611|North America\nFaeroe Islands|185|| ||176|9||3,786||6,021|123,222|Europe\nTanzania|170||7 ||11|152|4|3|0.1|||Africa\nSomalia|164||7 ||3|154|2|10|0.4|||Africa\nMartinique|163||12 ||73|78|11|434|32|||North America\nGuadeloupe|148||8 ||73|67|13|370|20|||North America\nRwanda|147|| ||76|71||11||6,959|537|Africa\nCongo|143||6 ||11|126||26|1|||Africa\nBrunei |138||1 ||116|21|2|315|2|11,633|26,591|Asia\nGibraltar|132|| ||120|12|1|3,918||1,912|56,751|Europe\nCambodia|122|| ||107|15|1|7||5,768|345|Asia\nMadagascar|121|| ||39|82|1|4||2,357|85|Africa\nTrinidad and Tobago|114||8 ||21|85||81|6|1,335|954|North America\nMyanmar|111||5 ||7|99||2|0.09|4,786|88|Asia\nGabon|109||1 ||7|101||49|0.4|572|257|Africa\nEthiopia|108||3 ||16|89|1|0.9|0.03|7,557|66|Africa\nAruba|97||2 ||49|46|4|909|19|1,442|13,506|North America\nFrench Guiana|97|+1| ||69|28|2|325||||South America\nMonaco|94||3 ||22|69|3|2,395|76|||Europe\nSudan|92|+26|12 |+2|8|72||2|0.3|||Africa\nLiberia|91||8 ||7|76||18|2|||Africa\nBermuda|86||5 ||35|46|10|1,381|80|638|10,244|North America\nTogo|84||5 ||52|27||10|0.6|4,295|519|Africa\nLiechtenstein|81||1 ||55|25||2,124|26|900|23,605|Europe\nEquatorial Guinea|79|| ||4|75||56||854|609|Africa\nBarbados|75||5 ||19|51|4|261|17|1,035|3,602|North America\nSint Maarten|67||10 ||12|45|6|1,563|233|163|3,802|North America\nGuyana|65||7 ||9|49|4|83|9|322|409|South America\nZambia|61||3 ||33|25|1|3|0.2|2,586|141|Africa\nCabo Verde|61||1 ||1|59||110|2|||Africa\nCayman Islands|61||1 ||7|53|3|928|15|702|10,681|North America\nBahamas|60||9 ||11|40|1|153|23|||North America\nFrench Polynesia|55|| ||2|53|1|196||1,302|4,635|Australia/Oceania\nUganda|55|| 
||28|27||1||12,565|275|Africa\nMaldives|52|| ||16|36||96||3,714|6,871|Asia\nLibya|51||1 ||11|39||7|0.1|725|106|Africa\nGuinea-Bissau|50|| ||3|47||25||1,500|762|Africa\nHaiti|47||3 |||44||4|0.3|498|44|North America\nMacao|45|| ||17|28|1|69||||Asia\nSyria|39||3 ||5|31||2|0.2|||Asia\nEritrea|39|| ||3|36||11||||Africa\nMozambique|39|| ||8|31||1||1,037|33|Africa\nSaint Martin|37||2 ||19|16|5|957|52|||North America\nBenin|35||1 ||18|16||3|0.08|||Africa\nSierra Leone|35|| ||6|29||4||||Africa\nChad|33|| ||8|25||2||||Africa\nMongolia|33|+1| ||7|26||10||1,554|474|Asia\nNepal|31|| ||4|27||1||29,567|1,015|Asia\nZimbabwe|25||3 ||2|20||2|0.2|2,851|192|Africa\nAngola|24||2 ||6|16||0.7|0.06|||Africa\nAntigua and Barbuda|23||3 ||3|17|1|235|31|73|745|North America\nEswatini|22||1 ||8|13||19|0.9|714|615|Africa\nTimor-Leste|22|+3| ||1|21||17||||Asia\nBotswana|20||1 |||19||9|0.4|4,432|1,885|Africa\nLaos|19|| ||2|17||3||1,349|185|Asia\nBelize|18||2 ||2|14|1|45|5|651|1,637|North America\nFiji|18|+1| ||3|15||20||||Australia/Oceania\nNew Caledonia|18|| ||15|3|1|63||3,399|11,906|Australia/Oceania\nMalawi|17||2 ||3|12|1|0.9|0.1|429|22|Africa\nDominica|16|| ||8|8||222||345|4,793|North America\nNamibia|16|| ||6|10||6||524|206|Africa\nSaint Kitts and Nevis|15|+1| |||15||282||247|4,643|North America\nSaint Lucia|15|| ||11|4||82||344|1,873|North America\nCuraçao|14||1 ||11|2||85|6|286|1,743|North America\nGrenada|14|| ||6|8|4|124||92|818|North America\nCAR|12|| ||4|8||2||||Africa\nSt. 
Vincent Grenadines|12|| ||1|11||108||87|784|North America\nTurks and Caicos|11||1 |||10||284|26|83|2,144|North America\nFalkland Islands|11|| ||3|8||3,161||319|91,667|South America\nGreenland|11|| ||11|0||194||1,043|18,372|North America\nMontserrat|11|| ||2|9|1|2,204||36|7,212|North America\nSeychelles|11|| ||5|6||112||||Africa\nNicaragua|10||2 ||6|2||2|0.3|||North America\nGambia|10||1 ||2|7||4|0.4|316|131|Africa\nSuriname|10||1 ||6|3||17|2|||South America\nMS Zaandam|9||2 |||7||||||\nVatican City|8|| ||2|6||9,988||||Europe\nMauritania|7||1 ||6|0||2|0.2|913|196|Africa\nPapua New Guinea|7|| |||7||0.8||167|19|Australia/Oceania\nSt. Barth|6|| ||5|1||607||||North America\nWestern Sahara|6|| |||6||10||||Africa\nBritish Virgin Islands|5|+1|1 ||3|1||165|33|||North America\nBurundi|5||1 ||4|0||0.4|0.08|80|7|Africa\nBhutan|5|| ||2|3||6||8,107|10,507|Asia\nCaribbean Netherlands|5|| |||5||191||110|4,195|North America\nSao Tome and Principe|4|| |||4||18||19|87|Africa\nSouth Sudan|4|| |||4||0.4||||Africa\nAnguilla|3|| ||1|2||200||||North America\nSaint Pierre Miquelon|1|| |||1||173||||North America\nYemen|1|| |||1||0.03||||Asia\nChina|82,747|+12|4,632 ||77,084|1,031|81|57|3|||Asia\nTotal:|820,749|+666|43,369|+32|86,885|690,495|14,651|||||North America\nTotal:|1,097,423|+8,167|102,454|+618|316,674|678,295|25,383|||||Europe\nTotal:|386,077|+2,920|14,840|+54|181,254|189,983|6,403|||||Asia\nTotal:|82,310|+58|3,850|+1|36,339|42,121|7,533|||||South America\nTotal:|8,157|+17|83||5,252|2,822|54|||||Australia/Oceania\nTotal:|22,992|+26|1,128|+3|5,847|16,017|174|||||Africa\nTotal:|721||15||644|62|7|||||\nTotal:|2,418,429|+11,854|165,739|+708|632,895|1,619,795|54,205|310.3|21.3|||All\n\n\nAsia\n|383,157|+12,373|14,786|+385|180,611|187,760|6,403|||||Asia\n\nNorth America\n|820,083|+28,674|43,337|+1,800|87,020|689,726|14,644|||||North America\n\nEurope\n|1,089,256|+29,218|101,836|+2,558|309,495|677,925|25,395|||||Europe\n\nSouth 
America\n|82,252|+4,279|3,849|+196|36,329|42,074|7,534|||||South America\n\nOceania\n|8,140|+35|83|+2|5,159|2,898|68|||||Australia/Oceania\n\nAfrica\n|22,966|+1,225|1,125|+43|5,840|16,001|174|||||Africa\n\n\n|721||15||644|62|7|||||\nWorld|2,406,575|+75,804|165,031|+4,984|625,098|1,616,446|54,225|309|21.2|||All\nChina|82,735|+16|4,632 ||77,062|1,041|85|57|3|||Asia\nUSA|764,636|+25,844|40,575 |+1,561|71,187|652,874|13,566|2,310|123|3,861,549|11,666|North America\nSpain|198,674|+4,258|20,453 |+410|77,357|100,864|7,371|4,249|437|930,230|19,896|Europe\nItaly|178,972|+3,047|23,660 |+433|47,055|108,257|2,635|2,960|391|1,356,541|22,436|Europe\nFrance|152,894|+1,101|19,718 |+395|36,578|96,598|5,744|2,342|302|463,662|7,103|Europe\nGermany|145,742|+2,018|4,642 |+104|88,000|53,100|2,889|1,739|55|1,728,357|20,629|Europe\nUK|120,067|+5,850|16,060 |+596|N/A|103,663|1,559|1,769|237|482,063|7,101|Europe\nTurkey|86,306|+3,977|2,017 |+127|11,976|72,313|1,922|1,023|24|634,277|7,521|Asia\nIran|82,211|+1,343|5,118 |+87|57,023|20,070|3,456|979|61|341,662|4,068|Asia\nRussia|42,853|+6,060|361 |+48|3,291|39,201|8|294|2|1,940,000|13,294|Europe\nBrazil|38,654|+1,932|2,462 |+101|22,130|14,062|6,634|182|12|62,985|296|South America\nBelgium|38,496|+1,313|5,683 |+230|8,757|24,056|1,081|3,322|490|153,778|13,269|Europe\nCanada|35,056|+1,673|1,587 |+117|11,843|21,626|557|929|42|536,062|14,203|North America\nNetherlands|32,655|+1,066|3,684 |+83|250|28,721|1,176|1,906|215|154,911|9,041|Europe\nSwitzerland|27,740|+336|1,393 |+25|17,800|8,547|386|3,205|161|221,263|25,566|Europe\nPortugal|20,206|+521|714 |+27|610|18,882|224|1,982|70|235,878|23,133|Europe\nIndia|17,615|+1,250|559 |+38|2,854|14,202||13|0.4|401,586|291|Asia\nPeru|15,628|+1,208|400 |+52|6,811|8,417|167|474|12|143,745|4,360|South America\nIreland|15,251|+493|610 |+39|77|14,564|294|3,089|124|90,646|18,358|Europe\nAustria|14,749|+78|452 |+9|10,501|3,796|204|1,638|50|179,243|19,902|Europe\nSweden|14,385|+563|1,540 
|+29|550|12,295|450|1,424|152|74,600|7,387|Europe\nIsrael|13,491|+226|172 |+8|3,754|9,565|146|1,559|20|187,250|21,634|Asia\nJapan|10,797|+501|236 |+14|1,159|9,402|217|85|2|112,816|892|Asia\nS. Korea|10,661|+8|234 |+2|8,042|2,385|55|208|5|559,109|10,905|Asia\nChile|10,088|+358|133 |+7|4,338|5,617|360|528|7|113,649|5,945|South America\nEcuador|9,468|+446|474 |+18|1,061|7,933|124|537|27|31,809|1,803|South America\nSaudi Arabia|9,362|+1,088|97 |+5|1,398|7,867|97|269|3|180,000|5,170|Asia\nPoland|9,287|+545|360 |+13|1,040|7,887|160|245|10|204,246|5,397|Europe\nRomania|8,746|+328|451 |+30|1,892|6,403|256|455|23|93,611|4,866|Europe\nPakistan|8,348|+710|168 |+25|1,868|6,312|46|38|0.8|98,522|446|Asia\nMexico|7,497|+622|650 |+104|2,627|4,220|207|58|5|49,167|381|North America\nDenmark|7,384|+142|355 |+9|4,141|2,888|84|1,275|61|94,277|16,277|Europe\nNorway|7,078|+42|165 |+1|32|6,881|58|1,306|30|142,166|26,224|Europe\nUAE|6,781|+479|41 |+4|1,286|5,454|1|686|4|767,000|77,550|Asia\nCzechia|6,746|+140|186 |+5|1,298|5,262|84|630|17|168,096|15,697|Europe\nAustralia|6,612|+26|71 |+1|4,230|2,311|48|259|3|420,996|16,510|Australia/Oceania\nSingapore|6,588|+596|11 ||768|5,809|22|1,126|2|94,796|16,203|Asia\nIndonesia|6,575|+327|582 |+47|686|5,307||24|2|42,219|154|Asia\nSerbia|6,318|+324|122 |+5|753|5,443|120|723|14|38,701|4,429|Europe\nPhilippines|6,259|+172|409 |+12|572|5,278|1|57|4|59,928|547|Asia\nUkraine|5,449|+343|141 |+8|347|4,961|45|125|3|57,111|1,306|Europe\nQatar|5,448|+440|8 ||518|4,922|37|1,891|3|62,538|21,707|Asia\nMalaysia|5,389|+84|89 |+1|3,197|2,103|46|167|3|103,892|3,210|Asia\nBelarus|4,779||47 |+2|494|4,238|65|506|5|98,231|10,396|Europe\nDominican Republic|4,680|+345|226 |+9|363|4,091|114|431|21|15,583|1,436|North America\nPanama|4,273|+63|120 |+4|140|4,013|96|990|28|19,091|4,425|North America\nColombia|3,792|+171|179 |+13|711|2,902|98|75|4|62,746|1,233|South America\nFinland|3,783|+102|94 |+4|1,700|1,989|68|683|17|57,300|10,342|Europe\nLuxembourg|3,550|+13|73 
|+1|627|2,850|31|5,671|117|33,666|53,781|Europe\nSouth Africa|3,158|+124|54 |+2|903|2,201|36|53|0.9|114,711|1,934|Africa\nEgypt|3,144|+112|239 |+15|732|2,173||31|2|55,000|537|Africa\nArgentina|2,941|+102|134 |+2|709|2,098|123|65|3|32,712|724|South America\nMorocco|2,855|+170|141 |+4|327|2,387|1|77|4|15,123|410|Africa\nThailand|2,765|+32|47 ||1,928|790|61|40|0.7|100,498|1,440|Asia\nAlgeria|2,629|+95|375 |+8|1,047|1,207|40|60|9|6,500|148|Africa\nMoldova|2,472|+121|67 |+10|457|1,948|80|613|17|10,488|2,600|Europe\nBangladesh|2,456|+312|91 |+7|75|2,290|1|15|0.6|23,941|145|Asia\nGreece|2,235||113 |+3|269|1,853|67|214|11|53,290|5,113|Europe\nHungary|1,916|+82|189 |+17|250|1,477|61|198|20|46,353|4,798|Europe\nKuwait|1,915|+164|7 |+1|305|1,603|38|448|2|||Asia\nBahrain|1,881|+108|7 ||759|1,115|2|1,105|4|85,126|50,028|Asia\nCroatia|1,871|+39|47 |+8|709|1,115|23|456|11|24,186|5,891|Europe\nIceland|1,771|+11|9 ||1,291|471|3|5,190|26|41,091|120,416|Europe\nKazakhstan|1,676|+61|17 ||400|1,259|22|89|0.9|100,878|5,373|Asia\nUzbekistan|1,565|+75|5 ||225|1,335|8|47|0.1|136,000|4,063|Asia\nIraq|1,539|+26|82 ||1,009|448||38|2|56,147|1,396|Asia\nEstonia|1,528|+16|40 |+2|164|1,324|10|1,152|30|40,333|30,405|Europe\nNew Zealand|1,431|+9|12 |+1|912|507|18|297|2|83,224|17,258|Australia/Oceania\nAzerbaijan|1,398|+25|19 |+1|712|667|21|138|2|95,747|9,443|Asia\nSlovenia|1,330|+13|74 |+4|192|1,064|26|640|36|41,265|19,849|Europe\nLithuania|1,298|+59|35 |+2|242|1,021|14|477|13|64,035|23,522|Europe\nArmenia|1,291|+43|20 ||545|726|30|436|7|12,680|4,279|Asia\nBosnia and Herzegovina|1,285|+17|48 |+1|347|890|4|392|15|17,818|5,431|Europe\nOman|1,266|+86|7 |+1|233|1,026|3|248|1|||Asia\nNorth Macedonia|1,207|+37|51 |+2|179|977|15|579|24|11,870|5,697|Europe\nSlovakia|1,161|+72|12 |+1|229|920|10|213|2|44,278|8,110|Europe\nGhana|1,042|+208|9 ||99|934|4|34|0.3|68,591|2,207|Africa\nCuba|1,035|+49|34 |+2|255|746|16|91|3|26,982|2,382|North America\nHong Kong|1,026|+2|4 
||602|420|8|137|0.5|131,786|17,579|Asia\nCameroon|1,017||42 ||305|670|33|38|2|||Africa\nAfghanistan|996|+63|33 |+3|131|832|7|26|0.8|6,422|165|Asia\nBulgaria|894|+16|42 |+1|161|691|36|129|6|26,417|3,802|Europe\nTunisia|879|+13|38 |+1|43|798|33|74|3|16,098|1,362|Africa\nIvory Coast|847|+46|9 |+1|260|578||32|0.3|||Africa\nDjibouti|846|+114|2 ||102|742||856|2|8,144|8,243|Africa\nCyprus|767|+6|12 ||81|674|15|635|10|34,087|28,233|Asia\nLatvia|727|+15|5 ||88|634|5|385|3|35,881|19,023|Europe\nAndorra|713|+9|36 |+1|235|442|17|9,228|466|1,673|21,653|Europe\nDiamond Princess|712||13 ||644|55|7|||||\nLebanon|673|+1|21 ||102|550|27|99|3|20,929|3,066|Asia\nCosta Rica|660|+5|5 |+1|112|543|10|130|1.0|10,417|2,045|North America\nNiger|648|+9|20 |+1|117|511||27|0.8|4,715|195|Africa\nNigeria|627|+85|21 |+2|170|436|2|3|0.1|7,153|35|Africa\nGuinea|579|+61|5 |+2|87|487||44|0.4|||Africa\nBurkina Faso|576|+11|36 ||338|202||28|2|||Africa\nAlbania|562|+14|26 ||314|222|5|195|9|5,542|1,926|Europe\nKyrgyzstan|554|+48|5 ||133|416|5|85|0.8|26,147|4,008|Asia\nBolivia|520|+27|32 |+1|31|457|3|45|3|3,569|306|South America\nUruguay|517||10 |+1|298|209|14|149|3|13,207|3,802|South America\nChannel Islands|488|+4|21 ||73|394||2,807|121|3,320|19,095|Europe\nHonduras|472|+15|46 ||15|411|10|48|5|2,535|256|North America\nSan Marino|461|+6|39 ||60|362|4|13,586|1,149|1,711|50,426|Europe\nPalestine|439|+21|3 |+1|71|365||86|0.6|17,329|3,397|Asia\nMalta|427|+1|3 ||118|306|2|967|7|23,253|52,663|Europe\nTaiwan|420|+22|6 ||189|225||18|0.3|53,005|2,226|Asia\nJordan|417|+4|7 ||276|134|5|41|0.7|29,000|2,842|Asia\nRéunion|408|+1| ||237|171|4|456||||Africa\nGeorgia|394|+6|4 ||93|297|6|99|1|5,027|1,260|Asia\nSenegal|367|+17|3 ||220|144|1|22|0.2|||Africa\nMauritius|328|+3|9 ||208|111|3|258|7|9,755|7,670|Africa\nDRC|327|+20|25 ||26|276||4|0.3|||Africa\nMontenegro|308|+1|5 ||55|248|7|490|8|3,874|6,168|Europe\nIsle of Man|298|+1|6 ||193|99|10|3,505|71|2,465|28,989|Europe\nSri Lanka|271|+17|7 
||96|168|1|13|0.3|6,463|302|Asia\nMayotte|271|+17|4 ||117|150|5|993|15|1,850|6,781|Africa\nKenya|270|+8|14 |+2|67|189|2|5|0.3|13,239|246|Africa\nVietnam|268|| ||202|66|8|3||206,253|2,119|Asia\nGuatemala|257|+22|7 ||21|229|3|14|0.4|7,200|402|North America\nVenezuela|256|+29|9 ||117|130|4|9|0.3|299,714|10,540|South America\nMali|224|+8|14 |+1|42|168||11|0.7|||Africa\nParaguay|206|+4|8 ||41|157|1|29|1|4,950|694|South America\nEl Salvador|201|+11|7 ||44|150|1|31|1|11,160|1,721|North America\nFaeroe Islands|185|+1| ||176|9||3,786||6,021|123,222|Europe\nJamaica|173|+10|5 ||27|141||58|2|1,605|542|North America\nTanzania|170|+23|7 |+2|11|152|4|3|0.1|||Africa\nSomalia|164|+29|7 ||3|154|2|10|0.4|||Africa\nMartinique|163||12 ||73|78|11|434|32|||North America\nGuadeloupe|148||8 ||73|67|13|370|20|||North America\nRwanda|147|+3| ||76|71||11||6,959|537|Africa\nCongo|143||6 ||11|126||26|1|||Africa\nBrunei |138|+1|1 ||115|22|2|315|2|11,472|26,223|Asia\nGibraltar|132|| ||120|12|1|3,918||1,912|56,751|Europe\nCambodia|122|| ||105|17|1|7||5,768|345|Asia\nMadagascar|121|+1| ||39|82|1|4||2,357|85|Africa\nTrinidad and Tobago|114||8 ||21|85||81|6|1,335|954|North America\nMyanmar|111|+13|5 ||7|99||2|0.09|4,605|85|Asia\nGabon|109|+1|1 ||7|101||49|0.4|572|257|Africa\nEthiopia|108|+3|3 ||16|89|1|0.9|0.03|7,557|66|Africa\nAruba|97|+1|2 ||49|46|4|909|19|1,442|13,506|North America\nFrench Guiana|96|| ||64|32|2|321||||South America\nMonaco|94||3 ||22|69|3|2,395|76|||Europe\nLiberia|91|+15|8 |+1|7|76||18|2|||Africa\nBermuda|86|+3|5 ||35|46|10|1,381|80|638|10,244|North America\nTogo|84||5 ||52|27||10|0.6|4,295|519|Africa\nLiechtenstein|81||1 ||55|25||2,124|26|900|23,605|Europe\nEquatorial Guinea|79|| ||4|75||56||854|609|Africa\nBarbados|75||5 ||17|53|4|261|17|1,000|3,480|North America\nSint Maarten|67|+3|10 |+1|12|45|6|1,563|233|163|3,802|North America\nSudan|66||10 ||6|50||2|0.2|||Africa\nGuyana|65|+2|7 |+1|9|49|4|83|9|322|409|South America\nZambia|61|+4|3 |+1|33|25|1|3|0.2|2,586|141|Africa\nCabo 
Verde|61|+3|1 ||1|59||110|2|||Africa\nCayman Islands|61||1 ||7|53|3|928|15|702|10,681|North America\nBahamas|60|+2|9 ||11|40|1|153|23|||North America\nFrench Polynesia|55|| ||2|53|1|196||1,302|4,635|Australia/Oceania\nUganda|55|| ||28|27||1||11,101|243|Africa\nMaldives|52|+17| ||16|36||96||3,714|6,871|Asia\nLibya|51|+2|1 ||11|39||7|0.1|725|106|Africa\nGuinea-Bissau|50|+4| ||3|47||25||1,500|762|Africa\nHaiti|47|+3|3 |||44||4|0.3|498|44|North America\nMacao|45|| ||17|28|1|69||||Asia\nSyria|39|+1|3 |+1|5|31||2|0.2|||Asia\nEritrea|39|| ||3|36||11||||Africa\nMozambique|39|+4| ||8|31||1||1,037|33|Africa\nSaint Martin|37||2 ||19|16|5|957|52|||North America\nBenin|35||1 ||18|16||3|0.08|||Africa\nSierra Leone|35|+5| ||6|29||4||||Africa\nChad|33|| ||8|25||2||||Africa\nMongolia|32|+1| ||7|25||10||1,554|474|Asia\nNepal|31|| ||4|27||1||29,567|1,015|Asia\nZimbabwe|25||3 ||2|20||2|0.2|2,851|192|Africa\nAngola|24||2 ||6|16||0.7|0.06|||Africa\nAntigua and Barbuda|23||3 ||3|17|1|235|31|73|745|North America\nEswatini|22||1 ||8|13||19|0.9|714|615|Africa\nBotswana|20|+5|1 |||19||9|0.4|4,432|1,885|Africa\nLaos|19|| ||2|17||3||1,333|183|Asia\nTimor-Leste|19|+1| ||1|18||14||||Asia\nBelize|18||2 ||2|14|1|45|5|651|1,637|North America\nNew Caledonia|18|| ||15|3|1|63||3,399|11,906|Australia/Oceania\nMalawi|17||2 ||3|12|1|0.9|0.1|429|22|Africa\nFiji|17|| |||17||19||||Australia/Oceania\nDominica|16|| ||8|8||222||345|4,793|North America\nNamibia|16|| ||6|10||6||524|206|Africa\nSaint Lucia|15|| ||11|4||82||344|1,873|North America\nCuraçao|14||1 ||11|2||85|6|286|1,743|North America\nGrenada|14|| ||6|8|4|124||92|818|North America\nSaint Kitts and Nevis|14|| |||14||263||234|4,399|North America\nCAR|12|| ||4|8||2||||Africa\nSt. 
Vincent Grenadines|12|| ||1|11||108||87|784|North America\nTurks and Caicos|11||1 |||10||284|26|83|2,144|North America\nFalkland Islands|11|| ||3|8||3,161||319|91,667|South America\nGreenland|11|| ||11|0||194||1,043|18,372|North America\nMontserrat|11|| ||2|9|1|2,204||36|7,212|North America\nSeychelles|11|| ||5|6||112||||Africa\nNicaragua|10|+1|2 ||6|2||2|0.3|||North America\nGambia|10|+1|1 ||2|7||4|0.4|316|131|Africa\nSuriname|10||1 ||6|3||17|2|||South America\nMS Zaandam|9||2 |||7||||||\nVatican City|8|| ||2|6||9,988||||Europe\nMauritania|7||1 ||6|0||2|0.2|913|196|Africa\nPapua New Guinea|7|| |||7||0.8||167|19|Australia/Oceania\nSt. Barth|6|| ||5|1||607||||North America\nWestern Sahara|6|| |||6||10||||Africa\nBurundi|5||1 |||4||0.4|0.08|80|7|Africa\nBhutan|5|| ||2|3||6||8,107|10,507|Asia\nCaribbean Netherlands|5|+2| |||5||191||110|4,195|North America\nBritish Virgin Islands|4||1 |+1|2|1||132|33|||North America\nSao Tome and Principe|4|| |||4||18||19|87|Africa\nSouth Sudan|4|| |||4||0.4||||Africa\nAnguilla|3|| ||1|2||200||||North America\nSaint Pierre Miquelon|1|| |||1||173||||North America\nYemen|1|| |||1||0.03||||Asia\nTotal:|383,157|+12,373|14,786|+385|180,611|187,760|6,403|||||Asia\nTotal:|820,083|+28,674|43,337|+1,800|87,020|689,726|14,644|||||North America\nTotal:|1,089,256|+29,218|101,836|+2,558|309,495|677,925|25,395|||||Europe\nTotal:|82,252|+4,279|3,849|+196|36,329|42,074|7,534|||||South America\nTotal:|8,140|+35|83|+2|5,159|2,898|68|||||Australia/Oceania\nTotal:|22,966|+1,225|1,125|+43|5,840|16,001|174|||||Africa\nTotal:|721||15||644|62|7|||||\nTotal:|2,406,575|+75,804|165,031|+4,984|625,098|1,616,446|54,225|308.7|21.2|||All\n"
],
[
"import pandas as pd\ndf=pd.read_csv(\"C:\\\\ITM SPRING 2020\\\\JOB HUNT\\\\Python\\\\country_note.csv\",sep=\"|\")",
"_____no_output_____"
],
[
"df.to_csv(\"C:\\\\ITM SPRING 2020\\\\JOB HUNT\\\\Python\\\\country_note_modified.csv\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e741cf4ec1b93f5595365b9003b12e19a2dbfb21 | 144,504 | ipynb | Jupyter Notebook | .ipynb_checkpoints/LabEx9_Solutions-checkpoint.ipynb | clarence-ong/COS2000-Solutions | f98451fdf7544a57718a63c00d69236daa39c05c | [
"MIT"
] | null | null | null | .ipynb_checkpoints/LabEx9_Solutions-checkpoint.ipynb | clarence-ong/COS2000-Solutions | f98451fdf7544a57718a63c00d69236daa39c05c | [
"MIT"
] | null | null | null | .ipynb_checkpoints/LabEx9_Solutions-checkpoint.ipynb | clarence-ong/COS2000-Solutions | f98451fdf7544a57718a63c00d69236daa39c05c | [
"MIT"
] | null | null | null | 592.229508 | 61,839 | 0.746831 | [
[
[
"## <span style = \"color:blue\">Lab Exercise 9 Solutions (Total Marks: 20)</span>\n<div class = \"alert alert-danger\">\nDeadline for submission: Two weeks from your lab session (eg, Tuesday/Friday $\\rightarrow$ 11:59 pm of Tuesday/Friday two weeks later)\n \nRename your file as AXXXXXXXY_LabEx9.ipynb, where AXXXXXXXY is your matric number.\n</div>",
"_____no_output_____"
],
[
"**Question 1**\n\nWrite a program that uses the Monte Carlo method to estimate the area of a triangle with coordinates (0,0), (p,0) and (p,q) within a unit square. Your program is to perform the following:\n- Prompt user for the number of points and $p,q (0<p<1.0; 0<q<1.0)$\n- Output the areas from theory and simulation\n- Display an appropriate plot to show the simulation\n\n(Hint: Consider the equation of the line joining (0,0) and (p,q) when determining whether points lie within or outside the triangle. You may refer to Example 5 or 6 for a guide.)",
"_____no_output_____"
],
[
"**Question 2**\n\nWrite a program to simulate a simplified version of a casino game known as Jackpot. On each attempt, a set of three letters will be selected from a collection of \"A\", \"B\", \"C\", \"D\" or \"E\" randomly with equal probability. Each attempt costs **1** dollar. If the three letters selected are consecutively \"A\" or \"B\" (i.e. \"AAA\" or \"BBB\"), the player wins **50** dollars. Your program is to perform the following:\n- Prompt user for the number of attempts N\n- Plot a histogram of the net amount won/lost for all N attempts\n- Compute the expected winning or loss per attempt (i.e. (net amount won)/N or (net amount lost)/N respectively)\n\n(Hint: You may proceed by writing a loop over N attempts, with each attempt containing a 3-letter sequence that is independent of previous attempts. Compare the letters and update the net amount accordingly after each comparison. Consider the use of a list to hold the three letters that should be reset before the next attempt. Also append the updated net amount for each attempt to another list before the next attempt.)\n\n**Food for thought**: For a very determined player who attempts a very large number of times, will the expected net amount more likely to result in a winning or a loss?",
"_____no_output_____"
],
[
"### <span style = \"color:blue\">Question 1 (10 marks)</span>",
"_____no_output_____"
]
],
[
[
"%reset\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnPoints = eval(input(\"Enter the number of points to be used in the simulation: \")) \n# Ideal to use more points, but not way too many. 100,000 points is ideal for a close estimation.\n\n# Prompt for triangle's base\np = eval(input(\"Enter the base of the triangle in the simulation: \"))\n\n# Prompt for triangle's height\nq = eval(input(\"Enter the height of the triangle in the simulation: \"))\n\n# Initialisation\nnArea = 0 \nix = []; iy = []\nox = []; oy = []\n\n# Simulation \nfor n in range(nPoints):\n x = np.random.rand()\n y = np.random.rand()\n if (x <= p) and (y <= q) and (y/x <= q/p): # Point lies within the triangle\n # y/x is the hypotenuse, or is the gradient m in the equation y = mx\n nArea += 1\n ix.append(x)\n iy.append(y)\n else: # Point lies outside the triangle\n ox.append(x)\n oy.append(y)\n \n# Compare areas from Monte Carlo simulation with actual area\nprint(\"Area of the triangle from theory =\", 0.5*p*q)\nprint(\"Area of the triangle from simulation =\", nArea/nPoints)\n\nplt.figure(1,(7,7)) \nplt.plot(ix,iy,\"ro\") \nplt.plot(ox,oy,\"bo\") \nplt.plot([0,p,p],[0,q,0],\"k--\")\nplt.xlim(0,1); plt.ylim(0,1)\nplt.title(\"Plot of Simulated Points within a Unit Square and Triangle\")\nplt.xlabel('x')\nplt.ylabel('y') \nplt.show()",
"Once deleted, variables cannot be recovered. Proceed (y/[n])? y\nEnter the number of points to be used in the simulation: 10000\nEnter the base of the triangle in the simulation: 0.4\nEnter the height of the triangle in the simulation: 0.7\nArea of the triangle from theory = 0.13999999999999999\nArea of the triangle from simulation = 0.1407\n"
]
],
[
[
"### <span style = \"color:blue\">Question 2 (10 marks)</span>",
"_____no_output_____"
]
],
[
[
"%reset\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Simulate N attempts of a simplified Jackpot game.  Each attempt draws three\n# letters uniformly from {A, B, C, D, E}; \"AAA\" or \"BBB\" pays $50 on a $1\n# stake (net +49), any other combination loses the $1 stake.\n\nN = eval(input(\"Enter the number of attempts in the Jackpot: \"))\namount = []    # running net amount recorded after every attempt\nnetamount = 0  # current net winnings (negative = loss)\n\nletters = \"ABCDE\"\nfor attempt in range(N):\n    outcome = []  # the three letters drawn on this attempt\n    for _ in range(3):\n        p = np.random.rand()  # uniform draw in [0, 1)\n        # Map equal-width fifths of [0,1) onto the letters:\n        # [0,0.2) -> A, [0.2,0.4) -> B, ..., [0.8,1) -> E.\n        outcome.append(letters[min(int(p * 5), 4)])\n    if outcome == [\"A\", \"A\", \"A\"] or outcome == [\"B\", \"B\", \"B\"]:\n        netamount += 49  # $50 prize minus the $1 cost of the attempt\n    else:\n        netamount -= 1\n    amount.append(netamount)\n\nprint(amount)  # Output the net amount for each of the N attempts\n\nbins = 20\nplt.figure(1, (7, 5))\nplt.hist(amount, bins)\nplt.title(\"Histogram of winning/loss after \" + str(N) + \" attempts\")\nplt.xlabel(\"Net Gain/Loss\");plt.ylabel(\"Frequency\")\nplt.show()\n\nwin_loss = netamount / N\nif win_loss > 0:\n    print(\"Expected winning per attempt = \", win_loss)\nelif win_loss < 0:\n    print(\"Expected loss per attempt = \", win_loss)\nelse:\n    print(\"You have not won or lost.\") ",
"Once deleted, variables cannot be recovered. Proceed (y/[n])? \nNothing done.\nEnter the number of attempts in the Jackpot: 10000\n[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -29, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -37, -38, -39, -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -20, -21, -22, -23, -24, -25, -26, -27, -28, -29, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -122, -123, -124, -125, -126, -127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -139, -140, -141, -142, -143, -144, -145, -146, -147, -148, -149, -150, -151, -152, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -122, -123, -124, -125, -126, -127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -139, -140, -141, -142, -143, -144, -145, -146, -147, -148, -149, -150, -151, 
-152, -153, -154, -155, -156, -157, -158, -159, -160, -161, -162, -163, -164, -165, -166, -167, -168, -169, -170, -171, -172, -173, -174, -175, -176, -177, -178, -179, -180, -181, -182, -183, -184, -185, -186, -187, -188, -189, -190, -191, -192, -143, -144, -145, -146, -147, -148, -149, -150, -151, -152, -153, -154, -155, -156, -157, -158, -159, -160, -161, -162, -163, -164, -165, -166, -167, -168, -169, -170, -171, -172, -173, -174, -175, -176, -177, -178, -179, -180, -181, -182, -183, -184, -185, -186, -137, -138, -139, -140, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -37, -38, -39, -40, -41, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -29, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44, -45, -46, -47, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -29, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -122, -123, -124, -125, -126, -127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -122, -123, -124, -125, -126, 
-127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -139, -140, -141, -142, -143, -144, -145, -146, -147, -148, -149, -150, -151, -152, -153, -154, -155, -156, -157, -158, -159, -160, -161, -162, -163, -164, -165, -166, -167, -168, -169, -170, -171, -172, -173, -174, -175, -176, -177, -178, -179, -180, -181, -182, -183, -184, -185, -186, -187, -188, -189, -190, -191, -192, -193, -194, -195, -196, -197, -198, -199, -200, -201, -202, -203, -204, -205, -206, -207, -208, -209, -210, -211, -212, -213, -214, -215, -216, -217, -218, -219, -220, -221, -222, -223, -224, -225, -226, -227, -228, -229, -230, -231, -232, -233, -234, -235, -236, -237, -188, -189, -140, -141, -142, -143, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -122, -123, -124, -125, -126, -127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -139, -140, -141, -142, -143, -144, -145, -146, -147, -148, -149, -150, -151, -152, -153, -154, -155, -156, -157, -158, -159, -160, -161, -162, -163, -164, -165, -166, -167, -168, -169, -170, -171, -172, -173, -174, -175, -176, -177, -128, -129, -130, -131, -132, -133, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, 
-84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -72, -73, -74, -75, -76, -77, -78, -79, -80, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -24, -25, -26, -27, -28, -29, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, 
-119, -120, -121, -122, -123, -124, -125, -126, -127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -139, -140, -141, -142, -143, -144, -145, -146, -147, -148, -149, -150, -151, -152, -153, -154, -155, -156, -157, -158, -159, -160, -161, -162, -163, -164, -165, -116, -117, -118, -119, -120, -121, -122, -123, -124, -125, -126, -127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -139, -140, -141, -142, -143, -144, -145, -146, -147, -148, -149, -150, -151, -152, -153, -154, -155, -156, -157, -158, -159, -160, -161, -162, -163, -164, -165, -166, -167, -168, -169, -170, -171, -172, -173, -174, -175, -176, -177, -178, -179, -130, -131, -132, -133, -134, -135, -136, -137, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -122, -123, -124, -125, -126, -127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -89, -90, -91, -92, -93, -94, -95, -96, -47, -48, -49, -50, -51, -52, -53, -54, -55, -56, -57, -58, -59, -60, -61, -62, -63, -64, -65, -66, -67, -68, -69, -70, -71, -72, -73, -74, -75, -76, -77, -78, -79, -80, -81, -82, -83, -84, -85, -86, -87, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -122, -123, -124, -125, -126, -127, -128, -129, -130, -131, -132, -133, -134, -135, -136, -137, -138, -139, -140, -141, -142, -143, -144, -145, -146, -147, -148, -149, -150, -151, -152, -153, -154, -155, -156, -157, -158, -159, -160, -161, -162, -163, -164, -165, -166, -167, -168, -169, -170, -171, -172, -173, -174, -175, -176, -177, -178, -179, -180, -181, -182, -183, -184, -185, -186, -187, -188, -189, -190, -191, -192, -193, -194, -195, -196, -197, -198, -199, -200, -201, -202, -203, -204, -205, -206, -207, -208, -209, -210, -211, -212, -213, 
-214, -215, -216, -217, -218, -219, -220, -221, -222, -223, -224, -225, -226, -227, -228, -229, -230, -231, -232, -233, -234, -235, -236, -237, -238, -239, -240, -241, -242, -243, -244, -245, -246, -247, -248, -249, -250, -251, -252, -253, -254, -255, -256, -257, -258, -259, -260, -261, -262, -263, -264, -265, -266, -267, -268, -269, -220, -221, -222, -223, -224, -225, -226, -227, -228, -229, -230, -231, -232, -233, -234, -235, -236, -237, -238, -239, -240, -241, -242, -243, -244, -245, -246, -247, -248, -249, -250, -251, -252, -253, -254, -255, -256, -257, -258, -259, -260, -261, -262, -263, -264, -265, -266, -267, -268, -269, -270, -271, -272, -273, -274, -275, -276, -277, -278, -279, -280, -281, -282, -283, -284, -285, -286, -287, -288, -289, -290, -291, -292, -293, -294, -295, -296, -297, -298, -299, -300, -301, -302, -303, -304, -305, -306, -307, -308, -309, -310, -311, -312, -313, -314, -315, -316, -317, -318, -319, -320, -321, -322, -323, -324, -275, -276, -277, -278, -279, -280, -281, -282, -283, -284, -285, -286, -287, -288, -289, -290, -291, -292, -293, -294, -295, -296, -297, -298, -299, -300, -301, -302, -303, -304, -305, -306, -307, -308, -309, -310, -311, -312, -313, -264, -265, -266, -267, -268, -269, -270, -271, -272, -273, -274, -275, -276, -277, -278, -279, -280, -281, -282, -283, -284, -285, -286, -287, -288, -289, -290, -291, -292, -293, -294, -295, -296, -297, -298, -299, -300, -301, -302, -303, -304, -305, -306, -307, -308, -309, -310, -311, -312, -313, -314, -315, -316, -317, -318, -319, -320, -321, -322, -323, -324, -325, -326, -327, -328, -329, -330, -331, -332, -333, -334, -335, -336, -337, -338, -339, -340, -341, -342, -343, -344, -345, -346, -347, -348, -349, -350, -301, -302, -303, -304, -305, -306, -307, -308, -309, -310, -311, -312, -313, -314, -315, -316, -317, -318, -319, -320, -321, -322, -323, -324, -325, -326, -327, -328, -329, -330, -331, -332, -333, -334, -335, -336, -337, -338, -339, -340, -341, -342, -343, -344, -345, -346, 
-347, -348, -349, -350, -351, -352, -353, -354, -355, -356, -357, -358, -359, -360, -361, -362, -363, -364, -365, -366, -367, -368, -369, -370, -371, -372, -373, -374, -375, -376, -377, -378, -379, -380, -381, -382, -383, -384, -385, -386, -387, -388, -389, -390, -391, -392, -393, -394, -395, -396, -397, -348, -349, -350, -351, -352, -353, -354, -355, -356, -357, -358, -359, -360, -361, -362, -363, -364, -365, -366, -367, -368, -369, -370, -371, -372, -373, -374, -375, -376, -377, -378, -379, -380, -381, -382, -383, -384, -385, -386, -387, -388, -389, -390, -391, -392, -393, -394, -395, -396, -397, -398, -349, -350, -351, -352, -353, -354, -355, -356, -357, -358, -359, -360, -361, -362, -363, -364, -365, -366, -367, -368, -369, -370, -371, -372, -373, -374, -375, -376, -377, -378, -379, -380, -381, -382, -383, -384, -385, -386, -387, -388, -389, -390, -391, -392, -393, -394, -395, -396, -397, -398, -399, -400, -401, -402, -403, -404, -405, -406, -407, -408, -409, -410, -411, -412, -413, -414, -415, -416, -417, -418, -419, -420, -421, -422, -423, -424, -425, -426, -427, -378, -379, -380, -381, -382, -383, -384, -385, -386, -387, -388, -389, -390, -391, -392, -393, -394, -395, -396, -397, -398, -399, -400, -401, -402, -403, -404, -405, -406, -407, -408, -409, -410, -411, -412, -413, -414, -415, -416, -417, -418, -419, -420, -421, -422, -423, -424, -425, -426, -427, -428, -429, -430, -431, -432, -433, -434, -435, -436, -437, -438, -439, -440, -441, -442, -443, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -459, -460, -461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, 
-480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -549, -550, -551, -552, -553, -554, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -549, -550, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -549, -550, -551, -552, -553, -554, -555, -556, -557, -558, -559, -560, -561, -562, -563, -564, -565, -566, -567, -568, -569, -570, -571, -572, -573, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -549, -550, -551, -552, -553, -554, -555, -556, -557, -558, -559, -560, -561, -562, -563, -564, -565, -566, -567, -568, -569, -570, -571, -572, -573, -574, -575, -576, -577, -578, -579, -580, -581, -582, -583, -584, -585, -586, -587, -588, -589, -590, -591, -592, -593, -594, -595, -596, -597, -598, -599, -600, -601, -602, -603, -604, -605, -606, -607, -608, -609, -610, -611, -612, 
-613, -614, -615, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -639, -640, -641, -592, -593, -594, -595, -596, -597, -598, -599, -600, -601, -602, -603, -604, -605, -606, -607, -608, -609, -610, -611, -612, -613, -614, -615, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -639, -640, -641, -642, -643, -644, -645, -646, -647, -648, -649, -650, -651, -652, -653, -654, -655, -656, -657, -658, -659, -660, -661, -662, -663, -664, -665, -666, -667, -668, -669, -670, -671, -672, -673, -674, -675, -676, -677, -678, -679, -680, -681, -682, -683, -684, -685, -686, -687, -688, -689, -690, -691, -692, -693, -694, -695, -696, -697, -698, -699, -700, -701, -702, -703, -704, -705, -706, -707, -708, -709, -710, -711, -712, -713, -714, -715, -716, -717, -718, -719, -720, -721, -722, -723, -724, -725, -726, -727, -728, -729, -730, -731, -732, -733, -734, -735, -736, -737, -738, -739, -740, -741, -742, -743, -744, -745, -746, -747, -748, -749, -750, -751, -752, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -737, -738, -739, -740, -741, -742, -743, -744, -745, -746, -747, -748, -749, -750, -751, -752, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -730, -731, -732, -733, -734, -735, -736, -737, -738, -739, -740, -741, -742, -743, -744, -745, -746, -747, -748, -749, -750, -751, -752, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -720, -721, -722, -723, -724, -725, -726, -727, -728, -729, -730, -731, -732, -733, -734, -735, -736, -737, -738, -739, -740, -741, -742, -743, -744, -745, 
-746, -747, -748, -749, -750, -751, -752, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -846, -847, -848, -849, -850, -851, -852, -853, -854, -855, -856, -857, -858, -859, -860, -861, -862, -863, -864, -865, -866, -867, -868, -869, -870, -871, -872, -873, -874, -875, -876, -877, -878, -879, -880, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -846, -847, -848, -849, -850, -851, -852, -853, -854, -855, -856, -857, -858, -859, -860, -861, -812, -813, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -846, -847, -848, -849, -850, -851, -852, -853, -854, -855, -856, -857, -858, -859, -860, -861, -862, -863, -864, -865, -866, -867, -868, -869, -870, -871, -872, -873, -874, -875, -876, -877, -828, 
-829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -846, -847, -848, -849, -850, -851, -852, -853, -854, -855, -856, -857, -858, -859, -860, -861, -862, -863, -864, -865, -866, -867, -868, -869, -870, -871, -872, -873, -874, -875, -876, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -796, -797, -798, -799, -800, -801, -802, -803, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -727, -728, -729, -730, -731, -732, -733, -734, -735, -736, -737, -738, -739, -740, -741, -742, -743, -744, -745, -746, -747, -748, -749, -750, -751, -752, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -725, -726, -727, -728, -729, -730, -731, -732, -683, -684, -685, -636, -637, -638, -639, -640, -641, -642, -643, -644, -645, -646, -647, -648, -649, -650, -651, -652, -653, -654, -655, -656, -657, -658, -659, -660, -661, -662, -663, -664, -665, -666, -667, -668, -669, -670, -671, -672, -673, -674, -675, -676, -677, -678, -679, -680, -681, -682, -683, -684, -685, -686, -637, -638, -639, -640, -641, -642, -643, -644, -645, -646, -647, -648, -649, -650, -651, -652, -653, -654, -655, -656, -657, -658, -659, -660, -661, 
-662, -663, -664, -665, -666, -667, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -589, -590, -591, -592, -593, -594, -595, -596, -597, -598, -599, -600, -601, -602, -603, -604, -605, -606, -607, -608, -609, -610, -611, -612, -613, -614, -615, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -639, -640, -641, -642, -643, -644, -645, -646, -647, -648, -649, -650, -651, -652, -653, -654, -655, -656, -657, -658, -659, -660, -661, -662, -663, -664, -665, -666, -667, -668, -669, -670, -671, -672, -673, -674, -675, -676, -677, -678, -679, -680, -681, -682, -683, -684, -685, -686, -687, -688, -689, -690, -691, -692, -693, -694, -695, -696, -697, -698, -699, -700, -701, -702, -703, -704, -655, -656, -657, -658, -659, -660, -661, -662, -663, -664, -665, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -639, -640, -641, -642, -643, -644, -645, -646, -647, -648, -649, -650, -651, -652, -653, -654, -655, -656, -657, -658, -659, -660, -661, -612, -613, -614, -615, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -639, -640, -641, -642, -643, -644, -645, -646, -647, -648, -649, -650, -651, -652, -653, -604, -605, -606, -607, -608, -609, -610, -611, -612, -613, -614, -615, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -588, -589, -590, -591, -592, -593, -594, -595, -596, -597, -598, -599, -600, -601, -602, -603, -604, -605, -606, -607, -608, -609, -610, -611, -612, -613, -614, -615, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -639, -640, -641, -642, -643, -644, 
-645, -646, -647, -648, -649, -650, -651, -652, -653, -604, -605, -606, -607, -608, -609, -610, -611, -562, -563, -564, -565, -566, -567, -568, -569, -570, -571, -572, -573, -574, -575, -576, -577, -578, -579, -580, -581, -582, -583, -584, -585, -586, -587, -588, -589, -590, -591, -592, -593, -594, -595, -596, -597, -598, -599, -550, -551, -552, -553, -554, -555, -556, -557, -558, -559, -560, -561, -562, -563, -564, -565, -566, -567, -568, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -549, -550, -551, -552, -553, -554, -555, -556, -557, -558, -559, -560, -561, -562, -563, -564, -565, -566, -567, -568, -569, -570, -571, -572, -573, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -436, -437, -438, -439, -440, -441, -442, -443, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -409, -410, -411, -412, -413, -414, -415, -416, -417, -418, -419, -420, -421, -422, -423, -424, -425, -376, -377, -378, -379, -380, -381, -382, -383, -384, -385, -386, -387, -388, -389, -390, -391, -392, -393, -394, -395, -396, -397, -398, -399, -350, -351, -352, -353, -354, -355, -356, -357, -358, -359, -360, -361, -362, -363, -364, -365, -366, -367, -368, -369, -370, -371, -372, -373, -374, -375, -376, -377, 
-378, -379, -380, -381, -382, -383, -384, -385, -386, -387, -388, -389, -390, -391, -392, -393, -394, -395, -396, -397, -398, -399, -400, -401, -402, -403, -404, -405, -406, -407, -408, -409, -410, -411, -412, -413, -414, -415, -416, -417, -368, -369, -370, -371, -372, -373, -374, -375, -376, -377, -378, -379, -380, -381, -382, -383, -384, -385, -386, -387, -388, -389, -390, -391, -392, -393, -394, -395, -396, -397, -398, -399, -400, -401, -402, -403, -404, -405, -406, -407, -408, -409, -410, -411, -412, -413, -414, -415, -416, -417, -418, -419, -420, -421, -422, -423, -424, -425, -426, -427, -428, -429, -430, -431, -432, -433, -434, -435, -436, -437, -438, -439, -440, -441, -442, -443, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -409, -410, -411, -412, -413, -414, -415, -416, -417, -418, -419, -420, -421, -422, -423, -424, -425, -426, -427, -428, -429, -430, -431, -432, -433, -434, -435, -436, -437, -438, -439, -440, -441, -442, -443, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -459, -460, -461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -432, -433, -434, -435, -436, -437, -438, -439, -440, -441, -442, -443, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -459, -460, -461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -437, -438, -439, -440, -441, -442, -443, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -459, -460, -461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -452, -453, -454, -455, -456, -457, -458, -459, -460, 
-461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -458, -459, -460, -461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -459, -460, -461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -472, -473, -474, -475, -426, -427, -428, -429, -430, -431, -432, -433, -434, -435, -436, -437, -438, -439, -440, -441, -442, -443, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -459, -460, -461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, 
-494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -549, -550, -551, -552, -553, -554, -555, -556, -557, -558, -559, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -448, -449, -450, -451, -452, -453, -454, -455, -456, -407, -408, -409, -410, -411, -412, -413, -414, -415, -416, -417, -418, -419, -420, -421, -422, -423, -424, -425, -426, -427, -428, -429, -430, -431, -432, -433, -434, -435, -436, -437, -438, -439, -440, -441, -442, -443, -444, -445, -446, -447, -448, -449, -450, -451, -452, -453, -454, -455, -456, -457, -458, -459, -460, -461, -462, -463, -464, -465, -466, -467, -468, -469, -470, -471, -472, -473, -474, -475, -476, -477, -478, -479, -480, -481, -482, -483, -484, -485, -486, -487, -488, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -489, -490, -491, -492, -493, -494, -495, -496, -497, -498, -499, -500, -501, -502, -503, -504, -505, -506, -507, -508, -509, -510, -511, -512, -513, -514, -515, -516, -517, -518, -519, -520, -521, -522, -523, -524, -525, -526, -527, -528, -529, -530, -531, -532, -533, -534, -535, -536, -537, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -549, -550, -551, -552, -553, -554, -555, -556, -557, -558, -559, -560, -561, -562, -563, -564, -565, -566, -567, -568, -569, -570, -571, -572, -573, -574, -575, -576, 
-577, -578, -579, -580, -581, -582, -583, -584, -585, -586, -587, -588, -589, -590, -591, -592, -593, -594, -595, -596, -597, -598, -599, -600, -601, -602, -603, -604, -605, -606, -607, -608, -609, -610, -611, -612, -613, -614, -615, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -586, -587, -538, -539, -540, -541, -542, -543, -544, -545, -546, -547, -548, -549, -550, -551, -552, -553, -554, -555, -556, -557, -558, -559, -560, -561, -562, -563, -564, -565, -566, -567, -568, -569, -570, -571, -572, -573, -574, -575, -576, -577, -578, -579, -580, -581, -582, -583, -584, -585, -586, -587, -588, -589, -590, -591, -592, -593, -594, -595, -596, -597, -598, -599, -600, -601, -602, -603, -604, -605, -606, -607, -608, -609, -610, -611, -612, -613, -614, -615, -616, -617, -618, -619, -620, -621, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -639, -640, -641, -642, -643, -644, -645, -646, -647, -648, -649, -650, -651, -652, -653, -654, -655, -656, -657, -658, -659, -660, -661, -662, -663, -664, -665, -666, -667, -668, -669, -670, -671, -672, -673, -674, -675, -676, -677, -678, -679, -680, -681, -682, -683, -684, -685, -686, -687, -688, -689, -690, -691, -692, -693, -694, -695, -696, -697, -698, -699, -700, -701, -702, -703, -704, -705, -706, -707, -708, -709, -660, -661, -662, -663, -664, -665, -666, -667, -668, -669, -670, -671, -622, -623, -624, -625, -626, -627, -628, -629, -630, -631, -632, -633, -634, -635, -636, -637, -638, -639, -640, -641, -642, -643, -644, -645, -646, -647, -648, -649, -650, -651, -652, -653, -654, -655, -656, -657, -658, -659, -660, -661, -662, -663, -664, -665, -666, -667, -668, -669, -670, -671, -672, -673, -674, -675, -676, -677, -678, -679, -680, -681, -682, -683, -684, -685, -686, -687, -688, -689, -690, -691, -692, -693, -694, -695, -696, -697, -698, -699, -700, -701, -702, -703, -704, -705, -706, -707, -708, -709, 
-710, -711, -712, -713, -714, -715, -716, -717, -718, -719, -720, -721, -722, -723, -674, -675, -676, -677, -678, -679, -680, -681, -682, -683, -684, -685, -686, -687, -688, -689, -690, -691, -692, -693, -694, -695, -696, -697, -698, -699, -700, -701, -702, -703, -704, -705, -706, -707, -708, -709, -710, -711, -712, -713, -714, -715, -716, -717, -718, -719, -720, -721, -722, -723, -724, -725, -726, -727, -728, -729, -730, -731, -732, -733, -734, -735, -736, -737, -738, -739, -740, -741, -742, -743, -744, -745, -746, -747, -748, -749, -750, -751, -752, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -846, -847, -848, -849, -850, -851, -852, -853, -854, -855, -856, -857, -858, -859, -860, -861, -862, -863, -864, -865, -866, -867, -868, -869, -870, -871, -872, -873, -874, -875, -876, -877, -878, -879, -880, -881, -882, -883, -884, -885, -886, -887, -888, -889, -890, -891, -892, -893, -894, -895, -896, -897, -898, -899, -900, -901, -902, -903, -904, -905, -906, -907, -908, -909, -910, -911, -912, -863, -864, -865, -866, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -724, -725, -726, -727, -728, -729, -730, -731, -732, -733, -734, -735, -736, -737, -738, -739, -740, -741, -742, 
-743, -744, -745, -746, -747, -748, -749, -750, -751, -752, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -751, -752, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -753, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -846, -847, -848, -849, -850, -851, -852, -853, -854, -855, -856, -857, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, 
-826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -846, -847, -848, -849, -850, -851, -852, -853, -854, -855, -856, -857, -858, -859, -860, -861, -862, -863, -864, -865, -866, -867, -868, -869, -870, -871, -872, -873, -874, -875, -876, -877, -878, -829, -830, -831, -832, -833, -834, -835, -836, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -754, -755, -756, -757, -758, -759, -760, -761, -762, -763, -764, -765, -766, -767, -768, -769, -770, -771, -772, -773, -774, -775, -776, -777, -778, -779, -780, -781, -782, -783, -784, -785, -786, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -787, -788, -789, -790, -791, -792, -793, -794, -795, -796, -797, -798, -799, -800, -801, -802, -803, -804, -805, -806, -807, -808, -809, -810, -811, -812, -813, -814, -815, -816, -817, -818, -819, -820, -821, -822, -823, -824, -825, -826, -827, -828, -829, -830, -831, -832, -833, -834, -835, -836, -837, -838, -839, -840, -841, -842, -843, -844, -845, -846, -847, -848, -849, -850, -851, -852, -853, -854, -855, -856, -857, -858, -859, -860, -861, -862, -863, -864, -865, -866, -867, -868, -869, -870, -871, -872, -873, -874, -875, -876, -877, -878, -879, -880, -881, -882, -883, -884, -885, -886, -887, -888, -889, -890, -891, -892, -893, -894, -895, -896, -897, -898, -899, -900, -901, -902, -903, -904, -905, -906, -907, -908, 
-909, -910, -911, -912, -913, -914, -915, -916, -917, -918, -919, -920, -921, -922, -923, -924, -925, -926, -927, -928, -929, -930, -931, -932, -933, -934, -935, -936, -937, -938, -939, -940, -941, -942, -943, -944, -945, -946, -947, -898, -899, -900, -901, -902, -903, -904, -905, -906, -907, -908, -909, -910, -911, -912, -913, -914, -915, -916, -917, -918, -919, -920, -921, -922, -923, -924, -875, -876, -877, -878, -879, -880, -881, -882, -883, -884, -885, -886, -887, -888, -889, -890, -891, -892, -893, -894, -895, -896, -897, -898, -899, -900, -901, -902, -903, -904, -905, -906, -907, -908, -909, -910, -911, -912, -913, -914, -915, -916, -917, -918, -919, -920, -921, -922, -923, -924, -925, -926, -927, -928, -929, -930, -931, -932, -933, -934, -935, -936, -937, -938, -939, -940, -941, -942, -943, -944, -945, -946, -947, -948, -949, -950, -951, -952, -953, -954, -955, -956, -957, -958, -959, -960, -961, -962, -963, -964, -965, -966, -967, -968, -969, -970, -971, -972, -973, -974, -975, -976, -977, -978, -979, -980, -981, -982, -983, -984, -985, -986, -987, -988, -939, -940, -941, -942, -943, -944, -945, -946, -947, -948, -949, -950, -951, -952, -953, -954, -955, -956, -957, -958, -959, -960, -961, -962, -963, -914, -915, -916, -917, -918, -919, -920, -921, -922, -923, -924, -925, -926, -927, -928, -929, -930, -931, -932, -933, -934, -935, -936, -937, -938, -939, -940, -941, -942, -943, -944, -945, -946, -947, -948, -949, -950, -951, -952, -953, -954, -955, -956, -957, -958, -959, -960, -961, -962, -963, -964, -965, -966, -967, -968, -969, -970, -971, -972, -973, -974, -975, -976, -977, -978, -979, -980, -981, -982, -983, -984, -985, -986, -987, -988, -989, -990, -941, -942, -943, -944, -945, -946, -947, -948, -949, -950, -951, -952, -953, -954, -955, -956, -957, -958, -959, -960, -961, -962, -963, -964, -965, -966, -967, -968, -969, -970, -971, -972, -973, -974, -975, -976, -977, -978, -979, -980, -981, -982, -983, -984, -985, -986, -987, -988, -989, -990, -991, 
-992, -993, -994, -995, -996, -997, -998, -999, -1000, -1001, -1002, -1003, -1004, -1005, -1006, -1007, -1008, -1009, -1010, -1011, -1012, -1013, -1014, -1015, -1016, -1017, -1018, -1019, -1020, -1021, -1022, -1023, -1024, -1025, -1026, -1027, -1028, -1029, -1030, -1031, -1032, -1033, -1034, -1035, -1036, -1037, -1038, -1039, -1040, -1041, -1042, -1043, -1044, -1045, -1046, -1047, -1048, -1049, -1050, -1051, -1052, -1053, -1054, -1055, -1056, -1057, -1058, -1059, -1060, -1061, -1062, -1063, -1064, -1065, -1066, -1067, -1068, -1069, -1070, -1071, -1072, -1073, -1074, -1075, -1076, -1077, -1078, -1079, -1080, -1081, -1082, -1083, -1084, -1085, -1086, -1087, -1088, -1089, -1090, -1091, -1092, -1093, -1094, -1095, -1096, -1097, -1098, -1099, -1100, -1101, -1102, -1103, -1104, -1105, -1106, -1107, -1108, -1109, -1110, -1111, -1112, -1113, -1114, -1115, -1116, -1117, -1118, -1119, -1120, -1121, -1122, -1123, -1124, -1125, -1126, -1127, -1128, -1129, -1130, -1131, -1132, -1133, -1134, -1135, -1136, -1137, -1138, -1139, -1140, -1141, -1142, -1143, -1144, -1145, -1146, -1147, -1148, -1149, -1150, -1151, -1152, -1153, -1154, -1155, -1156, -1157, -1158, -1159, -1160, -1161, -1162, -1163, -1164, -1165, -1166, -1167, -1168, -1169, -1170, -1171, -1172, -1173, -1174, -1175, -1176, -1177, -1178, -1179, -1180, -1181, -1182, -1183, -1184, -1185, -1186, -1187, -1188, -1189, -1190, -1191, -1192, -1193, -1194, -1195, -1196, -1197, -1198, -1199, -1200, -1201, -1202, -1203, -1204, -1205, -1206, -1207, -1208, -1209, -1210, -1211, -1212, -1213, -1214, -1215, -1216, -1217, -1218, -1219, -1220, -1221, -1222, -1223, -1224, -1175, -1176, -1177, -1178, -1179, -1180, -1181, -1182, -1183, -1184, -1185, -1186, -1187, -1188, -1189, -1190, -1191, -1192, -1193, -1194, -1195, -1196, -1197, -1198, -1199, -1200, -1201, -1202, -1203, -1204, -1205, -1206, -1207, -1208, -1209, -1210, -1211, -1212, -1213, -1214, -1215, -1216, -1217, -1218, -1219, -1220, -1221, -1222, -1223, -1224, -1225, -1226, -1227, 
-1228, -1229, -1230, -1231, -1232, -1233, -1234, -1235, -1236, -1237, -1238, -1239, -1240, -1241, -1242, -1243, -1244, -1245, -1246, -1247, -1248, -1249, -1250, -1251, -1252, -1253, -1254, -1255, -1256, -1257, -1258, -1259, -1260, -1261, -1262, -1263, -1264, -1265, -1266, -1267, -1268, -1269, -1270, -1271, -1272, -1273, -1274, -1275, -1276, -1277, -1278, -1279, -1280, -1281, -1282, -1283, -1284, -1285, -1286, -1287, -1288, -1289, -1290, -1291, -1292, -1293, -1294, -1295, -1296, -1297, -1298, -1299, -1300, -1301, -1302, -1303, -1304, -1305, -1306, -1307, -1308, -1309, -1310, -1311, -1312, -1313, -1314, -1315, -1316, -1317, -1318, -1319, -1320, -1321, -1322, -1323, -1324, -1325, -1326, -1327, -1328, -1329, -1330, -1331, -1332, -1333, -1334, -1335, -1336, -1337, -1338, -1339, -1340, -1341, -1342, -1343, -1344, -1345, -1346, -1347, -1348, -1349, -1350, -1351, -1352, -1353, -1354, -1355, -1356, -1357, -1358, -1359, -1360, -1361, -1362, -1363, -1364, -1365, -1366, -1367, -1368, -1369, -1370, -1371, -1372, -1373, -1374, -1375, -1376, -1377, -1378, -1379, -1380, -1381, -1382, -1383, -1384, -1385, -1386, -1387, -1388, -1389, -1390, -1391, -1392, -1393, -1394, -1395, -1396, -1397, -1398, -1399, -1400, -1401, -1402, -1403, -1404, -1405, -1406, -1407, -1408, -1409, -1410, -1411, -1412, -1413, -1414, -1415, -1416, -1417, -1418, -1419, -1420, -1421, -1422, -1423, -1424, -1425, -1426, -1427, -1428, -1429, -1430, -1431, -1432, -1433, -1434, -1435, -1436, -1437, -1438, -1439, -1440, -1441, -1442, -1443, -1444, -1445, -1446, -1447, -1448, -1449, -1450, -1451, -1452, -1453, -1454, -1455, -1456, -1457, -1458, -1459, -1460, -1461, -1462, -1463, -1464, -1465, -1466, -1467, -1468, -1469, -1470, -1471, -1472, -1473, -1474, -1475, -1476, -1477, -1478, -1479, -1480, -1481, -1482, -1483, -1484, -1485, -1486, -1487, -1488, -1489, -1490, -1491, -1492, -1493, -1494, -1495, -1496, -1497, -1498, -1499, -1500, -1501, -1502, -1503, -1504, -1505, -1506, -1507, -1508, -1509, -1510, -1511, -1512, 
-1513, -1514, -1515, -1516, -1517, -1518, -1519, -1520, -1521, -1522, -1523, -1524, -1525, -1526, -1527, -1528, -1529, -1530, -1531, -1532, -1533, -1534, -1535, -1536, -1537, -1538, -1539, -1540, -1541, -1542, -1543, -1544, -1545, -1546, -1547, -1548, -1549, -1550, -1551, -1552, -1553, -1554, -1555, -1556, -1557, -1558, -1559, -1560, -1561, -1562, -1563, -1564, -1565, -1566, -1567, -1568, -1569, -1570, -1571, -1572, -1573, -1574, -1575, -1576, -1577, -1578, -1579, -1580, -1581, -1582, -1583, -1584, -1585, -1586, -1587, -1588, -1589, -1590, -1591, -1592, -1593, -1594, -1595, -1596, -1597, -1598, -1599, -1600, -1601, -1602, -1603, -1604, -1605, -1606, -1607, -1608, -1609, -1610, -1611, -1612, -1613, -1614, -1615, -1616, -1617, -1618, -1619, -1620, -1621, -1622, -1623, -1624, -1625, -1626, -1627, -1628, -1629, -1630, -1631, -1632, -1633, -1634, -1635, -1636, -1637, -1638, -1639, -1590, -1591, -1592, -1593, -1594, -1595, -1596, -1597, -1598, -1599, -1600, -1601, -1602, -1603, -1604, -1605, -1606, -1607, -1608, -1609, -1610, -1611, -1612, -1613, -1614, -1615, -1616, -1617, -1618, -1619, -1620, -1621, -1622, -1623, -1624, -1625, -1626, -1627, -1628, -1629, -1630, -1631, -1632, -1633, -1634, -1635, -1636, -1637, -1638, -1639, -1640, -1641, -1642, -1643, -1644, -1645, -1646, -1647, -1648, -1649, -1650, -1651, -1652, -1653, -1654, -1655, -1656, -1657, -1658, -1659, -1660, -1661, -1662, -1663, -1664, -1615, -1616, -1617, -1618, -1619, -1620, -1621, -1622, -1623, -1624, -1625, -1626, -1627, -1628, -1629, -1630, -1631, -1582, -1583, -1584, -1585, -1586, -1587, -1588, -1589, -1590, -1591, -1592, -1593, -1594, -1595, -1596, -1597, -1548, -1549, -1550, -1551, -1552, -1553, -1554, -1555, -1556, -1557, -1558, -1559, -1560, -1561, -1562, -1563, -1564, -1565, -1566, -1567, -1568, -1569, -1570, -1571, -1572, -1573, -1574, -1575, -1576, -1577, -1578, -1579, -1580, -1581, -1582, -1583, -1584, -1585, -1586, -1587, -1588, -1589, -1590, -1591, -1592, -1593, -1594, -1595, -1596, -1597, 
-1598, -1599, -1600, -1601, -1602, -1603, -1604, -1605, -1606, -1607, -1608, -1609, -1610, -1611, -1612, -1613, -1614, -1615, -1616, -1617, -1618, -1619, -1620, -1621, -1622, -1623, -1624, -1625, -1626, -1627, -1628, -1629, -1630, -1631, -1632, -1633, -1634, -1635, -1636, -1637, -1638, -1639, -1640, -1591, -1592, -1593, -1594, -1595, -1596, -1597, -1598, -1599, -1600, -1601, -1602, -1603, -1604, -1605, -1606, -1607, -1608, -1609, -1610, -1611, -1612, -1613, -1614, -1615, -1616, -1617, -1618, -1619, -1620, -1621, -1622, -1623, -1624, -1625, -1626, -1627, -1628, -1629, -1630, -1631, -1632, -1633, -1634, -1635, -1636, -1637, -1638, -1639, -1640, -1641, -1642, -1643, -1644, -1645, -1646, -1647, -1648, -1649, -1650, -1651, -1652, -1653, -1654, -1655, -1656, -1657, -1658, -1659, -1660, -1661, -1662, -1663, -1664, -1665, -1666, -1667, -1668, -1669, -1670, -1671, -1672, -1673, -1674, -1675, -1676, -1677, -1678, -1679, -1680, -1681, -1682, -1683, -1684, -1685, -1686, -1687, -1688, -1689, -1690, -1691, -1692, -1693, -1694, -1695, -1696, -1647, -1648, -1649, -1650, -1651, -1652, -1653, -1654, -1655, -1656, -1657, -1658, -1659, -1660, -1661, -1662, -1663, -1664, -1665, -1666, -1667, -1668, -1669, -1670, -1671, -1672, -1673, -1674, -1675, -1676, -1677, -1678, -1679, -1680, -1681, -1682, -1683, -1684, -1685, -1686, -1687, -1688, -1689, -1690, -1691, -1692, -1693, -1694, -1695, -1696, -1697, -1698, -1699, -1650, -1651, -1652, -1653, -1654, -1655, -1656, -1657, -1658, -1659, -1660, -1661, -1662, -1663, -1664, -1665, -1666, -1667, -1668, -1669, -1620, -1621, -1622, -1623, -1624, -1625, -1576, -1577, -1578, -1579, -1580, -1581, -1582, -1583, -1584, -1585, -1586, -1587, -1588, -1589, -1590, -1591, -1592, -1593, -1594, -1595, -1596, -1597, -1598, -1599, -1600, -1601, -1602, -1603, -1604, -1605, -1606, -1607, -1608, -1609, -1610, -1611, -1612, -1613, -1614, -1615, -1616, -1617, -1618, -1619, -1620, -1621, -1622, -1623, -1624, -1625, -1626, -1627, -1628, -1629, -1630, -1631, -1632, 
-1633, -1634, -1635, -1636, -1637, -1638, -1639, -1640, -1641, -1642, -1643, -1644, -1645, -1646, -1647, -1648, -1649, -1650, -1651, -1652, -1653, -1654, -1655, -1656, -1657, -1658, -1659, -1660, -1661, -1662, -1663, -1664, -1665, -1666, -1667, -1668, -1669, -1620, -1621, -1622, -1623, -1624, -1625, -1626, -1627, -1628, -1629, -1630, -1631, -1632, -1633, -1634, -1635, -1636, -1637, -1638, -1639, -1640, -1641, -1642, -1643, -1644, -1645, -1646, -1647, -1648, -1649, -1650, -1651, -1652, -1653, -1654, -1655, -1656, -1657, -1658, -1659, -1660, -1661, -1662, -1663, -1664, -1665, -1666, -1667, -1668, -1669, -1670, -1671, -1622, -1623, -1624, -1625, -1626, -1627, -1628, -1629, -1630, -1631, -1632, -1633, -1634, -1635, -1636, -1637, -1638, -1639, -1640, -1641, -1642, -1643, -1644, -1645, -1646, -1647, -1648, -1649, -1650, -1651, -1652, -1653, -1654, -1655, -1656, -1657, -1658, -1659, -1660, -1661, -1662, -1663, -1664, -1665, -1666, -1667, -1668, -1669, -1670, -1671, -1672, -1673, -1674, -1675, -1676, -1677, -1678, -1679, -1680, -1681, -1682, -1683, -1684, -1685, -1686, -1687, -1688, -1689, -1690, -1691, -1692, -1693, -1694, -1695, -1696, -1697, -1698, -1699, -1700, -1701, -1702, -1703, -1704, -1705, -1706, -1707, -1708, -1709, -1710, -1711, -1712, -1713, -1714, -1715, -1716, -1717, -1718, -1719, -1720, -1721, -1722, -1723, -1724, -1725, -1726, -1727, -1728, -1729, -1730, -1731, -1732, -1733, -1734, -1735, -1736, -1737, -1738, -1739, -1740, -1741, -1742, -1743, -1744, -1745, -1746, -1747, -1748, -1749, -1750, -1751, -1752, -1753, -1754, -1755, -1756, -1757, -1758, -1759, -1760, -1761, -1762, -1763, -1764, -1765, -1766, -1767, -1768, -1769, -1770, -1771, -1772, -1773, -1774, -1775, -1776, -1777, -1778, -1779, -1780, -1781, -1782, -1783, -1784, -1785, -1786, -1787, -1788, -1789, -1790, -1791, -1792, -1793, -1794, -1795, -1796, -1797, -1798, -1799, -1800, -1801, -1802, -1803, -1804, -1805, -1806, -1807, -1808, -1809, -1810, -1811, -1812, -1813, -1814, -1815, -1816, -1817, 
-1818, -1819, -1770, -1771, -1772, -1773, -1774, -1775, -1776, -1777, -1778, -1779, -1780, -1781, -1782, -1783, -1784, -1785, -1786, -1787, -1788, -1789, -1790, -1791, -1792, -1793, -1794, -1795, -1796, -1797, -1798, -1799, -1800, -1801, -1802, -1803, -1804, -1805, -1806, -1807, -1808, -1809, -1810, -1811, -1812, -1813, -1814, -1815, -1816, -1817, -1818, -1819, -1820, -1821, -1822, -1823, -1824, -1825, -1826, -1827, -1828, -1829, -1830, -1831, -1832, -1833, -1834, -1835, -1836, -1837, -1838, -1839, -1840, -1841, -1842, -1843, -1844, -1845, -1846, -1847, -1848, -1849, -1850, -1851, -1852, -1853, -1854, -1855, -1856, -1857, -1858, -1859, -1860, -1861, -1862, -1863, -1864, -1865, -1866, -1867, -1868, -1869, -1870, -1871, -1872, -1873, -1874, -1875, -1876, -1877, -1878, -1879, -1880, -1881, -1882, -1883, -1884, -1885, -1886, -1887, -1888, -1839, -1840, -1841, -1842, -1843, -1844, -1845, -1846, -1847, -1848, -1849, -1850, -1851, -1852, -1853, -1854, -1855, -1856, -1857, -1858, -1859, -1860, -1861, -1862, -1863, -1864, -1865, -1866, -1867, -1868, -1869, -1870, -1871, -1872, -1823, -1824, -1825, -1826, -1827, -1828, -1829, -1830, -1831, -1832, -1833, -1834, -1835, -1836, -1837, -1838, -1839, -1840, -1841, -1842, -1843, -1844, -1845, -1846, -1847, -1848, -1849, -1850, -1851, -1852, -1853, -1854, -1855, -1856, -1857, -1858, -1859, -1860, -1861, -1862, -1863, -1864, -1865, -1816, -1817, -1818, -1819, -1820, -1821, -1822, -1823, -1824, -1825, -1826, -1827, -1828, -1829, -1830, -1831, -1832, -1833, -1834, -1835, -1836, -1837, -1838, -1839, -1840, -1841, -1842, -1843, -1844, -1845, -1846, -1847, -1848, -1849, -1850, -1851, -1852, -1853, -1854, -1855, -1856, -1857, -1858, -1859, -1860, -1861, -1862, -1863, -1864, -1865, -1866, -1867, -1868, -1869, -1870, -1871, -1872, -1873, -1874, -1875, -1876, -1877, -1878, -1879, -1880, -1881, -1882, -1883, -1884, -1885, -1886, -1887, -1888, -1889, -1890, -1891, -1892, -1893, -1894, -1895, -1896, -1897, -1898, -1899, -1900, -1901, -1902, 
-1903, -1904, -1905, -1906, -1907, -1908, -1909, -1910, -1911, -1912, -1913, -1914, -1915, -1916, -1917, -1918, -1919, -1920, -1921, -1922, -1923, -1924, -1925, -1926, -1927, -1928, -1929, -1930, -1931, -1932, -1933, -1934, -1935, -1936, -1937, -1938, -1939, -1940, -1941, -1942, -1943, -1944, -1945, -1946, -1947, -1948, -1949, -1950, -1951, -1952, -1953, -1954, -1955, -1956, -1957, -1958, -1959, -1960, -1961, -1962, -1963, -1964, -1965, -1966, -1967, -1968, -1969, -1970, -1971, -1972, -1973, -1974, -1975, -1976, -1977, -1978, -1979, -1980, -1981, -1982, -1983, -1984, -1985, -1986, -1987, -1988, -1989, -1990, -1991, -1992, -1993, -1994, -1995, -1996, -1997, -1998, -1999, -2000, -2001, -2002, -2003, -2004, -2005, -2006, -2007, -2008, -2009, -2010, -2011, -2012, -2013, -2014, -2015, -2016, -2017, -2018, -2019, -2020, -2021, -2022, -2023, -2024, -2025, -2026, -2027, -2028, -2029, -2030, -2031, -2032, -2033, -2034, -2035, -1986, -1987, -1988, -1989, -1990, -1991, -1992, -1993, -1994, -1995, -1996, -1997, -1998, -1999, -2000, -2001, -2002, -2003, -2004, -2005, -2006, -2007, -2008, -2009, -2010, -2011, -2012, -2013, -2014, -2015, -2016, -2017, -1968, -1969, -1970, -1971, -1972, -1973, -1974, -1975, -1976, -1977, -1978, -1979, -1980, -1981, -1982, -1983, -1984, -1985, -1986, -1987, -1988, -1989, -1990, -1991, -1992, -1993, -1994, -1995, -1996, -1997, -1998, -1999, -2000, -2001, -2002, -2003, -2004, -2005, -2006, -2007, -2008, -2009, -2010, -2011, -2012, -2013, -2014, -2015, -2016, -2017, -2018, -2019, -2020, -2021, -2022, -2023, -2024, -2025, -2026, -2027, -2028, -2029, -2030, -2031, -2032, -2033, -2034, -2035, -2036, -2037, -2038, -1989, -1990, -1991, -1992, -1993, -1994, -1995, -1996, -1997, -1948, -1949, -1950, -1951, -1952, -1953, -1954, -1955, -1956, -1957, -1958, -1959, -1960, -1961, -1962, -1913, -1914, -1915, -1916, -1917, -1918, -1919, -1920, -1921, -1922, -1923, -1924, -1925, -1926, -1927, -1928, -1929, -1930, -1931, -1932, -1933, -1934, -1935, -1936, -1937, 
-1938, -1939, -1940, -1941, -1942, -1943, -1944, -1945, -1946, -1947, -1948, -1949, -1950, -1951, -1952, -1953, -1954, -1955, -1956, -1957, -1958, -1959, -1960, -1961, -1962, -1963, -1964, -1965, -1966, -1967, -1968, -1969, -1970, -1971, -1972, -1973, -1974, -1975, -1976, -1977, -1978, -1979, -1980, -1981, -1982, -1983, -1984, -1985, -1986, -1987, -1988, -1989, -1990, -1991, -1992, -1993, -1994, -1995, -1996, -1997, -1998, -1999, -2000, -2001, -2002, -2003, -2004, -2005, -2006, -2007, -2008, -2009, -2010, -2011, -2012, -2013, -2014, -2015, -2016, -2017, -2018, -2019, -2020, -2021, -2022, -2023, -2024, -2025, -2026, -2027, -2028, -2029, -2030, -2031, -2032, -2033, -2034, -2035, -1986, -1987, -1988, -1989, -1990, -1991, -1992, -1993, -1994, -1995, -1996, -1997, -1998, -1999, -2000, -2001, -2002, -2003, -2004, -2005, -2006, -2007, -2008, -2009, -2010, -2011, -2012, -2013, -2014, -1965, -1966, -1967, -1968, -1969, -1970, -1971, -1972, -1973, -1974, -1975, -1976, -1977, -1978, -1979, -1980, -1981, -1982, -1983, -1984, -1985, -1986, -1987, -1988, -1989, -1990, -1991, -1992, -1993, -1994, -1995, -1996, -1997, -1998, -1999, -2000, -2001, -2002, -2003, -2004, -2005, -2006, -2007, -2008, -2009, -2010, -2011, -2012, -2013, -2014, -2015, -2016, -2017, -2018, -2019, -2020, -2021, -2022, -2023, -2024, -2025, -2026, -2027, -2028, -2029, -2030, -2031, -2032, -2033, -2034, -2035, -2036, -2037, -2038, -2039, -2040, -2041, -2042, -2043, -2044, -2045, -2046, -2047, -2048, -2049, -2050, -2051, -2052, -2053, -2054, -2055, -2056, -2057, -2058, -2059, -2060, -2061, -2062, -2063, -2064, -2065, -2066, -2067, -2068, -2069, -2070, -2071, -2022, -2023, -2024, -2025, -2026, -2027, -2028, -2029, -2030, -2031, -2032, -2033, -2034, -2035, -2036, -2037, -2038, -2039, -2040, -2041, -1992, -1993, -1994, -1995, -1996, -1997, -1998, -1999, -2000, -2001, -2002, -2003, -2004, -2005, -2006, -2007, -2008, -2009, -2010, -2011, -2012, -2013, -2014, -2015, -2016, -2017, -2018, -2019, -2020, -2021, -2022, 
-2023, -2024, -2025, -2026, -2027, -2028, -2029, -2030, -2031, -2032, -2033, -2034, -2035, -2036, -2037, -2038, -2039, -2040, -2041, -2042, -2043, -2044, -2045, -2046, -2047, -2048, -2049, -2050, -2051, -2052, -2053, -2054, -2055, -2056, -2057, -2058, -2059, -2060, -2061, -2062, -2063, -2064, -2065, -2066, -2067, -2068, -2069, -2070, -2071, -2022, -2023, -2024, -2025, -2026, -2027, -2028, -2029, -2030, -2031, -2032, -2033, -2034, -2035, -2036, -2037, -2038, -2039, -2040, -2041, -2042, -2043, -2044, -2045, -2046, -2047, -2048, -2049, -2050, -2051, -2052, -2053, -2054, -2055, -2056, -2057, -2058, -2059, -2060, -2061, -2062, -2063, -2064, -2065, -2066, -2067, -2068, -2069, -2070, -2071, -2072, -2073, -2074, -2075, -2076, -2077, -2078, -2079, -2080, -2081, -2082, -2083, -2084, -2085, -2086, -2087, -2088, -2089, -2090, -2091, -2092, -2093, -2094, -2095, -2096, -2097, -2098, -2099, -2100, -2101, -2102, -2103, -2104, -2105, -2106, -2107, -2108, -2109, -2110, -2111, -2112, -2113, -2114, -2115, -2116, -2117, -2118, -2119, -2120, -2121, -2122, -2123, -2124, -2125, -2126, -2127, -2128, -2079, -2080, -2081, -2082, -2083, -2084, -2085, -2086, -2087, -2088, -2089, -2090, -2091, -2092, -2093, -2094, -2095, -2096, -2097, -2098, -2099, -2100, -2101, -2102, -2103, -2104, -2105, -2106, -2107, -2108, -2109, -2110, -2111, -2112, -2113, -2114, -2115, -2116, -2067, -2068, -2069, -2070, -2071, -2072, -2073, -2074, -2075, -2076, -2077, -2078, -2079, -2080, -2081, -2082, -2083, -2084, -2085, -2086, -2087, -2088, -2089, -2090, -2091, -2092, -2093, -2094, -2045, -2046, -2047, -2048, -2049, -2050, -2051, -2052, -2053, -2054, -2055, -2056, -2057, -2058, -2059, -2060, -2061, -2062, -2063, -2064, -2065, -2066, -2067, -2068, -2069, -2070, -2071, -2072, -2073, -2074, -2075, -2076, -2077, -2078, -2079, -2080, -2081, -2082, -2083, -2084, -2085, -2086, -2087, -2088, -2089, -2090, -2091, -2092, -2093, -2094, -2095, -2096, -2097, -2098, -2099, -2100, -2101, -2102, -2103, -2104, -2105, -2106, -2107, 
-2108, -2109, -2110, -2111, -2112, -2113, -2114, -2115, -2116, -2117, -2118, -2119, -2120, -2071, -2072, -2073, -2074, -2075, -2076, -2077, -2078, -2079, -2080, -2081, -2082, -2083, -2084, -2085, -2086, -2087, -2088, -2089, -2090, -2091, -2092, -2093, -2094, -2095, -2096, -2097, -2098, -2099, -2100, -2101, -2102, -2103, -2104, -2105, -2106, -2107, -2108, -2109, -2110, -2111, -2112, -2113, -2114, -2115, -2116, -2117, -2118, -2119, -2120, -2121, -2122, -2123, -2124, -2125, -2126, -2127, -2128, -2129, -2130, -2131, -2132, -2133, -2134, -2135, -2136, -2137, -2138, -2139, -2140, -2141, -2142, -2143, -2144, -2145, -2146, -2147, -2148, -2149, -2150, -2151, -2152, -2153, -2154, -2155, -2156, -2157, -2158, -2159, -2160, -2161, -2162, -2163, -2164, -2165, -2166, -2167, -2168, -2169, -2170, -2171, -2172, -2173, -2124, -2125, -2126, -2127, -2128, -2129, -2130, -2131, -2132, -2133, -2134, -2135, -2136, -2137, -2138, -2139, -2140, -2141, -2142, -2093, -2094, -2095, -2096, -2097, -2098, -2099, -2100, -2101, -2102, -2103, -2104, -2105, -2106, -2107, -2108, -2109, -2110, -2111, -2112, -2113, -2114, -2115, -2116, -2117, -2118, -2119, -2120, -2121, -2122, -2123, -2124, -2125, -2126, -2127, -2128, -2129, -2130, -2131, -2132, -2133, -2134, -2135, -2136, -2137, -2138, -2139, -2140, -2141, -2142, -2143, -2144, -2145, -2146, -2147, -2148, -2149, -2150, -2151, -2152, -2153, -2154, -2155, -2156, -2157, -2158, -2159, -2160, -2161, -2162, -2163, -2164, -2165, -2166, -2167, -2168, -2169, -2170, -2171, -2172, -2173, -2174, -2175, -2176, -2177, -2178, -2179, -2180, -2181, -2182, -2183, -2184, -2185, -2186, -2187, -2188, -2189, -2190, -2191, -2192, -2193, -2194, -2195, -2196, -2197, -2198, -2199, -2200, -2201, -2202, -2203, -2204, -2205, -2206, -2207, -2208, -2209, -2210, -2211, -2212, -2213, -2214, -2215, -2216, -2217, -2218, -2219, -2220, -2221, -2222, -2223, -2224, -2225, -2226, -2227, -2228, -2229, -2230, -2231, -2232, -2233, -2234, -2235, -2236, -2237, -2238, -2239, -2240, -2241, -2242, 
-2243, -2244, -2245, -2246, -2247, -2248, -2249, -2250]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e741d1ec53c3ee1752315e818b2a583c81ed3e92 | 130,577 | ipynb | Jupyter Notebook | 06.Association-Rule-Mining-Sample/06.Association-Rule-Mining-Sample.ipynb | Kazuhito00/PyCaret-Learn | 22fdf73f23e85388ab8e5f715e78012c41920d59 | [
"MIT"
] | 3 | 2020-04-25T14:11:02.000Z | 2022-03-20T15:28:17.000Z | 06.Association-Rule-Mining-Sample/06.Association-Rule-Mining-Sample.ipynb | Kazuhito00/PyCaret-Learn | 22fdf73f23e85388ab8e5f715e78012c41920d59 | [
"MIT"
] | null | null | null | 06.Association-Rule-Mining-Sample/06.Association-Rule-Mining-Sample.ipynb | Kazuhito00/PyCaret-Learn | 22fdf73f23e85388ab8e5f715e78012c41920d59 | [
"MIT"
] | null | null | null | 35.951817 | 9,175 | 0.393293 | [
[
[
"# データセット取得(Online Retail Dataset)\nOnline Retail Dataset:英国を拠点とする登録オンライン小売業者の2010年12月1日から2011年12月9日の間に発生するレコードを含むトランザクションデータセット。\n\n以下のようなデータが含まれます。\n\n* InvoiceNo:請求書番号。各トランザクションに一意に割り当てられる6桁の整数。<br> このコードが文字「c」で始まる場合、キャンセルを示す。\n* StockCode:製品コード。各製品に一意に割り当てられる5桁の整数。\n* Description:製品名。\n* Quantity:トランザクションごとの各製品の数量。\n* InvoiceDate:請求書の日付と時刻。各トランザクションが生成された日時。\n* UnitPrice:単価。\n* CustomerID:顧客番号。各顧客に一意に割り当てられる5桁の整数。\n* Country:各顧客が居住する国の名前。",
"_____no_output_____"
]
],
[
[
"# PyCaretチュートリアル用データセット取得\n# 詳細は<https://pycaret.org/get-data/>を参照\nfrom pycaret.datasets import get_data\n\ndataset = get_data('france')\ndataset.to_csv('./dataset.csv')",
"_____no_output_____"
],
[
"dataset.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 8557 entries, 0 to 8556\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 InvoiceNo 8557 non-null object \n 1 StockCode 8557 non-null object \n 2 Description 8557 non-null object \n 3 Quantity 8557 non-null int64 \n 4 InvoiceDate 8557 non-null object \n 5 UnitPrice 8557 non-null float64\n 6 CustomerID 8491 non-null float64\n 7 Country 8557 non-null object \ndtypes: float64(2), int64(1), object(5)\nmemory usage: 534.9+ KB\n"
],
[
"print('データ :' + str(dataset.shape) + ' ' + str(dataset.index))",
"データ :(8557, 8) RangeIndex(start=0, stop=8557, step=1)\n"
]
],
[
[
"# PyCaretでのデータセットアップ",
"_____no_output_____"
]
],
[
[
"# 相関ルールマイニング用インポート\nfrom pycaret.arules import *",
"_____no_output_____"
],
[
"# session_idを指定することで欄数シードを固定\n# セットアップが完了するとデータの情報や前処理のパイプラインの情報が表示される\n# 詳細は<https://pycaret.org/setup/>を参照\nexp = setup(data=dataset, transaction_id='InvoiceNo', item_id='Description', session_id=42)",
"_____no_output_____"
],
[
"print(exp)",
"( InvoiceNo StockCode Description Quantity \\\n0 536370 22728 ALARM CLOCK BAKELIKE PINK 24 \n1 536370 22727 ALARM CLOCK BAKELIKE RED 24 \n2 536370 22726 ALARM CLOCK BAKELIKE GREEN 12 \n3 536370 21724 PANDA AND BUNNIES STICKER SHEET 12 \n4 536370 21883 STARS GIFT TAPE 24 \n... ... ... ... ... \n8552 581587 22613 PACK OF 20 SPACEBOY NAPKINS 12 \n8553 581587 22899 CHILDREN'S APRON DOLLY GIRL 6 \n8554 581587 23254 CHILDRENS CUTLERY DOLLY GIRL 4 \n8555 581587 23255 CHILDRENS CUTLERY CIRCUS PARADE 4 \n8556 581587 22138 BAKING SET 9 PIECE RETROSPOT 3 \n\n InvoiceDate UnitPrice CustomerID Country \n0 12/1/2010 8:45 3.75 12583.0 France \n1 12/1/2010 8:45 3.75 12583.0 France \n2 12/1/2010 8:45 3.75 12583.0 France \n3 12/1/2010 8:45 0.85 12583.0 France \n4 12/1/2010 8:45 0.65 12583.0 France \n... ... ... ... ... \n8552 12/9/2011 12:50 0.85 12680.0 France \n8553 12/9/2011 12:50 2.10 12680.0 France \n8554 12/9/2011 12:50 4.15 12680.0 France \n8555 12/9/2011 12:50 4.15 12680.0 France \n8556 12/9/2011 12:50 4.95 12680.0 France \n\n[8557 rows x 8 columns], 'InvoiceNo', 'Description', None, 42, [])\n"
]
],
[
[
"# モデル生成\n相関ルールマイニングのcreate_model()では必須パラメーターは無く、以下の4つのオプションある。\n\n<b>metric</b>:ルールが重要かどうかを評価するためのメトリック。<br> デフォルト設定は'confidence'<br> その他の指定可能なオプションは、'support', 'lift', 'leverage', 'conviction'があります。\n\n<b>threshold</b>:metric候補ルールが対象かどうかを決定するための最小しきい値。デフォルトは0.5\n\n<b>min_support</b>:返されるアイテムセットの最小サポートの値(0.0 ~ 0.1)<br> transactions_where_item(s)_occur / total_transactionsで計算されデフォルト設定は0.05\n\n<b>round</b>:スコアグリッドの小数点以下の指定桁数を四捨五入",
"_____no_output_____"
]
],
[
[
"# 引数で作成するモデルを指定\narule_model = create_model()",
"_____no_output_____"
],
[
"print(arule_model)",
" antecedents \\\n0 (JUMBO BAG WOODLAND ANIMALS) \n1 (SET/6 RED SPOTTY PAPER PLATES, SET/20 RED RET... \n2 (SET/6 RED SPOTTY PAPER CUPS, SET/20 RED RETRO... \n3 (SET/6 RED SPOTTY PAPER PLATES, SET/20 RED RET... \n4 (SET/6 RED SPOTTY PAPER CUPS, SET/20 RED RETRO... \n.. ... \n136 (STRAWBERRY LUNCH BOX WITH CUTLERY) \n137 (LUNCH BAG APPLE DESIGN) \n138 (LUNCH BAG APPLE DESIGN) \n139 (PLASTERS IN TIN CIRCUS PARADE , POSTAGE) \n140 (LUNCH BAG RED RETROSPOT, POSTAGE) \n\n consequents antecedent support \\\n0 (POSTAGE) 0.0651 \n1 (SET/6 RED SPOTTY PAPER CUPS) 0.0868 \n2 (SET/6 RED SPOTTY PAPER PLATES) 0.0868 \n3 (SET/6 RED SPOTTY PAPER CUPS) 0.0716 \n4 (SET/6 RED SPOTTY PAPER PLATES) 0.0716 \n.. ... ... \n136 (LUNCH BOX WITH CUTLERY RETROSPOT ) 0.1041 \n137 (LUNCH BAG SPACEBOY DESIGN ) 0.1085 \n138 (LUNCH BAG RED RETROSPOT) 0.1085 \n139 (PLASTERS IN TIN SPACEBOY) 0.1258 \n140 (LUNCH BAG APPLE DESIGN) 0.1041 \n\n consequent support support confidence lift leverage conviction \n0 0.6746 0.0651 1.0000 1.4823 0.0212 inf \n1 0.1171 0.0846 0.9750 8.3236 0.0744 35.3145 \n2 0.1085 0.0846 0.9750 8.9895 0.0752 35.6616 \n3 0.1171 0.0694 0.9697 8.2783 0.0610 29.1345 \n4 0.1085 0.0694 0.9697 8.9406 0.0617 29.4208 \n.. ... ... ... ... ... ... \n136 0.1236 0.0542 0.5208 4.2124 0.0414 1.8289 \n137 0.1041 0.0564 0.5200 4.9942 0.0451 1.8664 \n138 0.1323 0.0564 0.5200 3.9298 0.0420 1.8077 \n139 0.1193 0.0629 0.5000 4.1909 0.0479 1.7614 \n140 0.1085 0.0521 0.5000 4.6100 0.0408 1.7831 \n\n[141 rows x 9 columns]\n"
],
[
"arule_model.head()",
"_____no_output_____"
]
],
[
[
"# セットアップ(ignore_items指定)\n先の実施例のPOSTAGE(送料)が第1位の相関であることは明白なため、以下の例ではPOSTAGEを無視してセットアップを行う。",
"_____no_output_____"
]
],
[
[
"exp_arul101 = setup(data=dataset, transaction_id='InvoiceNo', item_id='Description', ignore_items=['POSTAGE'],session_id=42) ",
"_____no_output_____"
],
[
"arule_model2 = create_model()",
"_____no_output_____"
],
[
"print(arule_model2)",
" antecedents \\\n0 (SET/6 RED SPOTTY PAPER CUPS, SET/20 RED RETRO... \n1 (SET/6 RED SPOTTY PAPER PLATES, SET/20 RED RET... \n2 (SET/6 RED SPOTTY PAPER PLATES) \n3 (CHILDRENS CUTLERY SPACEBOY ) \n4 (SET/6 RED SPOTTY PAPER CUPS) \n5 (CHILDRENS CUTLERY DOLLY GIRL ) \n6 (ALARM CLOCK BAKELIKE PINK, ALARM CLOCK BAKELI... \n7 (ALARM CLOCK BAKELIKE GREEN, ALARM CLOCK BAKEL... \n8 (ALARM CLOCK BAKELIKE RED ) \n9 (SET/6 RED SPOTTY PAPER PLATES, SET/6 RED SPOT... \n10 (ALARM CLOCK BAKELIKE GREEN, ALARM CLOCK BAKEL... \n11 (SET/6 RED SPOTTY PAPER PLATES) \n12 (ALARM CLOCK BAKELIKE GREEN) \n13 (ALARM CLOCK BAKELIKE RED ) \n14 (SET/6 RED SPOTTY PAPER PLATES) \n15 (SET/20 RED RETROSPOT PAPER NAPKINS ) \n16 (SET/20 RED RETROSPOT PAPER NAPKINS ) \n17 (PLASTERS IN TIN SPACEBOY, PLASTERS IN TIN CIR... \n18 (SET/20 RED RETROSPOT PAPER NAPKINS ) \n19 (PLASTERS IN TIN SPACEBOY) \n20 (ALARM CLOCK BAKELIKE GREEN) \n21 (SET/6 RED SPOTTY PAPER CUPS) \n22 (ALARM CLOCK BAKELIKE PINK) \n23 (ALARM CLOCK BAKELIKE PINK) \n24 (SET/6 RED SPOTTY PAPER CUPS) \n25 (DOLLY GIRL LUNCH BOX) \n26 (ALARM CLOCK BAKELIKE RED ) \n27 (PLASTERS IN TIN WOODLAND ANIMALS, PLASTERS IN... \n28 (PLASTERS IN TIN SPACEBOY, PLASTERS IN TIN WOO... 
\n29 (PLASTERS IN TIN SPACEBOY) \n30 (ALARM CLOCK BAKELIKE GREEN) \n31 (ALARM CLOCK BAKELIKE PINK) \n32 (PLASTERS IN TIN WOODLAND ANIMALS) \n33 (PLASTERS IN TIN WOODLAND ANIMALS) \n34 (PLASTERS IN TIN CIRCUS PARADE ) \n35 (ROUND SNACK BOXES SET OF 4 FRUITS ) \n36 (SPACEBOY LUNCH BOX ) \n37 (LUNCH BAG WOODLAND) \n38 (LUNCH BAG SPACEBOY DESIGN ) \n39 (LUNCH BAG SPACEBOY DESIGN ) \n40 (LUNCH BAG SPACEBOY DESIGN ) \n41 (PLASTERS IN TIN CIRCUS PARADE ) \n42 (STRAWBERRY LUNCH BOX WITH CUTLERY) \n43 (LUNCH BAG APPLE DESIGN) \n44 (LUNCH BAG APPLE DESIGN) \n\n consequents antecedent support \\\n0 (SET/6 RED SPOTTY PAPER PLATES) 0.0868 \n1 (SET/6 RED SPOTTY PAPER CUPS) 0.0868 \n2 (SET/6 RED SPOTTY PAPER CUPS) 0.1085 \n3 (CHILDRENS CUTLERY DOLLY GIRL ) 0.0586 \n4 (SET/6 RED SPOTTY PAPER PLATES) 0.1171 \n5 (CHILDRENS CUTLERY SPACEBOY ) 0.0629 \n6 (ALARM CLOCK BAKELIKE GREEN) 0.0629 \n7 (ALARM CLOCK BAKELIKE RED ) 0.0629 \n8 (ALARM CLOCK BAKELIKE GREEN) 0.0803 \n9 (SET/20 RED RETROSPOT PAPER NAPKINS ) 0.1041 \n10 (ALARM CLOCK BAKELIKE PINK) 0.0672 \n11 (SET/20 RED RETROSPOT PAPER NAPKINS ) 0.1085 \n12 (ALARM CLOCK BAKELIKE RED ) 0.0846 \n13 (ALARM CLOCK BAKELIKE PINK) 0.0803 \n14 (SET/6 RED SPOTTY PAPER CUPS, SET/20 RED RETRO... 0.1085 \n15 (SET/6 RED SPOTTY PAPER PLATES) 0.1128 \n16 (SET/6 RED SPOTTY PAPER CUPS) 0.1128 \n17 (PLASTERS IN TIN WOODLAND ANIMALS) 0.0781 \n18 (SET/6 RED SPOTTY PAPER PLATES, SET/6 RED SPOT... 0.1128 \n19 (PLASTERS IN TIN WOODLAND ANIMALS) 0.1193 \n20 (ALARM CLOCK BAKELIKE PINK) 0.0846 \n21 (SET/20 RED RETROSPOT PAPER NAPKINS ) 0.1171 \n22 (ALARM CLOCK BAKELIKE GREEN) 0.0868 \n23 (ALARM CLOCK BAKELIKE RED ) 0.0868 \n24 (SET/6 RED SPOTTY PAPER PLATES, SET/20 RED RET... 0.1171 \n25 (SPACEBOY LUNCH BOX ) 0.0868 \n26 (ALARM CLOCK BAKELIKE GREEN, ALARM CLOCK BAKEL... 0.0803 \n27 (PLASTERS IN TIN SPACEBOY) 0.0868 \n28 (PLASTERS IN TIN CIRCUS PARADE ) 0.0889 \n29 (PLASTERS IN TIN CIRCUS PARADE ) 0.1193 \n30 (ALARM CLOCK BAKELIKE PINK, ALARM CLOCK BAKELI... 
0.0846 \n31 (ALARM CLOCK BAKELIKE GREEN, ALARM CLOCK BAKEL... 0.0868 \n32 (PLASTERS IN TIN SPACEBOY) 0.1453 \n33 (PLASTERS IN TIN CIRCUS PARADE ) 0.1453 \n34 (PLASTERS IN TIN WOODLAND ANIMALS) 0.1475 \n35 (ROUND SNACK BOXES SET OF4 WOODLAND ) 0.0933 \n36 (DOLLY GIRL LUNCH BOX) 0.1063 \n37 (LUNCH BAG SPACEBOY DESIGN ) 0.1020 \n38 (LUNCH BAG RED RETROSPOT) 0.1041 \n39 (LUNCH BAG WOODLAND) 0.1041 \n40 (LUNCH BAG APPLE DESIGN) 0.1041 \n41 (PLASTERS IN TIN SPACEBOY) 0.1475 \n42 (LUNCH BOX WITH CUTLERY RETROSPOT ) 0.1041 \n43 (LUNCH BAG SPACEBOY DESIGN ) 0.1085 \n44 (LUNCH BAG RED RETROSPOT) 0.1085 \n\n consequent support support confidence lift leverage conviction \n0 0.1085 0.0846 0.9750 8.9895 0.0752 35.6616 \n1 0.1171 0.0846 0.9750 8.3236 0.0744 35.3145 \n2 0.1171 0.1041 0.9600 8.1956 0.0914 22.0716 \n3 0.0629 0.0542 0.9259 14.7190 0.0505 12.6508 \n4 0.1085 0.1041 0.8889 8.1956 0.0914 8.0239 \n5 0.0586 0.0542 0.8621 14.7190 0.0505 6.8254 \n6 0.0846 0.0542 0.8621 10.1901 0.0489 6.6367 \n7 0.0803 0.0542 0.8621 10.7409 0.0492 6.6681 \n8 0.0846 0.0672 0.8378 9.9037 0.0605 5.6450 \n9 0.1128 0.0846 0.8125 7.2031 0.0729 4.7317 \n10 0.0868 0.0542 0.8065 9.2944 0.0484 4.7184 \n11 0.1128 0.0868 0.8000 7.0923 0.0745 4.4360 \n12 0.0803 0.0672 0.7949 9.9037 0.0605 4.4837 \n13 0.0868 0.0629 0.7838 9.0331 0.0559 4.2237 \n14 0.0868 0.0846 0.7800 8.9895 0.0752 4.1511 \n15 0.1085 0.0868 0.7692 7.0923 0.0745 3.8633 \n16 0.1171 0.0868 0.7692 6.5670 0.0736 3.8257 \n17 0.1453 0.0586 0.7500 5.1604 0.0472 3.4187 \n18 0.1041 0.0846 0.7500 7.2031 0.0729 3.5835 \n19 0.1453 0.0889 0.7455 5.1292 0.0716 3.3576 \n20 0.0868 0.0629 0.7436 8.5699 0.0556 3.5616 \n21 0.1128 0.0868 0.7407 6.5670 0.0736 3.4221 \n22 0.0846 0.0629 0.7250 8.5699 0.0556 3.3287 \n23 0.0803 0.0629 0.7250 9.0331 0.0559 3.3445 \n24 0.0868 0.0846 0.7222 8.3236 0.0744 3.2876 \n25 0.1063 0.0607 0.7000 6.5857 0.0515 2.9790 \n26 0.0629 0.0542 0.6757 10.7409 0.0492 2.8894 \n27 0.1193 0.0586 0.6750 5.6577 0.0482 2.7098 \n28 0.1475 
0.0586 0.6585 4.4645 0.0454 2.4966 \n29 0.1475 0.0781 0.6545 4.4374 0.0605 2.4677 \n30 0.0629 0.0542 0.6410 10.1901 0.0489 2.6105 \n31 0.0672 0.0542 0.6250 9.2944 0.0484 2.4873 \n32 0.1193 0.0889 0.6119 5.1292 0.0716 2.2695 \n33 0.1475 0.0868 0.5970 4.0474 0.0653 2.1154 \n34 0.1453 0.0868 0.5882 4.0474 0.0653 2.0756 \n35 0.1388 0.0542 0.5814 4.1879 0.0413 2.0572 \n36 0.0868 0.0607 0.5714 6.5857 0.0515 2.1309 \n37 0.1041 0.0564 0.5532 5.3129 0.0458 2.0051 \n38 0.1323 0.0564 0.5417 4.0936 0.0426 1.8931 \n39 0.1020 0.0564 0.5417 5.3129 0.0458 1.9594 \n40 0.1085 0.0564 0.5417 4.9942 0.0451 1.9452 \n41 0.1193 0.0781 0.5294 4.4374 0.0605 1.8715 \n42 0.1236 0.0542 0.5208 4.2124 0.0414 1.8289 \n43 0.1041 0.0564 0.5200 4.9942 0.0451 1.8664 \n44 0.1323 0.0564 0.5200 3.9298 0.0420 1.8077 \n"
],
[
"arule_model2.head()",
"_____no_output_____"
]
],
[
[
"# プロットモデル",
"_____no_output_____"
]
],
[
[
"plot_model(arule_model2)",
"_____no_output_____"
],
[
"plot_model(arule_model2, plot='3d')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e741d9f019be6a9869f5af51bbbe507f903e0a24 | 3,222 | ipynb | Jupyter Notebook | Heatmap_of _top_100_cites_in_Vermont.ipynb | kewangchen/Heatmap_of_cites_in_VT | 0d4a1cb61c539d8c3c862efdcd51a11d143fa16b | [
"Apache-2.0"
] | 2 | 2018-08-02T09:26:08.000Z | 2019-03-20T13:33:39.000Z | Heatmap_of _top_100_cites_in_Vermont.ipynb | kewangchen/Heatmap_of_cites_in_VT | 0d4a1cb61c539d8c3c862efdcd51a11d143fa16b | [
"Apache-2.0"
] | null | null | null | Heatmap_of _top_100_cites_in_Vermont.ipynb | kewangchen/Heatmap_of_cites_in_VT | 0d4a1cb61c539d8c3c862efdcd51a11d143fa16b | [
"Apache-2.0"
] | null | null | null | 29.559633 | 116 | 0.565798 | [
[
[
"# Heatmap of top 100 cites in Vermont\n### Kewang Chen 25/07/2018",
"_____no_output_____"
]
],
[
[
"import gmaps\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\n\ngmaps.configure(api_key=\"API\") # Personal info, need your Google API key\nresults=[]\n# put locations_top_100_cites.csv in the same dic\nwith open(\"locations_top_100_cites.csv\") as csvfile: # data of locations of top 100 cites\n reader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC) # change contents to floats\n for row in reader: # each row is a list\n results.append(row)\n# change the list to array\ndata=np.array(results)\nlocations = data\n#print(locations)\n#plt.xkcd()\n#gmaps.figure(map_type='HYBRID')\n\nfig=gmaps.figure(layout={\n 'width': '700px',\n 'height': '500px',\n 'padding': '3px',\n 'border': '1px solid black'\n})\nfig.add_layer(gmaps.heatmap_layer(locations))\nfig\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
e741dcca941d07aeaa64b02cc29c6570ce6214fb | 188,863 | ipynb | Jupyter Notebook | assignments/PA5/soln_PA5_RecSys.ipynb | saravanan-thirumuruganathan/cse5334Spring2015 | 1ba5fe4e76bfc6e336aa50fb50b8916c0cae0732 | [
"Apache-2.0"
] | 5 | 2016-10-31T16:26:56.000Z | 2021-05-05T20:53:10.000Z | assignments/PA5/soln_PA5_RecSys.ipynb | saravanan-thirumuruganathan/cse5334Spring2015 | 1ba5fe4e76bfc6e336aa50fb50b8916c0cae0732 | [
"Apache-2.0"
] | null | null | null | assignments/PA5/soln_PA5_RecSys.ipynb | saravanan-thirumuruganathan/cse5334Spring2015 | 1ba5fe4e76bfc6e336aa50fb50b8916c0cae0732 | [
"Apache-2.0"
] | 2 | 2017-01-06T07:38:05.000Z | 2018-03-22T10:20:17.000Z | 55.174701 | 847 | 0.68591 | [
[
[
"#Programming Assignment 5: Building a Movie Recommendation System",
"_____no_output_____"
],
[
"###Team Details:\n\nWhen submitting, fill your team details in this cell. Note that this is a markdown cell.\n\nStudent 1 Full Name: \nStudent 1 Student ID: \nStudent 1 Email Address: \n\nStudent 2 Full Name: \nStudent 2 Student ID: \nStudent 2 Email Address: \n\nStudent 3 Full Name: \nStudent 3 Student ID: \nStudent 3 Email Address: ",
"_____no_output_____"
],
[
"##Assignment Details\n\nIn this assignment, we will explore some of the fundamental concepts in building a recommendation systems for movies. This assignment material is inspired by code from a number of resources including those from Hanspeter Pfister, YHat, Sudeep Das, Jonny and others. At a high level, you will be doing the following tasks:\n\n### 1. Exploratory Analysis/Sparsity\n\nBefore building the actual recommender, we will explore the movie dataset and see how ratings are distributed across different dimensions and how the sparse the overall dataset is. These factors (long tail/sparsity) cause lot of issues when building a recommender.\n\n### 2. Nearest Neighbor based Recommender System\n\nWe will start with a simple nearest neighbor based recommender system. Given a movie, we will try to find the top-$k$ most similar movies. We will just provide the list of movies without predicting the ratings.\n\n### 3. Item based Collaborative Filtering\n\nIn this task, we will try to predict the ratings also. We will use item based collaborative filtering that computes similarity between movies and use them for recommendation.\n\n### 4. User based Collaborative Filtering\n\nIn this task, we will design a user based collaborative filtering that is very similar to the recommender you designed in Task 3.\n\n### 5. Latent Factor Models\n\nIn this task, we will explore couple of advanced models involving latent factors.",
"_____no_output_____"
]
],
[
[
"########### Do not change anything below\n\n%matplotlib inline \n\n#Array processing\nimport numpy as np\n\n#Data analysis, wrangling and common exploratory operations\nimport pandas as pd\nfrom pandas import Series, DataFrame\nfrom IPython.display import display\n\n#For visualization. Matplotlib for basic viz and seaborn for more stylish figures + statistical figures not in MPL.\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nimport scipy as sp\n#SVD for Sparse matrices\nfrom scipy.sparse.linalg import svds\n\nfrom sklearn.metrics.pairwise import euclidean_distances\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\nfrom collections import defaultdict, Counter\nimport operator\n\nfrom matplotlib import rcParams\nrcParams['figure.figsize'] = (10, 6)\n\n########################If needed you can import additional packages for helping you, although I would discourage it\n########################Put all your imports in the space below. If you use some strange package, \n##########################provide clear information as to how we can install it.\n\n#######################End imports###################################",
"_____no_output_____"
]
],
[
[
"# Part 1: Exploratory Analysis/Sparsity",
"_____no_output_____"
],
[
"**Dataset:** We will be using the MovieLens 100K dataset. It is a fairly small dataset with 100K ratings from 1000 users on 1700 movies. You can download the dataset from http://grouplens.org/datasets/movielens/ . Some basic details about the dataset can be found in the README text file at http://files.grouplens.org/datasets/movielens/ml-100k-README.txt . You will need to understand the structure before doing the experiments. Download and unzip the file in the same folder as the IPython notebook. The rest of the code assumes that there is a folder called \"ml-100k\" in the current directory.\n\n",
"_____no_output_____"
]
],
[
[
"#####Do not change anything below\n\n#Load the user data\nusers_df = pd.read_csv('ml-100k/u.user', sep='|', names=['UserId', 'Age', 'Gender', 'Occupation', 'ZipCode'])\n#Load the movies data: we will only use movie id and title for this assignment\nmovies_df = pd.read_csv('ml-100k/u.item', sep='|', names=['MovieId', 'Title'], usecols=range(2))\n#Load the ratings data: ignore the timestamps\nratings_df = pd.read_csv('ml-100k/u.data', sep='\\t', names=['UserId', 'MovieId', 'Rating'],usecols=range(3))\n\n#Working on three different data frames is a pain\n# Let us create a single dataset by \"joining\" these three data frames\nmovie_ratings_df = pd.merge(movies_df, ratings_df)\nmovielens_df = pd.merge(movie_ratings_df, users_df)\n\nmovielens_df.head()",
"_____no_output_____"
],
[
"#Task t1a: Print the NAME of the top-10 movies with most ratings\nprint movielens_df.groupby('Title').size().order(ascending=False)[:10]",
"Title\nStar Wars (1977) 583\nContact (1997) 509\nFargo (1996) 508\nReturn of the Jedi (1983) 507\nLiar Liar (1997) 485\nEnglish Patient, The (1996) 481\nScream (1996) 478\nToy Story (1995) 452\nAir Force One (1997) 431\nIndependence Day (ID4) (1996) 429\ndtype: int64\n"
],
[
"#Task t1b: Let us now analyze the rating behavior of the 1000 users in the dataset\n# Create a histogram based on the number of ratings per user with 50 bins. \n# Title=\"Count of Ratings per User\", XLabel=\"Ratings per user\", YLabel=\"#Users\"\n\n#t1b_user_rating_count is a groupby object that counts the number of ratings for each user.\nt1b_user_rating_count = movielens_df.groupby('UserId')['Rating'].count()\nt1b_user_rating_count.hist(bins=50)\nplt.xlabel(\"Ratings per user\")\nplt.ylabel(\"#Users\")\nplt.title(\"Count of Ratings per User\")\n\n####The figure below shows that most users leave less than 25 ratings while few outliers leave a lot of ratings",
"_____no_output_____"
],
[
"#Task t1c: Let us now analyze the rating behavior of the 1000 users in the dataset\n# Create a histogram based on the number of ratings per movie with 50 bins. \n# Title=\"Count of Ratings per Movie\", XLabel=\"Ratings per Movie\", YLabel=\"#Movies\"\n\n#t1c_user_rating_count is a groupby object that counts the number of ratings for each movie.\nt1c_user_rating_count = movielens_df.groupby('MovieId')['Rating'].count()\nt1c_user_rating_count.hist(bins=50)\nplt.xlabel(\"Ratings per Movie\")\nplt.ylabel(\"#Movies\")\nplt.title(\"Count of Ratings per Movie\")\n\n####The figure below shows that most movies receive less than 25 ratings while few popular get a lot of ratings",
"_____no_output_____"
],
[
"#Task t1d: Let us now analyze the rating distribution\n# Create a histogram based on the ratings received for each movie with 5 bins. \n# Title=\"Ratings Histogram\", XLabel=\"Rating Provided\", YLabel=\"#Ratings\"\n\nmovielens_df['Rating'].hist(bins=5)\nplt.xlabel(\"Rating Provided\")\nplt.ylabel(\"#Ratings\")\nplt.title(\"Ratings Histogram\")\n\n####The figure below shows that most movies at least 35K of ratings provided a score of 4 or higher.\n#The code below shows that the average rating is 3.5\n# This is problematic because each user has a different rating scale\n# For some users 1 is a bad movie while for some others 3 is bad\n# So a good model must take into the account the baseline rating behavior of users and movies\n\nprint \"Average rating of ALL movies is\", round(movie_ratings_df['Rating'].mean(), 2)",
"Average rating of ALL movies is 3.53\n"
],
[
"#Task t1e: Let us now study the baseline rating behavior in more detail\n# For each user compute his/her average rating\n# Create a histogram based on the average ratings with 5 bins. \n# Title=\"Histogram of Average Ratings of Users\", XLabel=\"Average Rating\", YLabel=\"#Users\"\n\n#t1e_avg_ratings is a groupby object with average rating for each user\nt1e_avg_ratings = movielens_df.groupby('UserId')['Rating'].mean()\n\nt1e_avg_ratings.hist(bins=5)\nplt.title(\"Histogram of Average Ratings of Users\")\nplt.xlabel(\"Average Rating\")\nplt.ylabel(\"#Users\")\n\n\n####The figure below shows that while the average rating of users vary\n# What does this imply?\n\n",
"_____no_output_____"
],
[
"#Task t1f: Let us now study the baseline rating behavior in more detail\n# For each movie compute its average rating\n# Create a histogram based on the average ratings with 5 bins. \n# Title=\"Histogram of Average Ratings of Movies\", XLabel=\"Average Rating\", YLabel=\"#Movies\"\n\n#t1e_avg_ratings is a groupby object with average rating for each user\nt1f_avg_ratings = movielens_df.groupby('MovieId')['Rating'].mean()\n\nt1f_avg_ratings.hist(bins=5)\nplt.title(\"Histogram of Average Ratings of Movies\")\nplt.xlabel(\"Average Rating\")\nplt.ylabel(\"#Movies\")\n\n\n####The figure below shows that while the average rating of movies vary\n# What does this imply?\n\n",
"_____no_output_____"
]
],
[
[
"**Common Support**: The concept of common support is a key idea is recommender systems. Given two items (movies in our case), the common support is the number of reviewers who rated both items. It is useful in both K-nearest neighbor and collaborative filtering based recommenders. Specifically, if the common support is 0 for a pair of movies, then it is quite hard to find their similarity!",
"_____no_output_____"
]
],
[
[
"#Task t1g: Let us now analyze the common support for movielens dataset\n# Here is the high level idea\n# We are going to create an array and populate it with the common support for all pair of movies\n# We then are going to plot a histogram and see its distribution.\n\n#This task might take quite some time - 1-2 hours for typical machines !\n\nt1g_all_movies = movies_df['MovieId'].unique()\nt1g_allpair_commonsupport = []\n\nfor i, movie1 in enumerate(t1g_all_movies):\n for j, movie2 in enumerate(t1g_all_movies):\n #Do not count a pair twice\n if i < j: \n userids_who_rated_movie1 = movielens_df[movielens_df.MovieId==movie1].UserId.unique()\n userids_who_rated_movie2 = movielens_df[movielens_df.MovieId==movie2].UserId.unique()\n num_common_users = len( set(userids_who_rated_movie1).intersection(userids_who_rated_movie2) ) \n t1g_allpair_commonsupport.append(num_common_users)\n\n \nprint \"Average common support is \", round(np.mean(t1g_allpair_commonsupport), 2)\nplt.hist(t1g_allpair_commonsupport)\n\n \n\n\n\n#### What the average common support and its distribution imply?\n\n\n",
"Average common support is 7.11\n"
],
[
"#Task t1h: Let us now consider how sparse the matrix is\nt1h_sparsity = 100000./(1682*943)\nprint \"Sparsity of the dataset is \", t1h_sparsity\n\n#This graph is actually less sparse than typical datasets for recommender systems\n# which often have a sparsity much lower than 1%\n# As discussed in the class, the sparsity imposes huge problem in terms of designing efficient and correct algorithms",
"Sparsity of the dataset is 0.0630466936422\n"
],
[
"#Task t1i: Compute the average rating for each movie grouped by gender\n# In other words, for each movie, compute the average rating given by men and women\n# Hint: use a pivot table\n\nt1i_movielens_mean_ratings = movielens_df.pivot_table('Rating', index ='Title', columns='Gender',aggfunc='mean')\ndisplay(t1i_movielens_mean_ratings[:10])",
"_____no_output_____"
]
],
[
[
"# Part 2: Nearest Neighbor based Recommender System",
"_____no_output_____"
],
[
"Let us now build a simple global recommendation system based on the nearest neighbor idea. ",
"_____no_output_____"
]
],
[
[
"#Task t2a: \n# Create a dictionary where key is Movie Name and value is id\n# You can either use the movies_df or read and parse the u.item file yourself\nmovie_name_to_id_dictionary = {}\n\n#Write code to populate the movie names to this array\nall_movie_names = []\n\nf = open(\"ml-100k/u.item\", \"r\")\nlines = f.readlines()\nfor line in lines:\n data = line.split(\"|\")\n movie_name, movie_id = data[1], int(data[0])\n movie_name_to_id_dictionary[movie_name] = movie_id\n all_movie_names.append(movie_name)\nf.close()",
"_____no_output_____"
],
[
"#Task t2b: Write a function that takes two inputs: \n# movie_id: id of the movie and common_users: a set of user ids\n# and returns the list of rows corresponding to their movie ratings \n\ndef get_movie_reviews(movie_id, common_users):\n #Get a boolean vector for themovielens_dfns_dfs provided by users in common_users for movie movie_id\n # Hint: use the isin operator of Pandas\n mask = (movielens_df['UserId'].isin(common_users)) & (movielens_df['MovieId'] == movie_id)\n \n #Create a subset of data where the mask is True i.e. only collect data from users who satisfy the condition above\n # Then sort them based on userid\n movie_ratings = movielens_df[mask].sort('UserId')\n \n #Do not change below\n #Return the unique set of ratings provided\n movie_ratings = movie_ratings[movie_ratings['UserId'].duplicated()==False]\n return movie_ratings\n \n",
"_____no_output_____"
],
[
"#Do not change below\n\n#Here are some sample test cases for evaluating t2b\nprint \"get_movie_reviews(1, set([1]))\"\ndisplay( get_movie_reviews(1, set([1])) )\n\nprint \"get_movie_reviews(1, set(range(1, 10)))\"\ndisplay( get_movie_reviews(1, set(range(1, 10))) )\n\nprint \"get_movie_reviews(100, set(range(1, 10)))\"\ndisplay( get_movie_reviews(100, set(range(1, 10))) )\n\nprint \"get_movie_reviews(784, set(range(1, 784)))\"\ndisplay( get_movie_reviews(784, set(range(1, 784))) )\n\n\n",
"get_movie_reviews(1, set([1]))\n"
],
[
"#Task t2c: Let us now calculate the similarity between two movies\n# based on the set of users who rated both movies\n#Using euclidean distance is a bad idea - but simplifies the code\n\ndef calculate_similarity(movie_name_1, movie_name_2, min_common_users=0):\n \n movie1 = movie_name_to_id_dictionary[movie_name_1]\n movie2 = movie_name_to_id_dictionary[movie_name_2]\n \n #This is the set of UNIQUE user ids who reviewed movie1\n users_who_rated_movie1 = movielens_df[movielens_df['MovieId'] == movie1]['UserId'].unique()\n \n #This is the set of UNIQUE user ids who reviewed movie2\n users_who_rated_movie2 = movielens_df[movielens_df['MovieId'] == movie2]['UserId'].unique()\n \n #Compute the common users who rated both movies: \n # hint convert both to set and do the intersection\n common_users = set(users_who_rated_movie1).intersection(users_who_rated_movie2)\n \n #Using the code you wrote in t2a, get the reviews for the movies and common users\n movie1_reviews = get_movie_reviews(movie1, common_users)\n movie2_reviews = get_movie_reviews(movie2, common_users)\n \n #Now you have the data frame for both movies\n # Use the euclidean_distances function from sklean (imported already)\n # to compute the distance between their rating values\n distance = euclidean_distances(movie1_reviews['Rating'], movie2_reviews['Rating'])\n\n if len(common_users) < min_common_users:\n return [[float('inf')]]\n return distance",
"_____no_output_____"
],
[
"#Do not change below\nprint calculate_similarity(\"Toy Story (1995)\", \"GoldenEye (1995)\")\nprint calculate_similarity(\"GoldenEye (1995)\", \"Tomorrow Never Dies (1997)\")\nprint calculate_similarity(\"Batman Forever (1995)\", \"Batman & Robin (1997)\")",
"[[ 13.60147051]]\n[[ 6.244998]]\n[[ 4.12310563]]\n"
],
[
"#Task t2d: Given a movie, find the top-k most similar movies\n# that have the lowest euclidean distance \n\n#Here is the high level logic:\n# for each movie in all_movie_names (Except the input movie name)\n# compute its similarity and store it in an array\n# return the k movies with the smallest distances\n# remember to pass min_common_users to calculate_similarity\ndef get_top_k_similar_movies(input_movie_name, k=5, min_common_users=0):\n movie_similarity = [] \n for movie_name in all_movie_names:\n if input_movie_name == movie_name:\n continue\n similarity = calculate_similarity(input_movie_name, movie_name, min_common_users)\n similarity = similarity[0][0]\n movie_similarity.append( (similarity, movie_name) )\n return sorted(movie_similarity)[:k]\n\n ",
"_____no_output_____"
],
[
"#print get_top_k_similar_movies(\"Toy Story (1995)\", 10)\nprint \"\\nMovies similar to GoldenEye [25]\", get_top_k_similar_movies(\"GoldenEye (1995)\", 10, 25)\nprint \"\\nMovies similar to GoldenEye [50]\", get_top_k_similar_movies(\"GoldenEye (1995)\", 10, 50)\nprint \"\\nMovies similar to GoldenEye [100]\", get_top_k_similar_movies(\"GoldenEye (1995)\", 10, 100)\nprint \"\\n\\n\"\n\nprint \"\\nMovies similar to Usual Suspects [25]\", get_top_k_similar_movies(\"Usual Suspects, The (1995)\", 10, 25)\nprint \"\\nMovies similar to Usual Suspects [50]\", get_top_k_similar_movies(\"Usual Suspects, The (1995)\", 10, 50)\nprint \"\\nMovies similar to Usual Suspects [100]\", get_top_k_similar_movies(\"Usual Suspects, The (1995)\", 10, 100)\nprint \"\\n\\n\"\n\nprint \"\\nMovies similar to Batman Forever [25]\", get_top_k_similar_movies(\"Batman Forever (1995)\", 10, 25)\nprint \"\\nMovies similar to Batman Forever [50]\", get_top_k_similar_movies(\"Batman Forever (1995)\", 10, 50)\nprint \"\\nMovies similar to Batman Forever [100]\", get_top_k_similar_movies(\"Batman Forever (1995)\", 10, 100)\nprint \"\\n\\n\"\n\nprint \"\\nMovies similar to Shawshank Redemption [25]\", get_top_k_similar_movies(\"Shawshank Redemption, The (1994)\", 10, 25)\nprint \"\\nMovies similar to Shawshank Redemption [50]\", get_top_k_similar_movies(\"Shawshank Redemption, The (1994)\", 10, 50)\nprint \"\\nMovies similar to Shawshank Redemption [100]\", get_top_k_similar_movies(\"Shawshank Redemption, The (1994)\", 10, 100)\nprint \"\\n\\n\"\n",
" \nMovies similar to GoldenEye [25] [(4.2426406871192848, 'Program, The (1993)'), (4.6904157598234297, 'Up Close and Personal (1996)'), (4.7958315233127191, 'Quick and the Dead, The (1995)'), (4.8989794855663558, 'Murder at 1600 (1997)'), (5.0990195135927845, 'Down Periscope (1996)'), (5.0990195135927845, \"My Best Friend's Wedding (1997)\"), (5.0990195135927845, 'Nick of Time (1995)'), (5.2915026221291814, 'Devil in a Blue Dress (1995)'), (5.2915026221291814, 'Donnie Brasco (1997)'), (5.2915026221291814, 'Michael (1996)')]\n\nMovies similar to GoldenEye [50] [(7.2801098892805181, 'Star Trek: Generations (1994)'), (7.5498344352707498, 'Basic Instinct (1992)'), (7.6811457478686078, 'Jumanji (1995)'), (7.6811457478686078, 'Phenomenon (1996)'), (7.9372539331937721, 'Courage Under Fire (1996)'), (8.0, 'Outbreak (1995)'), (8.1240384046359608, 'Firm, The (1993)'), (8.2462112512353212, 'Abyss, The (1989)'), (8.2462112512353212, 'Cliffhanger (1993)'), (8.2462112512353212, 'Maverick (1994)')]\n\nMovies similar to GoldenEye [100] [(9.8488578017961039, 'Mission: Impossible (1996)'), (10.770329614269007, 'True Lies (1994)'), (11.532562594670797, 'Speed (1994)'), (12.165525060596439, 'Independence Day (ID4) (1996)'), (12.206555615733702, 'Top Gun (1986)'), (12.369316876852981, 'Rock, The (1996)'), (12.569805089976535, 'Batman (1989)'), (13.114877048604001, 'Star Trek: The Wrath of Khan (1982)'), (13.601470508735444, 'Toy Story (1995)'), (13.74772708486752, 'Blade Runner (1982)')]\n\n\n\n\nMovies similar to Usual Suspects [25] [(5.196152422706632, 'Candidate, The (1972)'), (5.196152422706632, 'To Catch a Thief (1955)'), (5.4772255750516612, 'Cat on a Hot Tin Roof (1958)'), (5.4772255750516612, 'Roman Holiday (1953)'), (5.5677643628300215, \"Sophie's Choice (1982)\"), (5.5677643628300215, 'Wallace & Gromit: The Best of Aardman Animation (1996)'), (5.6568542494923806, 'His Girl Friday (1940)'), (5.7445626465380286, 'Die xue shuang xiong (Killer, The) (1989)'), 
(5.7445626465380286, 'Some Folks Call It a Sling Blade (1993)'), (5.9160797830996161, 'Breaking the Waves (1996)')]\n\nMovies similar to Usual Suspects [50] [(6.5574385243020004, 'Close Shave, A (1995)'), (7.2801098892805181, 'Apt Pupil (1998)'), (7.2801098892805181, 'Philadelphia Story, The (1940)'), (7.6811457478686078, 'Good Will Hunting (1997)'), (7.810249675906654, 'Wrong Trousers, The (1993)'), (8.1240384046359608, 'Manchurian Candidate, The (1962)'), (8.1240384046359608, 'Six Degrees of Separation (1993)'), (8.3066238629180749, 'Maltese Falcon, The (1941)'), (8.6023252670426267, 'Sling Blade (1996)'), (8.7749643873921226, 'Secrets & Lies (1996)')]\n\nMovies similar to Usual Suspects [100] [(9.3273790530888157, 'L.A. Confidential (1997)'), (9.4868329805051381, 'Rear Window (1954)'), (9.9498743710661994, 'Vertigo (1958)'), (10.04987562112089, 'North by Northwest (1959)'), (10.440306508910551, 'To Kill a Mockingbird (1962)'), (10.63014581273465, 'Glory (1989)'), (11.661903789690601, 'Godfather: Part II, The (1974)'), (11.704699910719626, 'Titanic (1997)'), (11.789826122551595, 'M*A*S*H (1970)'), (11.832159566199232, 'Casablanca (1942)')]\n\n\n\n\nMovies similar to Batman Forever [25] [(4.1231056256176606, 'Batman & Robin (1997)'), (4.6904157598234297, 'Down Periscope (1996)'), (4.6904157598234297, 'Junior (1994)'), (4.7958315233127191, 'Jack (1996)'), (5.0, 'Murder at 1600 (1997)'), (5.0990195135927845, 'Chain Reaction (1996)'), (5.196152422706632, \"City Slickers II: The Legend of Curly's Gold (1994)\"), (5.3851648071345037, 'Drop Zone (1994)'), (5.5677643628300215, 'Angels in the Outfield (1994)'), (5.6568542494923806, 'Casper (1995)')]\n\nMovies similar to Batman Forever [50] [(7.3484692283495345, 'Addams Family Values (1993)'), (7.9372539331937721, 'Net, The (1995)'), (8.1853527718724504, 'First Knight (1995)'), (8.3666002653407556, 'Dragonheart (1996)'), (8.9442719099991592, 'Eraser (1996)'), (9.1104335791442992, 'Broken Arrow (1996)'), 
(9.1651513899116797, 'Outbreak (1995)'), (9.1651513899116797, 'Star Trek: Generations (1994)'), (9.1651513899116797, 'Young Guns (1988)'), (9.2195444572928871, 'Interview with the Vampire (1994)')]\n\nMovies similar to Batman Forever [100] [(15.491933384829668, 'Batman (1989)'), (16.30950643030009, 'Jurassic Park (1993)'), (17.146428199482248, 'Indiana Jones and the Last Crusade (1989)'), (18.055470085267789, 'Fugitive, The (1993)'), (20.46948949045872, 'Return of the Jedi (1983)'), (21.283796653792763, 'Empire Strikes Back, The (1980)'), (22.06807649071391, 'Raiders of the Lost Ark (1981)'), (22.693611435820433, 'Star Wars (1977)'), (inf, \"'Til There Was You (1997)\"), (inf, '1-900 (1994)')]\n\n\n\n\nMovies similar to Shawshank Redemption [25] [(4.0, 'Some Folks Call It a Sling Blade (1993)'), (5.4772255750516612, 'Roman Holiday (1953)'), (5.5677643628300215, 'To Catch a Thief (1955)'), (5.6568542494923806, 'Looking for Richard (1996)'), (5.9160797830996161, 'As Good As It Gets (1997)'), (5.9160797830996161, 'Thin Blue Line, The (1988)'), (6.2449979983983983, 'Amistad (1997)'), (6.324555320336759, 'Candidate, The (1972)'), (6.4031242374328485, 'M (1931)'), (6.4031242374328485, 'Manon of the Spring (Manon des sources) (1986)')]\n\nMovies similar to Shawshank Redemption [50] [(6.7823299831252681, 'Close Shave, A (1995)'), (7.3484692283495345, 'Good Will Hunting (1997)'), (7.6157731058639087, 'High Noon (1952)'), (8.1240384046359608, 'Treasure of the Sierra Madre, The (1948)'), (8.2462112512353212, 'Mr. 
Smith Goes to Washington (1939)'), (8.3666002653407556, 'Hamlet (1996)'), (8.426149773176359, 'Manchurian Candidate, The (1962)'), (8.8317608663278477, 'Secrets & Lies (1996)'), (8.8881944173155887, '12 Angry Men (1957)'), (8.8881944173155887, 'Cinema Paradiso (1988)')]\n\nMovies similar to Shawshank Redemption [100] [(9.2736184954957039, 'Glory (1989)'), (9.9498743710661994, 'North by Northwest (1959)'), (10.344080432788601, 'Rear Window (1954)'), (10.440306508910551, 'Cool Hand Luke (1967)'), (10.723805294763608, 'Searching for Bobby Fischer (1993)'), (10.770329614269007, 'To Kill a Mockingbird (1962)'), (10.816653826391969, 'Vertigo (1958)'), (10.908712114635714, 'Bridge on the River Kwai, The (1957)'), (11.090536506409418, 'Right Stuff, The (1983)'), (11.269427669584644, \"It's a Wonderful Life (1946)\")]\n\n\n\n"
]
],
[
[
"#Task 3: Item based Collaborative Filtering\n\nIn this task, let us try to perform item based collaborative filtering. In spirit, it is very similar to what you already did for Task 2. With some minor changes, you can easily build an item-based collaborative filtering recommender.",
"_____no_output_____"
]
],
[
[
"#Do not change below\n# By default euclidean distance can be give arbitrary values\n# Let us \"normalize\" it by limit its value between 0 and 1 and slightly change the interpretation\n# 0 means that the preferences are very different\n# 1 means that preferences are identical\n# For tasks 3 and 4, remember to use this function\n\n#Vec1 and vec2 are vectors\ndef euclidean_distance_normed(vec1, vec2):\n if len(vec1) == 0:\n return 0.0\n euc_distance = euclidean_distances(vec1, vec2)[0][0]\n return 1.0 / (1.0 + euc_distance)",
"_____no_output_____"
],
[
"#Task t3a:\n# In this task, you want to compute the similarity between two items\n# which in this case means ratings_df\n# You can use code from 2c except that you must now call euclidean_distance_normed\n# when computing the distance\n\ndef calculate_similarity_normed(movie_name_1, movie_name_2, min_common_users=0):\n movie1 = movie_name_to_id_dictionary[movie_name_1]\n movie2 = movie_name_to_id_dictionary[movie_name_2]\n\n #This is the set of UNIQUE user ids who reviewed movie1\n users_who_rated_movie1 = movielens_df[movielens_df['MovieId'] == movie1]['UserId'].unique()\n \n #This is the set of UNIQUE user ids who reviewed movie2\n users_who_rated_movie2 = movielens_df[movielens_df['MovieId'] == movie2]['UserId'].unique()\n \n #Compute the common users who rated both movies: \n # hint convert both to set and do the intersection\n common_users = set(users_who_rated_movie1).intersection(users_who_rated_movie2)\n\n #Using the code you wrote in t2a, get the reviews for the movies and common users\n movie1_reviews = get_movie_reviews(movie1, common_users)\n movie2_reviews = get_movie_reviews(movie2, common_users)\n \n #Do not change below\n \n #Now you have the data frame for both movies\n # Use the euclidean_distances function from sklean (imported already)\n # to compute the distance between their rating values\n distance = euclidean_distance_normed(movie1_reviews['Rating'].values, movie2_reviews['Rating'].values)\n\n if len(common_users) < min_common_users:\n return 0.0\n return distance",
"_____no_output_____"
],
[
"#Do not change below\nprint calculate_similarity_normed(\"Toy Story (1995)\", \"GoldenEye (1995)\")\nprint calculate_similarity_normed(\"GoldenEye (1995)\", \"Tomorrow Never Dies (1997)\")\nprint calculate_similarity_normed(\"Batman Forever (1995)\", \"Batman & Robin (1997)\")",
"0.0684862527649\n0.138026263116\n0.195194101601\n"
],
[
"#Do not change below\n\n#We are now going to create item-item similarity database\n# Since our data is \"small\", we will use a non-traditional approach of using nested hashes\n# In real-life, using something like databases or other data structures is far more preferable\n\n#Here is the high level structure\n#{\n# movie_name_1: \n# { \n# movie_name_2: similarity_between_movie_1_and_2, \n# movie_name_3: similarity_between_movie_1_and_3, \n# ....\n# movie_name_n: similarity_between_movie_1_and_n\n# },\n# movie_name_2:\n# {\n# movie_name_1: similarity_between_movie_2_and_1, \n# movie_name_3: similarity_between_movie_2_and_3, \n# ....\n# movie_name_n: similarity_between_movie_2_and_n\n# },\n# ....\n# movie_name_n:\n# {\n# movie_name_1: similarity_between_movie_n_and_1, \n# movie_name_2: similarity_between_movie_n_and_2, \n# ....\n# movie_name_n-1: similarity_between_movie_n_and_n-1\n# },\n#} \n \n\n#Here is how to use this data structuere:\n\n#To get similarity between movies\n# data[movie1][movie2]\n#To get similarity between one movie and all others\n# data[movie1]",
"_____no_output_____"
],
[
"#DO not change below\n#This hash stores the movie to movie \n# as described above\nmovie_similarity_hash = defaultdict(dict)",
"_____no_output_____"
],
[
"#Item based filtering is expensive as you need to compute similarity of all pairs of items\n# for this dataset it is 1682*1682 ~ 28 lakh pairs or 2.8 million\n# running all of them might take hours and close to a day\n# instead let us run on a smaller dataset\n# specifically, let us only focus on the top-250 movies based on ratings\n# which is more manageable\n\n\n#Task t3b: \n# Get the top-k movie names with most ratings\n#Hint: use Counter class\n\ndef top_k_movie_names(k):\n movie_ratings_counter = Counter()\n \n with open(\"ml-100k/u.data\",\"r\") as f:\n for line in f:\n user_id, movie_id, rating, timestamp = line.split(\"\\t\")\n movie_name = all_movie_names[ int(movie_id) - 1]\n movie_ratings_counter[movie_name] += 1\n \n return movie_ratings_counter.most_common(k)",
"_____no_output_____"
],
[
"#Do not change below\nprint \"Top-10\", top_k_movie_names(10), \"\\n\"\nprint \"Top-25\", top_k_movie_names(25), \"\\n\"",
"Top-10 [('Star Wars (1977)', 583), ('Contact (1997)', 509), ('Fargo (1996)', 508), ('Return of the Jedi (1983)', 507), ('Liar Liar (1997)', 485), ('English Patient, The (1996)', 481), ('Scream (1996)', 478), ('Toy Story (1995)', 452), ('Air Force One (1997)', 431), ('Independence Day (ID4) (1996)', 429)] \n\nTop-25 [('Star Wars (1977)', 583), ('Contact (1997)', 509), ('Fargo (1996)', 508), ('Return of the Jedi (1983)', 507), ('Liar Liar (1997)', 485), ('English Patient, The (1996)', 481), ('Scream (1996)', 478), ('Toy Story (1995)', 452), ('Air Force One (1997)', 431), ('Independence Day (ID4) (1996)', 429), ('Raiders of the Lost Ark (1981)', 420), ('Godfather, The (1972)', 413), ('Pulp Fiction (1994)', 394), ('Twelve Monkeys (1995)', 392), ('Silence of the Lambs, The (1991)', 390), ('Jerry Maguire (1996)', 384), ('Chasing Amy (1997)', 379), ('Rock, The (1996)', 378), ('Empire Strikes Back, The (1980)', 367), ('Star Trek: First Contact (1996)', 365), ('Back to the Future (1985)', 350), ('Titanic (1997)', 350), ('Mission: Impossible (1996)', 344), ('Fugitive, The (1993)', 336), ('Indiana Jones and the Last Crusade (1989)', 331)] \n\n"
],
[
"#Do not change below\ntop_250_movie_names = [item[0] for item in top_k_movie_names(250)]",
"_____no_output_____"
],
[
"#Task t3c:\n#Use the following logic\n# for each movie in movie_names:\n# for all other movies in movie_names:\n# compute similarity between two movies using calculate_similarity_normed\n# remember to pass min_common_users to that function\n# note that movie_similarity_hash is a defaultdict \n# so similarity between movie1 and movie2 can be set as movie_similarity_hash[movie1][movie2]\n# btw, similarity in our case is commutative. \n# i.e. similarity(movie1, movie2) = similarity(movie2, movie1)\n# so do not call the function twice !\n# movie_names is an array that lists the movies for which you have to compute pairwise similarity\ndef compute_movie_to_movie_similarity(movie_names, min_common_users=0):\n for movie_name_1 in movie_names:\n for movie_name_2 in movie_names:\n if movie_name_1 == movie_name_2:\n continue\n similarity = calculate_similarity_normed(movie_name_1, movie_name_2, min_common_users)\n movie_similarity_hash[movie_name_1][movie_name_2] = similarity\n movie_similarity_hash[movie_name_2][movie_name_1] = similarity \n ",
"_____no_output_____"
],
[
"#Do not change below\n\n#Let us first test if your code above is correct by testing against a small subset of data\nmovie_similarity_hash = defaultdict(dict)\n# let use the top-10 movies\ncompute_movie_to_movie_similarity(top_250_movie_names[:10], min_common_users=0)\n\n#Get similarity with \ndisplay(movie_similarity_hash[\"Toy Story (1995)\"])\ndisplay(movie_similarity_hash['Return of the Jedi (1983)'])\n\nprint movie_similarity_hash[\"Toy Story (1995)\"][\"Independence Day (ID4) (1996)\"]",
"_____no_output_____"
],
[
"#Do not change below\n#Let us now test against top-250 most popular movies\n#This might take 10-20 mins to run!\nmovie_similarity_hash = defaultdict(dict)\ncompute_movie_to_movie_similarity(top_250_movie_names, min_common_users=25)\n",
"_____no_output_____"
],
[
"#Do not change below\n#Do this if you want to persist the data \n\n# Let us persist the movie-movie similarity data structure \n# that way you dont need to re-run the whole thing\n#pickle is a serialization library in Python\n# To persist/serialize, use the following line\n#pickle.dump(movie_similarity_hash, open(\"movie_similarity.pickle\", \"wb\"))\n# To deserialize, uncomment the following line \n#movie_similarity_hash = pickle.load( open( \"movie_similarity.pickle\", \"rb\" ) )\n",
"\n"
],
[
"for movie_name in top_250_movie_names[:10]:\n print \"Top-10 most similar movies for \", movie_name, \" :\", \n print sorted(movie_similarity_hash[movie_name].items(), key=operator.itemgetter(1), reverse=True)[:10]\n print \"\\n\"\n",
"Top-10 most similar movies for Star Wars (1977) : [('Fly Away Home (1996)', 0.12613198362288319), (\"Ulee's Gold (1997)\", 0.084626326089585924), ('Good Will Hunting (1997)', 0.082402672566341831), ('Apt Pupil (1998)', 0.080920442903129275), ('Gattaca (1997)', 0.080069658725771431), ('Wag the Dog (1997)', 0.07979214086871815), ('Sling Blade (1996)', 0.079245895810613809), ('African Queen, The (1951)', 0.07871102875529136), ('Rainmaker, The (1997)', 0.07844773813482285), ('Secrets & Lies (1996)', 0.075254704849630052)]\n\n\nTop-10 most similar movies for Contact (1997) : [('Philadelphia (1993)', 0.10230216299200159), ('Sling Blade (1996)', 0.094882133490771736), ('Fried Green Tomatoes (1991)', 0.093051003668180476), ('Room with a View, A (1986)', 0.092610094432975923), (\"Singin' in the Rain (1952)\", 0.092175602102042759), ('Shine (1996)', 0.092175602102042759), ('Maltese Falcon, The (1941)', 0.092175602102042759), ('Bound (1996)', 0.091747370480532636), ('Sneakers (1992)', 0.091747370480532636), ('Mask, The (1994)', 0.089695015344041368)]\n\n\nTop-10 most similar movies for Fargo (1996) : [('Manchurian Candidate, The (1962)', 0.099449199236264399), (\"Ulee's Gold (1997)\", 0.08815170219611887), ('Maltese Falcon, The (1941)', 0.087410245452875457), ('Good Will Hunting (1997)', 0.085983444756559363), ('Sling Blade (1996)', 0.084297269155557394), ('Rainmaker, The (1997)', 0.081799777282574593), ('African Queen, The (1951)', 0.081210303141612275), ('Wag the Dog (1997)', 0.080920442903129275), ('Rear Window (1954)', 0.07844773813482285), ('Vertigo (1958)', 0.07844773813482285)]\n\n\nTop-10 most similar movies for Return of the Jedi (1983) : [('Fly Away Home (1996)', 0.12849622184722817), (\"Ulee's Gold (1997)\", 0.10676232268609791), ('Gattaca (1997)', 0.093952725662966904), ('Wag the Dog (1997)', 0.091747370480532636), ('Rainmaker, The (1997)', 0.090094108300614636), ('Seven Years in Tibet (1997)', 0.089301349778500683), ('Shine (1996)', 0.08704668331836253), ('Apt 
Pupil (1998)', 0.086687761389570364), ('Kiss the Girls (1997)', 0.084626326089585924), ('Sling Blade (1996)', 0.083019512538737683)]\n\n\nTop-10 most similar movies for Liar Liar (1997) : [('Mask, The (1994)', 0.096331396777550107), ('While You Were Sleeping (1995)', 0.091325248684348978), ('Ghost and the Darkness, The (1996)', 0.090498756211208911), ('GoldenEye (1995)', 0.090094108300614636), ('Maverick (1994)', 0.08815170219611887), ('Con Air (1997)', 0.083972136564709449), ('River Wild, The (1994)', 0.083333333333333329), ('Cape Fear (1991)', 0.083019512538737683), ('Home Alone (1990)', 0.082709315626306693), ('Pretty Woman (1990)', 0.081503394203052748)]\n\n\nTop-10 most similar movies for English Patient, The (1996) : [('Fried Green Tomatoes (1991)', 0.094413879633246586), ('Maverick (1994)', 0.092175602102042759), ('Room with a View, A (1986)', 0.090498756211208911), ('Cinderella (1950)', 0.090094108300614636), ('Right Stuff, The (1983)', 0.088529810866542852), ('Firm, The (1993)', 0.08815170219611887), ('Sneakers (1992)', 0.087778549957133301), ('Mask, The (1994)', 0.086333380578904162), ('Cape Fear (1991)', 0.084626326089585924), (\"Monty Python's Life of Brian (1979)\", 0.084626326089585924)]\n\n\nTop-10 most similar movies for Scream (1996) : [('Maverick (1994)', 0.10113069765789216), (\"Singin' in the Rain (1952)\", 0.094413879633246586), ('Cinderella (1950)', 0.088529810866542852), ('Room with a View, A (1986)', 0.088529810866542852), ('Bound (1996)', 0.084297269155557394), ('While You Were Sleeping (1995)', 0.083972136564709449), ('Philadelphia (1993)', 0.082709315626306693), ('Shine (1996)', 0.082099515221765715), ('Clueless (1995)', 0.081799777282574593), ('Crimson Tide (1995)', 0.080920442903129275)]\n\n\nTop-10 most similar movies for Toy Story (1995) : [(\"Ulee's Gold (1997)\", 0.11696132920126338), ('Kiss the Girls (1997)', 0.10815240673485554), ('Rainmaker, The (1997)', 0.1060878539025194), ('Gattaca (1997)', 0.10290397182775128), ('Wag the Dog 
(1997)', 0.10230216299200159), ('Shine (1996)', 0.10171118008217982), ('Seven Years in Tibet (1997)', 0.098907726574930466), ('Cool Hand Luke (1967)', 0.0973366881823024), ('Peacemaker, The (1997)', 0.095840694682461411), ('Manchurian Candidate, The (1962)', 0.093051003668180476)]\n\n\nTop-10 most similar movies for Air Force One (1997) : [('Cinderella (1950)', 0.11519216806670013), ('While You Were Sleeping (1995)', 0.11189119247086728), ('Firm, The (1993)', 0.11034777731716484), ('Sneakers (1992)', 0.10960059084055324), ('Maverick (1994)', 0.10230216299200159), ('Crimson Tide (1995)', 0.10230216299200159), ('Bound (1996)', 0.10171118008217982), ('GoldenEye (1995)', 0.10113069765789216), ('Hunt for Red October, The (1990)', 0.10056040392403998), ('Mask, The (1994)', 0.10000000000000001)]\n\n\nTop-10 most similar movies for Independence Day (ID4) (1996) : [('Rainmaker, The (1997)', 0.11034777731716484), ('Kiss the Girls (1997)', 0.10745035092526581), ('Gattaca (1997)', 0.10477782979607683), (\"Ulee's Gold (1997)\", 0.10171118008217982), ('Peacemaker, The (1997)', 0.098907726574930466), ('Seven Years in Tibet (1997)', 0.093952725662966904), ('Wag the Dog (1997)', 0.091747370480532636), ('Cop Land (1997)', 0.089301349778500683), ('G.I. Jane (1997)', 0.087778549957133301), ('Maverick (1994)', 0.086333380578904162)]\n\n\n"
],
[
"#Task t3d\n\n#Before doing t3d, please complete t4a so that user_rating_hash is available\n# this will make the code below easier\n\n#In this task, we are going to predict the rating of a user u for a movie m using item based collaborative filtering\n#Here is the high level logic:\n# for each item i rated by this user:\n# s = similarity between i and input movie m \n# if similarity between i and m is 0, ignore this item \n# compute weighted rating for m based on i as rating for i * s\n# compute the predicted rating as sum of all weighted ratings / sum of all similarities\n\ndef predict_rating_for_movie_icf(movie_similarity_hash, input_user_id, input_movie_name, movies_considered):\n total_weighted_rating = 0.0\n total_similarity= 0.0\n \n #Hint: movie_similarity_hash is a nested hash where user id is key and \n # all their rating as a dictionary \n # this dictionary is ordered as moviename: rating\n\n #if this user has already rated the movie, return that rating\n if input_movie_name in user_rating_hash[input_user_id]:\n return user_rating_hash[input_user_id][input_movie_name]\n \n #For each movie the user has rated\n for movie_name in user_rating_hash[input_user_id].keys():\n\n #if user rated some movie, but it is not in the subset of movies that we computed pairwise similarity\n # such as top-250, then do not consider it either\n # for this task, the input is in movies_considered \n if movie_name not in movies_considered:\n continue\n \n #compute similarity between movies\n #dont recompute = use the hash\n similarity = movie_similarity_hash[input_movie_name][movie_name]\n \n #Reject item if similarity is 0\n if similarity <= 0.0:\n continue\n \n #Compute weighted rating\n weighted_rating = similarity * user_rating_hash[input_user_id][movie_name]\n \n #update total_weighted_rating and total_similarity\n total_weighted_rating += weighted_rating\n total_similarity += similarity\n \n #Do not change below\n if total_similarity == 0.0:\n return 0.0\n \n return 
total_weighted_rating / total_similarity",
"_____no_output_____"
],
[
"#Let us compute the rating for first 5 users for the top-20 movies that they have not seen\nfor user_id in range(1, 5+1):\n print user_id, [ (movie_name, \n round(predict_rating_for_movie_icf(movie_similarity_hash, user_id, movie_name, top_250_movie_names),2))\n for movie_name in top_250_movie_names[:20] \n if movie_name not in user_rating_hash[user_id]]\n \n#print movie_name, predict_rating_for_movie_icf(movie_similarity_hash, 1, 'Liar Liar (1997)', min_common_users=25)\n\n",
"1 [('Liar Liar (1997)', 3.83), ('English Patient, The (1996)', 3.87), ('Scream (1996)', 3.87), ('Air Force One (1997)', 3.87)]\n2 [('Return of the Jedi (1983)', 3.91), ('Independence Day (ID4) (1996)', 3.85), ('Raiders of the Lost Ark (1981)', 3.92), ('Pulp Fiction (1994)', 3.93), ('Twelve Monkeys (1995)', 3.87), ('Silence of the Lambs, The (1991)', 3.91), ('Chasing Amy (1997)', 3.89), ('Rock, The (1996)', 3.86), ('Empire Strikes Back, The (1980)', 3.91), ('Star Trek: First Contact (1996)', 3.86)]\n3 [('Star Wars (1977)', 2.95), ('Fargo (1996)', 2.92), ('English Patient, The (1996)', 2.92), ('Toy Story (1995)', 2.92), ('Independence Day (ID4) (1996)', 2.89), ('Raiders of the Lost Ark (1981)', 2.92), ('Godfather, The (1972)', 2.93), ('Pulp Fiction (1994)', 2.95), ('Twelve Monkeys (1995)', 2.94), ('Silence of the Lambs, The (1991)', 2.92), ('Jerry Maguire (1996)', 2.9), ('Rock, The (1996)', 2.89), ('Empire Strikes Back, The (1980)', 2.95), ('Star Trek: First Contact (1996)', 2.9)]\n4 [('Fargo (1996)', 4.21), ('Return of the Jedi (1983)', 4.26), ('English Patient, The (1996)', 4.17), ('Toy Story (1995)', 4.23), ('Independence Day (ID4) (1996)', 4.18), ('Raiders of the Lost Ark (1981)', 4.27), ('Godfather, The (1972)', 4.24), ('Pulp Fiction (1994)', 4.25), ('Twelve Monkeys (1995)', 4.24), ('Silence of the Lambs, The (1991)', 4.27), ('Jerry Maguire (1996)', 4.23), ('Chasing Amy (1997)', 4.19), ('Rock, The (1996)', 4.23), ('Empire Strikes Back, The (1980)', 4.29), ('Star Trek: First Contact (1996)', 4.22)]\n5 [('Contact (1997)', 3.35), ('Liar Liar (1997)', 3.25), ('English Patient, The (1996)', 3.31), ('Scream (1996)', 3.3), ('Air Force One (1997)', 3.29), ('Godfather, The (1972)', 3.36), ('Pulp Fiction (1994)', 3.34), ('Twelve Monkeys (1995)', 3.35), ('Jerry Maguire (1996)', 3.32), ('Chasing Amy (1997)', 3.36), ('Rock, The (1996)', 3.32)]\n"
],
[
"#Task t3e: \n#Here is the pseudocode for recommending movies\n# for each movie this user has not rated in movies_considered:\n# predict rating for this movie and this user using t4d\n# return the top-k movies\ndef recommend_movies_icf(input_user_id, movies_considered, movie_similarity_hash,\n user_rating_hash, k=10, min_common_movies=5):\n predicted_ratings = []\n \n #Your code here\n for movie_name in movies_considered:\n if movie_name in user_rating_hash[input_user_id]:\n continue\n \n #Predict the rating for input_user_id and movie_name using t3d\n predicted_rating = predict_rating_for_movie_icf(movie_similarity_hash, \n input_user_id, movie_name, movies_considered)\n #append rating and movie name to predicted_ratings\n predicted_ratings.append( (predicted_rating, movie_name) )\n \n return sorted(predicted_ratings, reverse=True)[:k]",
"_____no_output_____"
],
[
"#Do not change below:\n\n#Let us predict top-5 movies for first 10 users\nfor user_id in range(1,11):\n print user_id, recommend_movies_icf(user_id, top_250_movie_names, movie_similarity_hash, \n user_rating_hash, k=10, min_common_movies=5)",
" 1 [(4.1925848292261962, 'Fly Away Home (1996)'), (4.0983740953356511, \"Ulee's Gold (1997)\"), (4.0853921793090748, 'Seven Years in Tibet (1997)'), (3.9897313336931295, 'Maltese Falcon, The (1941)'), (3.9879158866803466, 'Manchurian Candidate, The (1962)'), (3.9876557080885249, 'Cop Land (1997)'), (3.9782574914172879, 'Wag the Dog (1997)'), (3.978195449823783, \"Singin' in the Rain (1952)\"), (3.9776616970615382, 'Secrets & Lies (1996)'), (3.9561068860479827, 'Mother (1996)')]\n2 [(4.0808708045508908, 'Cool Hand Luke (1967)'), (4.0758760619946868, 'Maltese Falcon, The (1941)'), (4.0533051054926501, 'Patton (1970)'), (4.0083596465934832, 'Reservoir Dogs (1992)'), (3.9881299279283864, 'Wag the Dog (1997)'), (3.9839921546749677, 'Bound (1996)'), (3.9765105208616371, 'Father of the Bride Part II (1995)'), (3.9734726233103057, 'Abyss, The (1989)'), (3.9617904510489481, 'Rumble in the Bronx (1995)'), (3.9401087977258875, 'Manchurian Candidate, The (1962)')]\n3 [(4.0, 'Fly Away Home (1996)'), (3.2010326559372495, 'Chinatown (1974)'), (3.157701069290638, 'Beavis and Butt-head Do America (1996)'), (3.143952685392172, 'Ed Wood (1994)'), (3.1433803387730994, 'Clerks (1994)'), (3.1237868346046724, 'Austin Powers: International Man of Mystery (1997)'), (3.0798009033435956, 'Annie Hall (1977)'), (3.0759377820139293, 'Room with a View, A (1986)'), (3.0697683194380638, 'Postino, Il (1994)'), (3.0661680108458311, 'Mighty Aphrodite (1995)')]\n4 [(5.0, 'Fly Away Home (1996)'), (4.4632038663842257, 'Searching for Bobby Fischer (1993)'), (4.4196965100181682, 'Mighty Aphrodite (1995)'), (4.4191100601766378, 'To Kill a Mockingbird (1962)'), (4.4169766165480482, 'North by Northwest (1959)'), (4.4094677480337543, 'Emma (1996)'), (4.4042857143634695, 'Vertigo (1958)'), (4.4034520554502796, 'Postino, Il (1994)'), (4.3967716982383376, 'Secrets & Lies (1996)'), (4.3856413995509103, 'Big Night (1996)')]\n5 [(3.7183255738053607, 'Fly Away Home (1996)'), (3.6633254211881798, \"Ulee's Gold 
(1997)\"), (3.5372799055697985, 'Cop Land (1997)'), (3.5149683822988829, 'Seven Years in Tibet (1997)'), (3.469581917447591, 'Bound (1996)'), (3.4617867390614938, 'Wag the Dog (1997)'), (3.4527021275095611, 'Rainmaker, The (1997)'), (3.4496495884364671, 'Sling Blade (1996)'), (3.4400533520469319, \"Singin' in the Rain (1952)\"), (3.4399531973912678, 'Secrets & Lies (1996)')]\n6 [(3.6450757118770802, 'Titanic (1997)'), (3.6419674474846175, 'Swingers (1996)'), (3.6346263653247584, 'Apt Pupil (1998)'), (3.6261382018050177, 'Professional, The (1994)'), (3.6238256630349728, 'Nightmare Before Christmas, The (1993)'), (3.6176103367235144, 'Philadelphia (1993)'), (3.6163393236633965, 'Manchurian Candidate, The (1962)'), (3.6158801676523433, 'Everyone Says I Love You (1996)'), (3.6148660366491452, 'Con Air (1997)'), (3.6125458805115862, 'Mystery Science Theater 3000: The Movie (1996)')]\n7 [(4.5769327294745636, 'Fly Away Home (1996)'), (4.3569138485911942, 'Boogie Nights (1997)'), (4.3430811631356416, \"Ulee's Gold (1997)\"), (4.3387217336002255, 'Everyone Says I Love You (1996)'), (4.3370856474197863, 'L.A. 
Confidential (1997)'), (4.334079880695576, 'Good Will Hunting (1997)'), (4.3302758538211039, 'Lone Star (1996)'), (4.3231589530937846, 'Titanic (1997)'), (4.3159277940669369, 'Bound (1996)'), (4.3140087951876245, 'Big Night (1996)')]\n8 [(4.5099993281415864, 'Fly Away Home (1996)'), (4.2346862647980528, 'Big Night (1996)'), (4.232651237329506, 'Secrets & Lies (1996)'), (4.2214676248329042, 'Maltese Falcon, The (1941)'), (4.2194500071542702, 'Manchurian Candidate, The (1962)'), (4.2112900704044289, 'Philadelphia (1993)'), (4.2088630225331416, 'Sling Blade (1996)'), (4.2047351130720045, \"Singin' in the Rain (1952)\"), (4.1991235849187269, \"Ulee's Gold (1997)\"), (4.1940617742206738, 'Remains of the Day, The (1993)')]\n9 [(5.0, 'Fly Away Home (1996)'), (4.3881344831760405, 'Sneakers (1992)'), (4.384739673791211, 'Cinderella (1950)'), (4.3728633261524967, 'Crow, The (1994)'), (4.3601856063557216, 'Good, The Bad and The Ugly, The (1966)'), (4.3583657116832946, 'GoldenEye (1995)'), (4.3537695463082429, 'Die Hard: With a Vengeance (1995)'), (4.3487405697301451, 'Nightmare Before Christmas, The (1993)'), (4.3485936712624973, \"Singin' in the Rain (1952)\"), (4.3455946019288847, 'Star Trek VI: The Undiscovered Country (1991)')]\n10 [(4.3656740682168014, \"Ulee's Gold (1997)\"), (4.3633159999804878, 'Good Will Hunting (1997)'), (4.360223994431875, 'Titanic (1997)'), (4.352806826677643, 'Tomorrow Never Dies (1997)'), (4.3500732774417745, 'Apt Pupil (1998)'), (4.3494915136704577, 'Professional, The (1994)'), (4.3491740447223943, 'Chasing Amy (1997)'), (4.34581077138371, 'Good, The Bad and The Ugly, The (1966)'), (4.3448056765651897, \"Schindler's List (1993)\"), (4.3436282240868342, 'Postino, Il (1994)')]\n"
]
],
[
[
"#Task 4: User based Collaborative Filtering\n\nIn this task, let us try to perform user based collaborative filtering. ",
"_____no_output_____"
]
],
[
[
"#In order to simplify the coding, let us create a nested hash structure to store the user-rating data\n# It will look as follows:\n#{\n# u1: {movie_name_1:rating1, movie_name_2:rating2, ....}, \n# ....\n# un: {movie_name_1:rating1, movie_name_2:rating2, ....}, \n#}\n\n#Of course, we will only store the movies that the user rated\n#Use the all_movie_names to convert movie id to movie name\n# remember that Movielens uses 1 based indexing\n# so the name of movieid i is in all_movie_names[i-1]",
"_____no_output_____"
],
[
"#Task t4a\n#Create the data structure as discussed above\n# here is the logic:\n# for each line in file ml-100k/u.data:\n# set user_rating_hash[user][movie] = rating\n# read the instructions above again!\n\ndef compute_user_rating_hash():\n user_rating_hash = defaultdict(dict)\n \n #Your code below\n with open(\"ml-100k/u.data\",\"r\") as f:\n for line in f:\n user_id, movie_id, rating, timestamp = line.split(\"\\t\")\n user_rating_hash[int(user_id)][all_movie_names[int(movie_id)-1]] = int(rating) \n \n return user_rating_hash",
"_____no_output_____"
],
[
"#Do not change below\nuser_rating_hash = compute_user_rating_hash()",
"_____no_output_____"
],
[
"#Do not change below\n#How many users are there?\nprint len(user_rating_hash.keys())\n#How many movies did each of the first 20 users rated?\nprint [len(user_rating_hash[i].keys()) for i in range(1,20+1)] \n#print the ratings of user 4\ndisplay(user_rating_hash[4])",
"943\n[271, 61, 53, 24, 175, 208, 401, 59, 22, 184, 181, 51, 632, 98, 103, 140, 28, 277, 20, 48]\n"
],
[
"#Task t4b:\n#We need to modify our logic for computing distance\n#Here is the high level pseudocode:\n# movie1 = movie names rated by user 1\n# movie2 = movie names rated by user 2\n# common movies = set intersection of movie1 and movie2\n# if number of common movies is less than min_common_movies, return 0.0 [not 0]\n# other wise create a vector with rating for common movies only\n# compute euclidean distance between the vectors\n# return 1 / (1+euclidean distace)\n\ndef compute_user_user_similarity(user_rating_hash, user_id_1, user_id_2, min_common_movies=0):\n #Get list of movie names rated by user 1. hint use keys function [see above for usage]\n movies_rated_user1 = user_rating_hash[user_id_1].keys()\n movies_rated_user2 = user_rating_hash[user_id_2].keys()\n \n #compute common movies\n common_movies = set(movies_rated_user1).intersection( set(movies_rated_user2) )\n if len(common_movies) < min_common_movies:\n return 0.0\n \n common_movies = sorted(list(common_movies))\n \n #vector1 is the set of ratings for user1 for movies in common_movies\n vector1 = [user_rating_hash[user_id_1][movie] for movie in common_movies]\n #vector2 is the set of ratings for user2 for movies in common_movies\n vector2 = [user_rating_hash[user_id_2][movie] for movie in common_movies]\n \n #Compute distance and return 1.0/(1.0+distance)\n distance = euclidean_distances(vector1, vector2)[0][0]\n return 1.0 / ( 1.0 + distance)",
"_____no_output_____"
],
[
"#Testing code\nprint [round(compute_user_user_similarity(user_rating_hash, 1, i),2) for i in range(1, 10+1)]\nprint [round(compute_user_user_similarity(user_rating_hash, 784, i),2) for i in range(1, 10+1)]",
"[1.0, 0.16, 0.16, 0.19, 0.08, 0.07, 0.06, 0.16, 0.31, 0.1]\n[0.19, 0.19, 0.11, 0.33, 1.0, 0.19, 0.18, 0.5, 0.21, 0.29]\n"
],
[
"#Task t4c\n#This function finds the k-most similar users \n#Here is the high level logic:\n# for each user in all_user_ids other than the input user id:\n# find similarity between this user and input_user_id and store as (similarity, other userid)\n# sort based on similarity\n# return top-k\n# remember to pass min_common_movies\ndef top_k_most_similar_users(user_rating_hash, input_user_id, all_user_ids, k=10, min_common_movies=0):\n user_similarity = []\n \n for user_id in all_user_ids:\n if user_id == input_user_id:\n continue\n similarity = compute_user_user_similarity(user_rating_hash, input_user_id, user_id, min_common_movies)\n user_similarity.append( ( similarity, user_id) )\n return sorted(user_similarity, reverse=True)[:k]",
"_____no_output_____"
],
[
"#Do not change below\nall_user_ids = range(1, 943+1)\nprint top_k_most_similar_users(user_rating_hash, 1, all_user_ids, 10, 5)\nprint top_k_most_similar_users(user_rating_hash, 1, all_user_ids, 10, 10)\nprint top_k_most_similar_users(user_rating_hash, 812, all_user_ids, 10, 5)\nprint top_k_most_similar_users(user_rating_hash, 812, all_user_ids, 10, 20)",
"[(0.41421356237309509, 876), (0.36602540378443865, 105), (0.33333333333333331, 895), (0.33333333333333331, 282), (0.33333333333333331, 107), (0.3090169943749474, 842), (0.3090169943749474, 696), (0.3090169943749474, 520), (0.3090169943749474, 516), (0.3090169943749474, 433)]\n[(0.3090169943749474, 516), (0.3090169943749474, 433), (0.3090169943749474, 359), (0.25, 800), (0.25, 691), (0.2402530733520421, 564), (0.2402530733520421, 549), (0.2402530733520421, 46), (0.23166247903553999, 941), (0.22400923773979589, 252)]\n[(0.5, 816), (0.5, 768), (0.41421356237309509, 555), (0.41421356237309509, 4), (0.36602540378443865, 534), (0.36602540378443865, 314), (0.36602540378443865, 127), (0.36602540378443865, 38), (0.33333333333333331, 826), (0.33333333333333331, 727)]\n[(0.21712927295533244, 451), (0.16396078054371141, 782), (0.13652705949581431, 721), (0.095840694682461411, 181), (0.0, 943), (0.0, 942), (0.0, 941), (0.0, 940), (0.0, 939), (0.0, 938)]\n"
],
[
"#Task t4d\n#In this task, we are going to predict the rating of a user for a movie using user based collaborative filtering\n#Here is the high level logic:\n# for each user u in all_user_ids:\n# s= similarity between u and input_user_id [remember to pass min_common_movies]\n# if similairty is 0.0 ignore u\n# if u has not rated this movie, ignore again\n# suppose u has rated this movie with a value of r\n# i am now going to give a \"weighted rating\" as r*s\n# compute the predicted rating as sum of all weighted ratings / sum of all similarities\n\ndef predict_rating_for_movie_ucf(user_rating_hash, input_user_id, movie_name, all_user_ids, min_common_movies=5):\n total_weighted_rating = 0.0\n total_similarity= 0.0\n \n for user_id in all_user_ids:\n if user_id == input_user_id:\n continue\n \n #compute similarity between users\n similarity = compute_user_user_similarity(user_rating_hash, user_id, input_user_id, min_common_movies)\n \n #Reject user if similarity is 0\n if similarity <= 0.0:\n continue\n \n #reject user if (s)he has not rated the movie\n if movie_name not in user_rating_hash[user_id]:\n continue\n \n #Compute weighted rating\n weighted_rating = similarity * user_rating_hash[user_id][movie_name]\n \n #update total_weighted_rating and total_similarity\n total_weighted_rating += weighted_rating\n total_similarity += similarity\n \n #Do not change below\n if total_similarity == 0.0:\n return 0.0\n \n return total_weighted_rating / total_similarity",
"_____no_output_____"
],
[
"#Do not change below\nall_user_ids = range(1, 943+1)\nfor user_id in range(1, 5+1):\n print \"user_id = \", user_id\n print [ round(predict_rating_for_movie_ucf(user_rating_hash, user_id, all_movie_names[i], all_user_ids, min_common_movies=5),1)\n for i in range(1, 10+1)]\n print [ round(predict_rating_for_movie_ucf(user_rating_hash, user_id, all_movie_names[i], all_user_ids, min_common_movies=10),1)\n for i in range(1, 10+1)]\n print \"\\n\"",
"user_id = 1\n[3.2, 3.1, 3.6, 3.3, 3.8, 3.8, 4.0, 3.9, 3.9, 3.9]\n[3.2, 3.1, 3.6, 3.3, 3.5, 3.8, 4.0, 3.9, 3.9, 3.8]\n\n\nuser_id = 2\n[3.2, 3.1, 3.6, 3.3, 3.7, 3.8, 4.0, 3.9, 3.8, 3.9]\n[3.2, 3.1, 3.6, 3.3, 3.5, 3.8, 4.1, 3.9, 3.8, 3.8]\n\n\nuser_id = 3\n[3.2, 3.0, 3.6, 3.3, 3.4, 3.8, 3.9, 3.9, 3.8, 3.9]\n[3.0, 2.8, 3.5, 3.1, 3.2, 3.8, 4.0, 3.9, 3.7, 3.8]\n\n\nuser_id = 4\n[3.3, 3.0, 3.6, 3.4, 3.4, 3.8, 3.9, 3.9, 3.7, 3.9]\n[3.2, 2.9, 3.4, 3.0, 2.8, 3.8, 4.0, 3.8, 3.5, 3.9]\n\n\nuser_id = 5\n[3.2, 3.0, 3.5, 3.3, 3.6, 3.8, 4.0, 3.8, 3.8, 3.8]\n[3.2, 3.0, 3.5, 3.3, 3.5, 3.8, 4.0, 3.8, 3.8, 3.8]\n\n\n"
],
[
"#Task t4e: \n#Here is the pseudocode for recommending movies\n# for each movie this user has not rated:\n# for all other users:\n# predict rating for this movie and this user using t4d\n# return the top-k movies\ndef recommend_movies_ucf(user_rating_hash, all_user_ids, input_user_id, k=10, min_common_movies=5):\n predicted_ratings = []\n \n #Your code here\n for movie_name in all_movie_names:\n if movie_name in user_rating_hash[input_user_id]:\n continue\n \n #Predict the rating for input_user_id and movie_name using t4d\n predicted_rating = predict_rating_for_movie_ucf(user_rating_hash, input_user_id, movie_name, \n all_user_ids, min_common_movies)\n #append rating and movie name to predicted_ratings\n predicted_ratings.append( (predicted_rating, movie_name) )\n \n return sorted(predicted_ratings, reverse=True)[:k]",
"_____no_output_____"
],
[
"#Do not change below\nall_user_ids = range(1, 943+1)\n\nfor user_id in range(1, 5):\n print recommend_movies_ucf(user_rating_hash, all_user_ids, user_id, k=10, min_common_movies=5)",
"[(5.0000000000000009, 'Saint of Fort Washington, The (1993)'), (5.0, 'They Made Me a Criminal (1939)'), (5.0, \"Someone Else's America (1995)\"), (5.0, 'Santa with Muscles (1996)'), (5.0, 'Prefontaine (1997)'), (5.0, 'Marlene Dietrich: Shadow and Light (1996) '), (5.0, 'Little City (1998)'), (5.0, 'Great Day in Harlem, A (1994)'), (5.0, 'Entertaining Angels: The Dorothy Day Story (1996)'), (5.0, 'Aiqing wansui (1994)')]\n[(5.0000000000000009, 'Prefontaine (1997)'), (5.0, 'They Made Me a Criminal (1939)'), (5.0, 'Star Kid (1997)'), (5.0, \"Someone Else's America (1995)\"), (5.0, 'Santa with Muscles (1996)'), (5.0, 'Saint of Fort Washington, The (1993)'), (5.0, 'Marlene Dietrich: Shadow and Light (1996) '), (5.0, 'Great Day in Harlem, A (1994)'), (5.0, 'Entertaining Angels: The Dorothy Day Story (1996)'), (5.0, 'Aiqing wansui (1994)')]\n[(5.0, 'Tough and Deadly (1995)'), (5.0, 'Star Kid (1997)'), (5.0, 'Santa with Muscles (1996)'), (5.0, 'Saint of Fort Washington, The (1993)'), (5.0, 'Marlene Dietrich: Shadow and Light (1996) '), (5.0, 'Great Day in Harlem, A (1994)'), (5.0, 'Entertaining Angels: The Dorothy Day Story (1996)'), (5.0, 'Aiqing wansui (1994)'), (4.9999999999999991, 'Prefontaine (1997)'), (4.6888446196178126, 'Anna (1996)')]\n[(5.0000000000000009, 'Prefontaine (1997)'), (5.0, 'Star Kid (1997)'), (5.0, \"Someone Else's America (1995)\"), (5.0, 'Santa with Muscles (1996)'), (5.0, 'Saint of Fort Washington, The (1993)'), (5.0, 'Marlene Dietrich: Shadow and Light (1996) '), (5.0, 'Little City (1998)'), (5.0, 'Great Day in Harlem, A (1994)'), (4.6385142554001959, 'Leading Man, The (1996)'), (4.6325248550982909, 'Crossfire (1947)')]\n"
]
],
[
[
"#Task 5: Latent Factor Models\n\nIn this task, let us try to find the simplest SVD based latent factor model.",
"_____no_output_____"
]
],
[
[
"number_of_users = 943\nnumber_of_movies = 1682\n\nratings_matrix = sp.sparse.lil_matrix((number_of_users, number_of_movies))\n\n",
"_____no_output_____"
],
[
"#Task t5a: This task requires a different data structure and hence different from prior tasks.\n# Here is the high level idea:\n# - Create a sparse matrix of type lil_matrix \n# - populate it with the data from ratings file\n# - if user id i gave movie id j with a rating r, set matrix[i-1,j-1] to r \n# ie rows=users, col=movies\n# Hint: If you are reading it from file, note that Python treats the data from file as a string\n# so you might want to convert them to integer before inserting them.\nwith open(\"ml-100k/u.data\",\"r\") as f:\n for line in f:\n user_id, movie_id, rating, timestamp = line.split(\"\\t\")\n ratings_matrix[ int(user_id)-1, int(movie_id)-1] = int(rating)",
"_____no_output_____"
],
[
"print \"Matrix shape is\", ratings_matrix.shape\nprint \"Number of non zero values\", ratings_matrix.nnz\nprint \"Number of non zero values\", ratings_matrix.nonzero()",
"Matrix shape is (943, 1682)\nNumber of non zero values 158695\nNumber of non zero values (array([ 0, 0, 0, ..., 942, 942, 942], dtype=int32), array([ 0, 1, 2, ..., 1187, 1227, 1329], dtype=int32))\n"
],
[
"#Task t5b:\n# Perform SVD on the ratings matrix\n# Hint use the svds function imported above and not svd \n# K is the number of factors to have\ndef perform_svd(ratings_matrix, K=100):\n U, S, V = None, None, None\n return U, S, V",
"_____no_output_____"
],
[
"#Task t5c:\n# Note that S is is an array and not a matrix\n# Create a diagonal matrix where the diagonal matrix is populated from S\n# For eg, if S = 2,4, 8\n# then output must be\n# 2 0 0\n# 0 4 0\n# 0 0 8\n\ndef construct_diagonal_matrix(S):\n return None",
"_____no_output_____"
],
[
"#Task t5d: \n# We are now going to reconstruct the matrix from the SVD that we have got\n# new matrix = U S V \n# But its shape will be different as we only used the top-k factors\ndef reconstruct_low_rank_matrix(U, S, V):\n return None",
"_____no_output_____"
],
[
"#Task t5e:\n# Using the reconstructed matrix, predict the rating of user to a movie\ndef predict_rating_svd(user_id, movie_id):\n return 0.0",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e741e0f5801c0b44c25e57b28e943c786b7ecfd3 | 697,141 | ipynb | Jupyter Notebook | graficos_almendralejo.ipynb | mharias/covid_almendralejo | d754f51751c15c326f5355e9632a5817d461edb4 | [
"MIT"
] | 1 | 2021-01-15T21:16:48.000Z | 2021-01-15T21:16:48.000Z | graficos_almendralejo.ipynb | mharias/covid_almendralejo | d754f51751c15c326f5355e9632a5817d461edb4 | [
"MIT"
] | null | null | null | graficos_almendralejo.ipynb | mharias/covid_almendralejo | d754f51751c15c326f5355e9632a5817d461edb4 | [
"MIT"
] | null | null | null | 173.50448 | 224,192 | 0.840507 | [
[
[
"# Objetivo",
"_____no_output_____"
],
[
"En este trabajo vamos a analizar los datos epidemiológicos generados para Almendralejo por la Comunidad Extremadura desde aquí [fuente](https://saludextremadura.ses.es/web/casospositivos). Siguiendo los procesos estándares descargaremos los datos desde gitub, analizaremos los campos y prepararemos una serie de gráficas que añadan aunque sea un poco de información a lo que está ocurriendo.",
"_____no_output_____"
],
[
"\n@author: Manuel H. Arias \n\n@Twitter: @walyt\n\n@mail: [email protected]\n\n",
"_____no_output_____"
],
[
"## Importación de librerias estándares para DAE",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.ticker import FuncFormatter\nfrom matplotlib import cm\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as ticker\nfrom matplotlib.dates import (YEARLY, MONTHLY, DateFormatter, MonthLocator,DayLocator,\n rrulewrapper, WeekdayLocator,RRuleLocator, drange)\nimport matplotlib.image as mpimg\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox\nimport matplotlib.colors as colors\n\nimport numpy as np\nfrom datetime import datetime,timedelta\nimport seaborn as sns\n%matplotlib inline\n\nimport urllib.request\n\nimport matplotlib.image as mpimg\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox",
"_____no_output_____"
]
],
[
[
"## Descargamos la información",
"_____no_output_____"
],
[
"Hacemos en primer lugar una actualización de parámetros y preparación de variables que necesitaremos durante el ejercicio",
"_____no_output_____"
]
],
[
[
"pd.options.display.max_rows = 999 #Variable de contexto para permitir la presentación de datos por pantalla\npd.set_option('display.max_columns', None)",
"_____no_output_____"
],
[
"#url de la fuente de datos\npath_fuente_datos='datos/almendralejo.xlsx'\npath_fuente_datos_github='https://github.com/mharias/covid_almendralejo/blob/main/datos/almendralejo.csv?raw=true'\n#path_fuente_datos_github_csv='https://github.com/mharias/covid_almendralejo/blob/main/datos/covid_almendralejo.csv?raw=true'\n\npoblacion_almendralejo = 33474 #https://es.wikipedia.org/wiki/Almendralejo\n",
"_____no_output_____"
]
],
[
[
"Leemos los datos en un `pandas`",
"_____no_output_____"
]
],
[
[
"datos = pd.read_csv(path_fuente_datos_github,sep=',')",
"_____no_output_____"
],
[
"datos",
"_____no_output_____"
],
[
"#datos = pd.read_excel(path_fuente_datos,skiprows=2)",
"_____no_output_____"
]
],
[
[
"veamos una rápida descripción de la información:",
"_____no_output_____"
],
[
"y un muestreo de valores y de algunas columnas de interés:",
"_____no_output_____"
]
],
[
[
"datos.tail()",
"_____no_output_____"
],
[
"datos['date']=pd.to_datetime(datos['date'],format='%Y-%m-%d')",
"_____no_output_____"
],
[
"datos['media_7']=datos['Casos positivos'].rolling(window=7).mean().round()",
"_____no_output_____"
],
[
"datos['ia14'] = datos['Casos positivos'].rolling(window=14).sum()/poblacion_almendralejo*100000",
"_____no_output_____"
],
[
"datos['ia_ratio'] = datos['ia14'].pct_change(periods=7).add(1)",
"_____no_output_____"
],
[
"datos.tail(19)",
"_____no_output_____"
]
],
[
[
"# Gráficos",
"_____no_output_____"
],
[
" A continuación vamos a crear un gráfico múltiple que nos permita visualizar cada una de las columnas con datos numéricos. Ello nos permitirá",
"_____no_output_____"
],
[
"Una vez analizadas cada una de las columnas de datos podremos elegir las que queremos presentar..",
"_____no_output_____"
],
[
"Preparemos un gráfico tipo [Facetgrid](https://seaborn.pydata.org/generated/seaborn.FacetGrid.html), al que añadiremos tantos gráficos como provincias, presentando una de las columnas de datos. Elegimos nº de hospitalizados por 100.000 habitantes. Dejo otras claves con `#` para que fácilmente se puedan generar. Este ejemplo esta basado en el código del link anterior.",
"_____no_output_____"
]
],
[
[
"sns.set(style=\"white\",rc={\"axes.facecolor\": (0, 0, 0, 0)})\n\n# Preparamos los datos. Es importante añadir un zero a los campos NotANumber..\n\n\nclave_avg='daily_cases_PCR_avg7'\nclave_ratio_avg = 'ratio_daily_cases_PCR_avg7'\ncolor_ratio = 'red'\ncolor_fill = 'royalblue'\ncolor_titulos = 'navy'\ncolor_linea='darkred'\nclave_ratio = 'ia_ratio'\nIA='ia14'\navg='media_7'\n\ncasos_nuevos='Casos positivos'\nactivos='Activos'\n\nventana_IA=14\ntitulo_positivos = 'Almendralejo: casos positivos comunicados diarios y media 7 días'\ntitulo_activos = 'Almendralejo: casos activos comunicados'\ntitulo_IA='Almendralejo: IA14, casos acumulados de los últimos 14 días por 100.000 habitantes\\n\\\nEn rojo ratio de la IA14 con respecto a la de 7 días anteriores.'\nfuente_positivos='Salud, Junta Extremadura'\npath_fuente_positivos='https://saludextremadura.ses.es/web/casospositivos'\n\nfuente_activos='Ayto Almendralejo'\npath_fuente_activos='https://www.almendralejo.es/noticias.php'\n\nautor='@walyt'\n\n\ny_IA='IA a 14 días'\ny_ratio='Ratio a 7 días'\npath='graficos/'\n \n",
"_____no_output_____"
],
[
"colores = sns.color_palette(\"Blues_r\", 5)\ncolores = sns.color_palette('mako', 5)\nplt.style.use('seaborn-white')\n#f.suptitle(titulo,fontsize=20,x=0.5,y=1,color=color_titulos)\nf = plt.figure(figsize=(10,5))\nax1 = f.add_subplot(1,1,1,)\n\n\n#colors = cm.RdYlBu_r(datos[casos_nuevos]/datos[casos_nuevos].max())\ncolors = cm.Blues(datos[casos_nuevos]/datos[casos_nuevos].max())\n\n\nbarras = ax1.bar(datos['date'],datos[casos_nuevos],color=colors,width=1.2)\nax1.plot(datos['date'],datos[avg],color='orange',lw=3,label='media 7 días')\n\nax1.yaxis.set_tick_params(labelsize=16,labelcolor='navy')\nax1.xaxis.set_tick_params(labelsize=10,labelcolor='navy',width=0,rotation=60)\nax1.set_title(titulo_positivos,size=20,color='navy')\n\nbarras_ = np.concatenate(([barras[0]],[barras[-1]]))\nbarras_ = np.concatenate((barras_,barras[::-1][:-2:7]))\nbarras_ = barras[::-1][::14][::1]\nfor barra in barras_:\n height = barra.get_height()\n ax1.text(barra.get_x() + barra.get_width()/2., 1*height,\n '%d' % int(height),ha='center',fontsize=12, va='bottom',color='navy')\n\n\n\n\nfmt = '%.0f' # Format you want the ticks, e.g. 
'40%'\nyticks = ticker.FormatStrFormatter(fmt)\nax1.yaxis.set_major_formatter(yticks)\n#ax1.set_yticks([0,10,20,30,40,50,60,70,80,90,100])\n\nXmajorFmt = DateFormatter('%-d-%b')\nax1.xaxis.set_major_formatter(XmajorFmt)\nax1.xaxis.set_major_locator(WeekdayLocator([datetime.today().weekday()])) #incluye hoy y 7 antes\n#ax1.set_xticks(ax1.get_xticks()[:])\nax1.set_xticks(datos['date'].values[::-1][::14][::-1])\nlegend = ax1.legend(framealpha=0,fancybox=True,fontsize='x-large')\nplt.setp(legend.get_texts(), color='navy')\n\nax1.grid(True,axis='y')\nf.text(0.1, 0.01, 'Data Source:{} {}'.format(fuente_positivos,path_fuente_positivos), horizontalalignment='left',\n verticalalignment='center', fontsize=14,color=color_titulos)\nf.text(0.9,0.01,'{}'.format(autor),\n verticalalignment='center',fontsize=14,horizontalalignment='left',color=color_titulos)\n[spine.set_visible(False) for spine in ax1.spines.values()]\n#f.tight_layout(w_pad=0,h_pad=5,pad=1)\n\nf.tight_layout()\nf.savefig('graficos/almendralejo_nuevos_casos.png',dpi=200,bbox_inches='tight')",
"_____no_output_____"
],
[
"barras[-1]",
"_____no_output_____"
],
[
"colores = sns.color_palette(\"Blues_r\", 5)\n\nplt.style.use('seaborn-white')\n#f.suptitle(titulo,fontsize=20,x=0.5,y=1,color=color_titulos)\nf = plt.figure(figsize=(10,5))\nax1 = f.add_subplot(1,1,1,)\n\n\ncolors = cm.RdYlBu_r(datos[casos_nuevos]/datos[casos_nuevos].max())\n\ncolors='navy'\nbarras = ax1.bar(datos['date'],datos[activos],color=colors)\n#ax1.plot(datos['date'],datos[avg],color='navy',lw=3,label='media 7 días')\n\nax1.yaxis.set_tick_params(labelsize=16,labelcolor='navy')\nax1.xaxis.set_tick_params(labelsize=10,labelcolor='navy',width=0)\nax1.set_title(titulo_activos,size=20,color='navy')\nlista_xticks=[]\nfor barra in barras:\n height = barra.get_height()\n try: \n ax1.text(barra.get_x() + barra.get_width()/2., 1*height,'%d' % int(height),ha='center',fontsize=16, va='bottom',color='navy')\n lista_xticks.append(barra.get_x())\n except:\n continue\n\nfmt = '%.0f' # Format you want the ticks, e.g. '40%'\nyticks = ticker.FormatStrFormatter(fmt)\nax1.yaxis.set_major_formatter(yticks)\n#ax1.set_yticks([0,10,20,30,40,50,60,70,80,90,100])\n\nXmajorFmt = DateFormatter('%-d-%b')\nax1.xaxis.set_major_formatter(XmajorFmt)\n#ax1.xaxis.set_major_locator(WeekdayLocator([0]))\n#ax1.set_xticks(ax1.get_xticks()[1:-1])\nax1.set_xticks(lista_xticks)\n\n\nlegend = ax1.legend(framealpha=0,fancybox=True,fontsize='x-large')\nplt.setp(legend.get_texts(), color='navy')\n\nax1.grid(True,axis='y')\nf.text(0.1, -0.0, 'Data Source:{} {}'.format(fuente_activos,path_fuente_activos), horizontalalignment='left',\n verticalalignment='center', fontsize=12,color=color_titulos)\nf.text(0.1,-0.05,'{}'.format(autor),\n fontsize=12,horizontalalignment='left',color=color_titulos)\nf.text(0.1,-0.1,'No se han encontrado datos para las fechas vacías'.format(autor),\n fontsize=12,horizontalalignment='left',color=color_titulos)\n\n[spine.set_visible(False) for spine in 
ax1.spines.values()]\nf.tight_layout(w_pad=0,h_pad=2,pad=1)\n\n#f.tight_layout()\nf.savefig('graficos/almendralejo_activos.png',dpi=200,bbox_inches='tight')",
"No handles with labels found to put in legend.\n"
],
[
"colores = sns.color_palette(\"Blues_r\", 5)\n\nplt.style.use('seaborn-white')\n#f.suptitle(titulo,fontsize=20,x=0.5,y=1,color=color_titulos)\nf = plt.figure(figsize=(10,5))\nax1 = f.add_subplot(1,1,1,)\n\n\ncolors = cm.RdYlBu_r(datos[casos_nuevos]/datos[casos_nuevos].max())\n\n\nax1.bar(datos['date'],datos[IA],color='navy',lw=3,alpha=.3)\n\nax1.yaxis.set_tick_params(labelsize=12,labelcolor='navy')\nax1.xaxis.set_tick_params(labelsize=10,labelcolor='navy',width=0)\nax1.set_title(titulo_IA,size=20,color='navy')\n\n\n\nfmt = '%00.0f' # Format you want the ticks, e.g. '40%'\nyticks = ticker.FormatStrFormatter(fmt)\nax1.yaxis.set_major_formatter(yticks)\n#ax1.set_yticks([0,10,20,30,40,50,60,70,80,90,100])\n\nXmajorFmt = DateFormatter('%-d-%b')\nax1.xaxis.set_major_formatter(XmajorFmt)\nax1.xaxis.set_major_locator(WeekdayLocator([0,4]))\n#ax1.set_xticks(ax1.get_xticks()[1:-1])\n\nlegend = ax1.legend(framealpha=0,fancybox=True,fontsize='x-large')\nplt.setp(legend.get_texts(), color='navy')\n\nax1.grid(True,axis='y')\n\nax2 = ax1.twinx()\nax2.scatter(datos['date'],datos[clave_ratio],color='red',lw=3,label='ratio ia14 a 7 días')\nax2.xaxis.set_tick_params(labelsize=10,labelcolor='navy',width=0)\nax2.yaxis.set_tick_params(labelsize=12,labelcolor='red',width=0)\nXmajorFmt = DateFormatter('%-d-%b')\nax2.xaxis.set_major_formatter(XmajorFmt)\nax2.xaxis.set_major_locator(WeekdayLocator([0]))\nax2.set_ylim(0,2)\nf.text(0.1, 0.01, 'Data Source:{} {}'.format(fuente_positivos,path_fuente_positivos), horizontalalignment='left',\n verticalalignment='center', fontsize=12,color=color_titulos)\nf.text(0.9,+0.01,'{}'.format(autor),\n fontsize=12,horizontalalignment='left',color=color_titulos)\n[spine.set_visible(False) for spine in ax1.spines.values()]\nf.tight_layout(w_pad=0,h_pad=2,pad=1)\n\n#f.tight_layout()\nf.savefig('graficos/almendralejo_ia14.png',dpi=200,bbox_inches='tight')",
"No handles with labels found to put in legend.\n"
],
[
"datos.to_csv('datos/almendralejo.csv',index=False,sep=',')",
"_____no_output_____"
],
[
"def grafico_EPG(datos):\n \n IA14='ia14'\n casos='Casos positivos'\n p7 = 'p7_avg7'\n fecha = 'date'\n titulo = 'Evolución de Empiric Potential Growth en {}\\n'.format('Almendralejo') \n fuente_modelo = 'Fuente del modelo @biocomsc \\n' \n titulo = titulo + fuente_modelo\n un_dia=timedelta(days=1)\n dos_dias=timedelta(days=2)\n cinco_dias=timedelta(days=5)\n seis_dias=timedelta(days=6)\n cuatro_dias=timedelta(days=4)\n path='graficos/'\n nombre_fichero=path+'EPG_de_{}'.format('Almendralejo')\n \n def calculo_p7(x):\n fecha_actual=x['date']\n try:\n valor = (datos.loc[datos['date']==fecha_actual+un_dia,casos].values[0]+\\\n datos.loc[datos['date']==(fecha_actual),casos].values[0]+\\\n datos.loc[datos['date']==(fecha_actual-un_dia),casos].values[0])/\\\n (datos.loc[datos['date']==(fecha_actual-cuatro_dias),casos].values[0]+\\\n datos.loc[datos['date']==(fecha_actual-seis_dias),casos].values[0]+\\\n datos.loc[datos['date']==(fecha_actual-cinco_dias),casos].values[0])\n except:\n valor = np.NaN\n return valor\n \n datos[p7] = datos.apply(calculo_p7,axis=1)\n datos[p7] = datos[p7].rolling(window=7).mean()\n datos = datos.reset_index()\n datos = datos.drop(['index'],axis=1)\n datos=datos[[fecha,IA14,p7]]\n \n color_rojo = 'red'\n color_verde = 'green'\n color_amarillo = 'yellow'\n colores = sns.color_palette(\"Blues\",datos.shape[0])[::]\n color_texto = 'navy'\n eje_X = ' Incidencia Acumulada a 14 días'\n eje_Y = 'Parámetro \\u03C1'+'7'\n\n fuente_modelo = 'https://biocomsc.upc.edu/en/shared/20200412_report_web_27.pdf'\n\n\n x_max = round((datos[IA14].max()//100+1)*100,-2)\n y_max = min(3,round(datos[p7].max(),1))\n rango = np.arange(1,x_max,1)\n\n fig,ax = plt.subplots(figsize=(10, 5), dpi=144)\n ax.set_xlim(0,x_max)\n ax.set_ylim(0,y_max)\n ax.grid(False,axis='both')\n \n ax.set_ylabel(eje_Y,size=14,color='black')\n ax.set_xlabel(eje_X,size=14,color='black')\n rango=np.arange(0,x_max,1)\n \n ax.fill_between(rango, 0, 30/rango,\n facecolor=\"blue\", # The fill color\n 
color=color_verde, # The outline color\n alpha=0.5) \n ax.fill_between(rango, 30/rango, 100/rango,\n facecolor='blue', # The fill color\n color=color_amarillo, # The outline color\n alpha=0.5) \n ax.fill_between(rango,100/rango,10,\n facecolor=\"blue\", # The fill color\n color=color_rojo, # The outline color\n alpha=0.5)\n \n ax.set_title(titulo, fontsize=12)\n\n ax.scatter(datos[IA14],datos[p7],color=colores,marker='o',\n s=2)\n\n x_init,y_init = datos.iloc[8][[IA14,p7]]\n for i in range(0,datos.shape[0]):\n ax.plot(datos.loc[i:i+1,IA14],datos.loc[i:i+1][p7],\n color=colores[::][i],lw=2,scaley=False)\n\n style = dict(size=8, color=colores[i],weight='bold')\n if (i==7) | (i % 7 == 0) | (i==(datos.shape[0]-2)):\n texto = ax.annotate(datos.loc[i,fecha].strftime('%d-%b'),\n xy=(datos.loc[i,IA14],datos.loc[i,p7]),\n xycoords='data',\n xytext=(-20, 10), \n textcoords='offset points',\n **style)\n plt.savefig(nombre_fichero,dpi=144) \n plt.show()\n return ",
"_____no_output_____"
],
[
"grafico_EPG(datos)",
"<ipython-input-19-9388e5c91090>:21: RuntimeWarning: divide by zero encountered in long_scalars\n valor = (datos.loc[datos['date']==fecha_actual+un_dia,casos].values[0]+\\\n<ipython-input-19-9388e5c91090>:61: RuntimeWarning: divide by zero encountered in true_divide\n ax.fill_between(rango, 0, 30/rango,\n<ipython-input-19-9388e5c91090>:65: RuntimeWarning: divide by zero encountered in true_divide\n ax.fill_between(rango, 30/rango, 100/rango,\n<ipython-input-19-9388e5c91090>:69: RuntimeWarning: divide by zero encountered in true_divide\n ax.fill_between(rango,100/rango,10,\n"
],
[
"colores = sns.color_palette(\"Blues_r\", 5)\n\nplt.style.use('seaborn-white')\n#f.suptitle(titulo,fontsize=20,x=0.5,y=1,color=color_titulos)\nf = plt.figure(figsize=(10,5))\nax1 = f.add_subplot(1,1,1,)\n\n\ncolors = cm.RdYlBu_r(datos[casos_nuevos]/datos[casos_nuevos].max())\n\n\nax1.fill_between(datos['date'],datos[IA],color='navy',lw=5,alpha=.3)\n\nax1.yaxis.set_tick_params(labelsize=20,labelcolor='navy')\nax1.xaxis.set_tick_params(labelsize=14,rotation=45,labelcolor='navy',width=0)\nax1.set_title(titulo_IA,size=20,color='navy')\n\n\n\nfmt = '%00.0f' # Format you want the ticks, e.g. '40%'\nyticks = ticker.FormatStrFormatter(fmt)\nax1.yaxis.set_major_formatter(yticks)\n#ax1.set_yticks([0,10,20,30,40,50,60,70,80,90,100])\n\nXmajorFmt = DateFormatter('%-d-%b')\nax1.xaxis.set_major_formatter(XmajorFmt)\nax1.xaxis.set_major_locator(WeekdayLocator([0,4]))\n#ax1.set_xticks(ax1.get_xticks()[1:-1])\n\nlegend = ax1.legend(framealpha=0,fancybox=True,fontsize='x-large')\nplt.setp(legend.get_texts(), color='navy')\n\nax1.grid(True,axis='both')\n\nax2 = ax1.twinx()\nax2.plot(datos['date'],datos[clave_ratio],color='red',lw=.5,label='ratio ia14 a 7 días')\nax2.xaxis.set_tick_params(labelsize=10,labelcolor='navy',width=0)\nax2.yaxis.set_tick_params(labelsize=20,labelcolor='red',width=0)\nXmajorFmt = DateFormatter('%-d-%b')\nax2.xaxis.set_major_formatter(XmajorFmt)\nax2.xaxis.set_major_locator(DayLocator([1,10,20]))\nax2.set_ylim(0,2)\nax2.set_xticks(datos['date'].values[::-1][::14][::-1])\nstyle = dict(size=20, color=color_titulos,weight='normal')\ntexto = ax1.annotate('{:0.0f}'.format(datos.iloc[-1][IA]),\n xy=(datos.iloc[-1]['date'],datos.iloc[-1][IA]),\n xycoords='data',\n xytext=(0, 0), \n textcoords='offset points',\n **style)\n\nf.text(0.1, 0.01, 'Data Source:{} {}'.format(fuente_positivos,path_fuente_positivos), horizontalalignment='left',\n verticalalignment='center', fontsize=12,color=color_titulos)\nf.text(0.9,+0.01,'{}'.format(autor),\n 
fontsize=12,horizontalalignment='left',color=color_titulos)\n[spine.set_visible(False) for spine in ax1.spines.values()]\n[spine.set_visible(False) for spine in ax2.spines.values()]\nf.tight_layout(w_pad=0,h_pad=2,pad=1)\n\n#f.tight_layout()\nf.savefig('graficos/almendralejo_ia14.png',dpi=200,bbox_inches='tight')",
"No handles with labels found to put in legend.\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e741e300e25cd9b33fc8372f991042db7b252986 | 8,127 | ipynb | Jupyter Notebook | gae/.ipynb_checkpoints/train-checkpoint.ipynb | UDFGEU87e/my_gae | dcda1a1baaa66dffb0accca6503bdeaa81ff5179 | [
"MIT"
] | null | null | null | gae/.ipynb_checkpoints/train-checkpoint.ipynb | UDFGEU87e/my_gae | dcda1a1baaa66dffb0accca6503bdeaa81ff5179 | [
"MIT"
] | null | null | null | gae/.ipynb_checkpoints/train-checkpoint.ipynb | UDFGEU87e/my_gae | dcda1a1baaa66dffb0accca6503bdeaa81ff5179 | [
"MIT"
] | null | null | null | 37.279817 | 119 | 0.546819 | [
[
[
"from __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport os\n\n# Train on CPU (hide GPU) due to memory constraints\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0,1\"\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import average_precision_score\n\nfrom gae.optimizer import OptimizerAE, OptimizerVAE\nfrom gae.input_data import load_data\nfrom gae.model import GCNModelAE, GCNModelVAE\nfrom gae.preprocessing import preprocess_graph, construct_feed_dict, sparse_to_tuple, mask_test_edges\n\ndef del_all_flags(FLAGS):\n flags_dict = FLAGS._flags() \n keys_list = [keys for keys in flags_dict] \n for keys in keys_list:\n FLAGS.__delattr__(keys)\n\ndel_all_flags(tf.flags.FLAGS)\n\n\n# Settings\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\nflags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')\nflags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')\nflags.DEFINE_integer('hidden2', 16, 'Number of units in hidden layer 2.')\nflags.DEFINE_float('weight_decay', 0., 'Weight for L2 loss on embedding matrix.')\nflags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')\n\nflags.DEFINE_string('model', 'gcn_ae', 'Model string.')\nflags.DEFINE_string('dataset', 'cora', 'Dataset string.')\nflags.DEFINE_integer('features', 1, 'Whether to use features (1) or not (0).')\n\nmodel_str = FLAGS.model\ndataset_str = FLAGS.dataset\n\n# Load data\nadj, features = load_data(dataset_str)\n\n# Store original adjacency matrix (without diagonal entries) for later\nadj_orig = adj\nadj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)\nadj_orig.eliminate_zeros()\n\nadj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj)\n",
"_____no_output_____"
],
[
"adj = adj_train\n\nif FLAGS.features == 0:\n features = sp.identity(features.shape[0]) # featureless\n\n# Some preprocessing\nadj_norm = preprocess_graph(adj)\n\n# Define placeholders\nplaceholders = {\n 'features': tf.sparse_placeholder(tf.float32),\n 'adj': tf.sparse_placeholder(tf.float32),\n 'adj_orig': tf.sparse_placeholder(tf.float32),\n 'dropout': tf.placeholder_with_default(0., shape=())\n}\n\nnum_nodes = adj.shape[0]\n\nfeatures = sparse_to_tuple(features.tocoo())\nnum_features = features[2][1]\nfeatures_nonzero = features[1].shape[0]\n\n# Create model\nmodel = None\nif model_str == 'gcn_ae':\n model = GCNModelAE(placeholders, num_features, features_nonzero)\nelif model_str == 'gcn_vae':\n model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero)\n\npos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()\nnorm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)\n\n# Optimizer\nwith tf.name_scope('optimizer'):\n if model_str == 'gcn_ae':\n opt = OptimizerAE(preds=model.reconstructions,\n labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],\n validate_indices=False), [-1]),\n pos_weight=pos_weight,\n norm=norm)\n elif model_str == 'gcn_vae':\n opt = OptimizerVAE(preds=model.reconstructions,\n labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],\n validate_indices=False), [-1]),\n model=model, num_nodes=num_nodes,\n pos_weight=pos_weight,\n norm=norm)\n\n# Initialize session\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\ncost_val = []\nacc_val = []\n\n\ndef get_roc_score(edges_pos, edges_neg, emb=None):\n if emb is None:\n feed_dict.update({placeholders['dropout']: 0})\n emb = sess.run(model.z_mean, feed_dict=feed_dict)\n\n def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n # Predict on test set of edges\n adj_rec = np.dot(emb, emb.T)\n preds = []\n pos = []\n for e in edges_pos:\n preds.append(sigmoid(adj_rec[e[0], e[1]]))\n 
pos.append(adj_orig[e[0], e[1]])\n\n preds_neg = []\n neg = []\n for e in edges_neg:\n preds_neg.append(sigmoid(adj_rec[e[0], e[1]]))\n neg.append(adj_orig[e[0], e[1]])\n\n preds_all = np.hstack([preds, preds_neg])\n labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])\n roc_score = roc_auc_score(labels_all, preds_all)\n ap_score = average_precision_score(labels_all, preds_all)\n\n return roc_score, ap_score\n\n\ncost_val = []\nacc_val = []\nval_roc_score = []\n\nadj_label = adj_train + sp.eye(adj_train.shape[0])\nadj_label = sparse_to_tuple(adj_label)\n\n# Train model\nfor epoch in range(FLAGS.epochs):\n\n t = time.time()\n # Construct feed dictionary\n feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Run single weight update\n outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)\n\n # Compute average loss\n avg_cost = outs[1]\n avg_accuracy = outs[2]\n\n roc_curr, ap_curr = get_roc_score(val_edges, val_edges_false)\n val_roc_score.append(roc_curr)\n\n print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(avg_cost),\n \"train_acc=\", \"{:.5f}\".format(avg_accuracy), \"val_roc=\", \"{:.5f}\".format(val_roc_score[-1]),\n \"val_ap=\", \"{:.5f}\".format(ap_curr),\n \"time=\", \"{:.5f}\".format(time.time() - t))\n\nprint(\"Optimization Finished!\")\n\nroc_score, ap_score = get_roc_score(test_edges, test_edges_false)\nprint('Test ROC score: ' + str(roc_score))\nprint('Test AP score: ' + str(ap_score))\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e741ea33db082070fe7c208d6506b708240d5b8f | 5,058 | ipynb | Jupyter Notebook | Covid_Project1.ipynb | saikale/Deck | 7f9843ed14390c1e65e9cae3b96732a2da36437f | [
"Apache-2.0"
] | null | null | null | Covid_Project1.ipynb | saikale/Deck | 7f9843ed14390c1e65e9cae3b96732a2da36437f | [
"Apache-2.0"
] | null | null | null | Covid_Project1.ipynb | saikale/Deck | 7f9843ed14390c1e65e9cae3b96732a2da36437f | [
"Apache-2.0"
] | null | null | null | 27.048128 | 223 | 0.476868 | [
[
[
"<a href=\"https://colab.research.google.com/github/saikale/Deck/blob/main/Covid_Project1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"from bs4 import BeautifulSoup\nimport html5lib\nimport requests\n\n",
"_____no_output_____"
],
[
"def population():\n r = requests.get(\"https://www.worldometers.info/world-population/india-population/\")\n soup = BeautifulSoup(r.content, 'html.parser')\n target = soup.find(\n \"div\", class_=\"col-md-8 country-pop-description\").find_all_next(\"strong\")\n return (target.text)",
"_____no_output_____"
],
[
"total_pop= population()\ntotal_pop=total_pop.replace(\",\",\"\")\nprint(total_pop)\n",
"1397013045\n"
],
[
"def vaccination_done():\n r = requests.get('https://www.mygov.in/covid-19')\n soup = BeautifulSoup(r.content,'html.parser')\n target = soup.find(\n \"div\", class_=\"total-vcount\").find_all_next(\"strong\")\n return (target.text)",
"_____no_output_____"
],
[
"total_vaccinated = vaccination_done()\ntotal_vaccinated=total_vaccinated.replace(\",\",\"\")\n#print(total_vaccinated)\n",
"_____no_output_____"
],
[
"def vaccination_done():\n r = requests.get('https://www.mygov.in/covid-19')\n soup = BeautifulSoup(r.content,'html.parser')\n target = soup.find(\n \"div\", class_=\"total-vcount\").find_all_next(\"strong\")[0]\n return (target.text)\n\n\ntotal_vaccinated = vaccination_done()\ntotal_vaccinated=total_vaccinated.replace(\",\",\"\")\n#print(total_vaccinated)\n\n",
"_____no_output_____"
],
[
"def avg_vaccination_daily():\n r = requests.get('https://www.mygov.in/covid-19')\n soup = BeautifulSoup(r.content, 'html.parser')\n target = soup.find(\"div\", class_=\"yday-vcount\").find_all_next(\"strong\")\n return (target.text)",
"_____no_output_____"
],
[
"daily_vaccination = avg_vaccination_daily()\ndaily_vaccination=daily_vaccination.replace(\",\",\"\")\n#print(daily_vaccination)\n",
"_____no_output_____"
],
[
"vaccination_left = float(total_pop) - float(total_vaccinated)\ndays_left = vaccination_left / float(daily_vaccination)",
"_____no_output_____"
],
[
"print('Total days left to complete vaccination process in India is :' + str(round(days_left)))",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e741f6d2556dab3f7578cbfc48b4bb9d987adaea | 639,182 | ipynb | Jupyter Notebook | src/demo_fit_MoG.ipynb | sjchoi86/density_network | 8170fba38567037a432c44fa2909076544a94b75 | [
"MIT"
] | 23 | 2018-09-11T05:32:40.000Z | 2020-07-24T00:08:54.000Z | src/demo_fit_MoG.ipynb | sjchoi86/density_network | 8170fba38567037a432c44fa2909076544a94b75 | [
"MIT"
] | null | null | null | src/demo_fit_MoG.ipynb | sjchoi86/density_network | 8170fba38567037a432c44fa2909076544a94b75 | [
"MIT"
] | 6 | 2018-10-12T21:03:06.000Z | 2019-05-24T07:02:13.000Z | 1,618.182278 | 60,900 | 0.960625 | [
[
[
"# Fit Data to a mixutre of Gaussian distribution",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom mog_class import MoG_class,MoG_indep_class\nfrom util import nzr\n%matplotlib inline \nprint (\"Packages loaded.\")",
"Packages loaded.\n"
]
],
[
[
"### GMM with 'MoG_indep_class'\n- Let each output dimension to have each mixture probabilities'",
"_____no_output_____"
]
],
[
[
"# Instantiate MoG class\ntf.reset_default_graph() # Reset graph\nsess = tf.Session()\nM = MoG_indep_class(_x_dim=2,_k=5,_sess=sess)\nprint (\"MoG class instantiated.\")",
"MoG class instantiated.\n"
],
[
"# Training data\nmu1,mu2,mu3,mu4 = np.array([10,0]),np.array([10,10]),np.array([0,10]),np.array([0,0])\nvar1,var2,var3,var4 = 1/4,1,4,1/16\nn1,n2,n3,n4 = 500,500,500,500\nx_train = np.concatenate((mu1+np.sqrt(var1)*np.random.randn(n1,2),\n mu2+np.sqrt(var2)*np.random.randn(n2,2),\n mu3+np.sqrt(var3)*np.random.randn(n3,2),\n mu4+np.sqrt(var4)*np.random.randn(n4,2)))\nn_train = x_train.shape[0]\nx_train = nzr(x_train).nzd_data # normalize training data \n# Train\nmax_iter = 1000\nfor iter in range(max_iter):\n x_batch = x_train[np.random.permutation(n_train)[:128],:] # current batch\n _,cost_val = M.sess.run([M.optm,M.cost],feed_dict={M.x:x_batch})\n # Debug \n if ((iter%(max_iter//5))==0) | (iter==(max_iter-1)):\n M.plot_samples(_n_sample=1000,_x_train=x_train,\n _title_str='[%03d/%d] Blue: samples, Red: training data, Green: GMM pdf'%(iter,max_iter),\n _figsize=(18,4))\n print (\"[%03d/%d] cost:%.4f\"%(iter,max_iter,cost_val))",
"_____no_output_____"
]
],
[
[
"### Train GMM with 'MoG_class'\n- Let each mixture to model multivariate Gaussian",
"_____no_output_____"
]
],
[
[
"# Instantiate MoG class\ntf.reset_default_graph() # Reset graph\nsess = tf.Session()\nM = MoG_class(_x_dim=2,_k=5,_sess=sess)\nprint (\"MoG class instantiated.\")",
"MoG class instantiated.\n"
],
[
"# Training data\nmu1,mu2,mu3,mu4 = np.array([10,0]),np.array([10,10]),np.array([0,10]),np.array([0,0])\nvar1,var2,var3,var4 = 1/4,1,4,1/16\nn1,n2,n3,n4 = 500,500,500,500\nx_train = np.concatenate((mu1+np.sqrt(var1)*np.random.randn(n1,2),\n mu2+np.sqrt(var2)*np.random.randn(n2,2),\n mu3+np.sqrt(var3)*np.random.randn(n3,2),\n mu4+np.sqrt(var4)*np.random.randn(n4,2)))\nn_train = x_train.shape[0]\nx_train = nzr(x_train).nzd_data # normalize training data \n# Train\nmax_iter = 1000\nfor iter in range(max_iter):\n x_batch = x_train[np.random.permutation(n_train)[:128],:] # current batch\n _,cost_val = M.sess.run([M.optm,M.cost],feed_dict={M.x:x_batch})\n # Debug \n if ((iter%(max_iter//5))==0) | (iter==(max_iter-1)):\n M.plot_samples(_n_sample=1000,_x_train=x_train,\n _title_str='[%03d/%d] Blue: samples, Red: training data'%(iter,max_iter))\n print (\"[%03d/%d] cost:%.4f\"%(iter,max_iter,cost_val))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7420dcc328cd62dcabd7f06133bd485e52fe9ac | 13,213 | ipynb | Jupyter Notebook | scripts.ipynb | sizhky/torch_snippets | b7d260ead4752bf4a27ccbbb997f1966f14db90e | [
"Apache-2.0",
"MIT"
] | 12 | 2020-05-03T14:45:35.000Z | 2022-02-17T11:47:12.000Z | scripts.ipynb | sizhky/torch_snippets | b7d260ead4752bf4a27ccbbb997f1966f14db90e | [
"Apache-2.0",
"MIT"
] | 1 | 2022-02-26T10:43:52.000Z | 2022-02-26T10:43:52.000Z | scripts.ipynb | sizhky/torch_snippets | b7d260ead4752bf4a27ccbbb997f1966f14db90e | [
"Apache-2.0",
"MIT"
] | 2 | 2021-07-25T10:20:23.000Z | 2022-03-03T13:26:43.000Z | 42.349359 | 130 | 0.654053 | [
[
[
"### Parse all notebooks and update the library files",
"_____no_output_____"
]
],
[
[
"from nbdev.export import *\nnotebook2script()",
"Converted charts.ipynb.\nConverted index.ipynb.\nConverted logging.ipynb.\nConverted markups.ipynb.\nConverted misc.ipynb.\nConverted paths.ipynb.\nConverted registry.ipynb.\nConverted report.ipynb.\nConverted show.ipynb.\nConverted sklegos.ipynb.\n"
]
],
[
[
"### Parse all notebooks and update the docs",
"_____no_output_____"
]
],
[
[
"from nbdev.export2html import nbdev_build_docs\nnbdev_build_docs(n_workers=0)",
"converting: /mnt/sda1/code/torch_snippets/nbs/charts.ipynb\nconverting: /mnt/sda1/code/torch_snippets/nbs/logging.ipynb\nconverting: /mnt/sda1/code/torch_snippets/nbs/markups.ipynb\nconverting: /mnt/sda1/code/torch_snippets/nbs/paths.ipynb\nconverting: /mnt/sda1/code/torch_snippets/nbs/registry.ipynb\nconverting: /mnt/sda1/code/torch_snippets/nbs/sklegos.ipynb\nconverting /mnt/sda1/code/torch_snippets/nbs/index.ipynb to README.md\n"
]
],
[
[
"### Update the notebooks if there have been any adhoc changes (bug-fixes) directly made in library files",
"_____no_output_____"
]
],
[
[
"from nbdev.sync import nbdev_update_lib\nnbdev_update_lib()",
"Converted charts.py.\nConverted logger.py.\nConverted markup.py.\nConverted paths.py.\nConverted registry.py.\nConverted sklegos.py.\n"
],
[
"!make release",
"rm -rf dist\npython setup.py sdist bdist_wheel\nrunning sdist\nrunning egg_info\nwriting torch_snippets.egg-info/PKG-INFO\nwriting dependency_links to torch_snippets.egg-info/dependency_links.txt\nwriting entry points to torch_snippets.egg-info/entry_points.txt\nwriting requirements to torch_snippets.egg-info/requires.txt\nwriting top-level names to torch_snippets.egg-info/top_level.txt\nreading manifest file 'torch_snippets.egg-info/SOURCES.txt'\nreading manifest template 'MANIFEST.in'\nwarning: no files found matching 'CONTRIBUTING.md'\nwarning: no previously-included files matching '__pycache__' found under directory '*'\nwriting manifest file 'torch_snippets.egg-info/SOURCES.txt'\nrunning check\ncreating torch_snippets-0.465\ncreating torch_snippets-0.465/torch_snippets\ncreating torch_snippets-0.465/torch_snippets.egg-info\ncreating torch_snippets-0.465/torch_snippets/thinc_parser\ncopying files to torch_snippets-0.465...\ncopying LICENSE -> torch_snippets-0.465\ncopying LICENSE.txt -> torch_snippets-0.465\ncopying MANIFEST.in -> torch_snippets-0.465\ncopying README.md -> torch_snippets-0.465\ncopying settings.ini -> torch_snippets-0.465\ncopying setup.cfg -> torch_snippets-0.465\ncopying setup.py -> torch_snippets-0.465\ncopying torch_snippets/__init__.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/_nbdev.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/charts.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/fastcores.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/loader.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/logger.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/markup.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/misc.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/paths.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/registry.py -> torch_snippets-0.465/torch_snippets\ncopying 
torch_snippets/sklegos.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets/torch_loader.py -> torch_snippets-0.465/torch_snippets\ncopying torch_snippets.egg-info/PKG-INFO -> torch_snippets-0.465/torch_snippets.egg-info\ncopying torch_snippets.egg-info/SOURCES.txt -> torch_snippets-0.465/torch_snippets.egg-info\ncopying torch_snippets.egg-info/dependency_links.txt -> torch_snippets-0.465/torch_snippets.egg-info\ncopying torch_snippets.egg-info/entry_points.txt -> torch_snippets-0.465/torch_snippets.egg-info\ncopying torch_snippets.egg-info/not-zip-safe -> torch_snippets-0.465/torch_snippets.egg-info\ncopying torch_snippets.egg-info/requires.txt -> torch_snippets-0.465/torch_snippets.egg-info\ncopying torch_snippets.egg-info/top_level.txt -> torch_snippets-0.465/torch_snippets.egg-info\ncopying torch_snippets/thinc_parser/__init__.py -> torch_snippets-0.465/torch_snippets/thinc_parser\ncopying torch_snippets/thinc_parser/parser.py -> torch_snippets-0.465/torch_snippets/thinc_parser\nWriting torch_snippets-0.465/setup.cfg\ncreating dist\nCreating tar archive\nremoving 'torch_snippets-0.465' (and everything under it)\nrunning bdist_wheel\nrunning build\nrunning build_py\ncopying torch_snippets/charts.py -> build/lib/torch_snippets\ncopying torch_snippets/loader.py -> build/lib/torch_snippets\ncopying torch_snippets/logger.py -> build/lib/torch_snippets\ncopying torch_snippets/markup.py -> build/lib/torch_snippets\ncopying torch_snippets/paths.py -> build/lib/torch_snippets\ncopying torch_snippets/registry.py -> build/lib/torch_snippets\ncopying torch_snippets/sklegos.py -> build/lib/torch_snippets\ncopying torch_snippets/_nbdev.py -> build/lib/torch_snippets\ncopying torch_snippets/__init__.py -> build/lib/torch_snippets\ncopying torch_snippets/thinc_parser/parser.py -> build/lib/torch_snippets/thinc_parser\ninstalling to build/bdist.linux-x86_64/wheel\nrunning install\nrunning install_lib\ncreating build/bdist.linux-x86_64/wheel\ncreating 
build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/charts.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/fastcores.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/loader.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/logger.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/markup.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/misc.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/paths.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/registry.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/sklegos.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncreating build/bdist.linux-x86_64/wheel/torch_snippets/thinc_parser\ncopying build/lib/torch_snippets/thinc_parser/parser.py -> build/bdist.linux-x86_64/wheel/torch_snippets/thinc_parser\ncopying build/lib/torch_snippets/thinc_parser/__init__.py -> build/bdist.linux-x86_64/wheel/torch_snippets/thinc_parser\ncopying build/lib/torch_snippets/torch_loader.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/_nbdev.py -> build/bdist.linux-x86_64/wheel/torch_snippets\ncopying build/lib/torch_snippets/__init__.py -> build/bdist.linux-x86_64/wheel/torch_snippets\nrunning install_egg_info\nCopying torch_snippets.egg-info to build/bdist.linux-x86_64/wheel/torch_snippets-0.465-py3.9.egg-info\nrunning install_scripts\nadding license file \"LICENSE\" (matched pattern \"LICEN[CS]E*\")\nadding license file \"LICENSE.txt\" (matched pattern \"LICEN[CS]E*\")\ncreating build/bdist.linux-x86_64/wheel/torch_snippets-0.465.dist-info/WHEEL\ncreating 'dist/torch_snippets-0.465-py3-none-any.whl' and adding 'build/bdist.linux-x86_64/wheel' to it\nadding 
'torch_snippets/__init__.py'\nadding 'torch_snippets/_nbdev.py'\nadding 'torch_snippets/charts.py'\nadding 'torch_snippets/fastcores.py'\nadding 'torch_snippets/loader.py'\nadding 'torch_snippets/logger.py'\nadding 'torch_snippets/markup.py'\nadding 'torch_snippets/misc.py'\nadding 'torch_snippets/paths.py'\nadding 'torch_snippets/registry.py'\nadding 'torch_snippets/sklegos.py'\nadding 'torch_snippets/torch_loader.py'\nadding 'torch_snippets/thinc_parser/__init__.py'\nadding 'torch_snippets/thinc_parser/parser.py'\nadding 'torch_snippets-0.465.dist-info/LICENSE'\nadding 'torch_snippets-0.465.dist-info/LICENSE.txt'\nadding 'torch_snippets-0.465.dist-info/METADATA'\nadding 'torch_snippets-0.465.dist-info/WHEEL'\nadding 'torch_snippets-0.465.dist-info/entry_points.txt'\nadding 'torch_snippets-0.465.dist-info/top_level.txt'\nadding 'torch_snippets-0.465.dist-info/RECORD'\nremoving build/bdist.linux-x86_64/wheel\ntwine upload --repository pypi dist/*\nUploading distributions to https://upload.pypi.org/legacy/\nUploading torch_snippets-0.465-py3-none-any.whl\n100%|██████████████████████████████████████| 50.9k/50.9k [00:05<00:00, 10.1kB/s]\nUploading torch_snippets-0.465.tar.gz\n100%|██████████████████████████████████████| 49.5k/49.5k [00:02<00:00, 19.1kB/s]\n\nView at:\nhttps://pypi.org/project/torch-snippets/0.465/\nnbdev_conda_package\nnbdev_bump_version\n/bin/bash: line 1: nbdev_conda_package: command not found\nOld version: 0.465\nTraceback (most recent call last):\n File \"/home/yyr/miniconda3/bin/nbdev_bump_version\", line 8, in <module>\n sys.exit(nbdev_bump_version())\n File \"/home/yyr/miniconda3/lib/python3.9/site-packages/fastcore/script.py\", line 107, in _f\n tfunc(**merge(args, args_from_prog(func, xtra)))\n File \"/home/yyr/miniconda3/lib/python3.9/site-packages/nbdev/cli.py\", line 31, in nbdev_bump_version\n cfg.d['version'] = bump_version(get_config().version, part)\n File \"/home/yyr/miniconda3/lib/python3.9/site-packages/nbdev/cli.py\", line 21, in 
bump_version\n version[part] = str(int(version[part]) + 1)\nIndexError: list index out of range\nmake: *** [Makefile:25: release] Error 1\n"
]
],
[
[
"### Create a softlink",
"_____no_output_____"
]
],
[
[
"'''\n%cd nbs\n!ln -s ../<libname> .\n%cd ..\n''';",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7421f58d2d6fba76e7ab0fdb8c2a4f596131f34 | 136,144 | ipynb | Jupyter Notebook | chapter2-6/chapter3_kmeans.py.ipynb | jinheeson1008/first-steps-with-tensorflow | bbacaac4e9c1e00888dd71f798c1bb75a98a5824 | [
"MIT"
] | 122 | 2016-08-29T07:54:01.000Z | 2021-11-24T11:09:41.000Z | chapter2-6/chapter3_kmeans.py.ipynb | jinheeson1008/first-steps-with-tensorflow | bbacaac4e9c1e00888dd71f798c1bb75a98a5824 | [
"MIT"
] | 2 | 2017-11-09T08:19:53.000Z | 2018-12-28T02:51:48.000Z | chapter2-6/chapter3_kmeans.py.ipynb | jinheeson1008/first-steps-with-tensorflow | bbacaac4e9c1e00888dd71f798c1bb75a98a5824 | [
"MIT"
] | 113 | 2016-08-29T07:55:13.000Z | 2022-02-09T20:34:58.000Z | 440.595469 | 66,588 | 0.94353 | [
[
[
"%pylab\n%matplotlib inline\n%load_ext watermark\n%watermark -v -p numpy,pandas,tensorflow",
"Using matplotlib backend: Qt5Agg\nPopulating the interactive namespace from numpy and matplotlib\nCPython 3.5.6\nIPython 6.5.0\n\nnumpy 1.15.2\npandas 0.23.4\ntensorflow 1.13.1\n"
]
],
[
[
"필요한 패키지를 로드합니다.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport tensorflow as tf",
"_____no_output_____"
],
[
"import seaborn as sns",
"_____no_output_____"
]
],
[
[
"1000개의 데이터를 난수로 생성합니다. 대략 절반 정도는 평균:0.5, 표준편차:0.6의 x값과 평균:0.3, 표준편차:0.9의 y값을 가지고 나머지 절만 정도는 평균:2.5, 표준편차:0.4의 x값과 평균:0.8, 표준편차:0.5의 y값을 가집니다.",
"_____no_output_____"
]
],
[
[
"num_vectors = 1000\nnum_clusters = 4\nnum_steps = 100\nvector_values = []\nfor i in range(num_vectors):\n if np.random.random() > 0.5:\n vector_values.append([np.random.normal(0.5, 0.6),\n np.random.normal(0.3, 0.9)])\n else:\n vector_values.append([np.random.normal(2.5, 0.4),\n np.random.normal(0.8, 0.5)])",
"_____no_output_____"
]
],
[
[
"vector_values 의 2차원 배열의 값을 각각 데이터프레임의 컬럼으로 지정합니다. 시본으로 그래프를 그립니다.",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({\"x\": [v[0] for v in vector_values], \n \"y\": [v[1] for v in vector_values]})\nsns.lmplot(\"x\", \"y\", data=df, fit_reg=False, height=7)\nplt.show()",
"_____no_output_____"
]
],
[
[
"vector_values를 사용하여 constant를 만들고 초기 센트로이드 네개를 랜덤하게 선택합니다. 그런 후에 vectors, centroids 텐서에 각각 차원을 추가합니다.",
"_____no_output_____"
]
],
[
[
"vectors = tf.constant(vector_values)\ncentroids = tf.Variable(tf.slice(tf.random_shuffle(vectors), [0,0], [num_clusters,-1]))\nexpanded_vectors = tf.expand_dims(vectors, 0)\nexpanded_centroids = tf.expand_dims(centroids, 1)\n\nprint(expanded_vectors.get_shape())\nprint(expanded_centroids.get_shape())",
"WARNING:tensorflow:From /home/haesun/anaconda3/envs/first-steps-with-tensorflow/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n(1, 1000, 2)\n(4, 1, 2)\n"
]
],
[
[
"각 데이터 포인트에서 가장 가까운 센트로이드의 인덱스를 계산합니다.",
"_____no_output_____"
]
],
[
[
"distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2)\nassignments = tf.argmin(distances, 0)",
"_____no_output_____"
]
],
[
[
"각 클러스터의 평균 값을 계산하여 새로운 센트로이드를 구합니다.",
"_____no_output_____"
]
],
[
[
"means = tf.concat([\n tf.reduce_mean(\n tf.gather(vectors, \n tf.reshape(\n tf.where(\n tf.equal(assignments, c)\n ),[1,-1])\n ),reduction_indices=[1])\n for c in range(num_clusters)], 0)\n\nupdate_centroids = tf.assign(centroids, means)",
"_____no_output_____"
]
],
[
[
"변수를 초기화하고 세션을 시작합니다.",
"_____no_output_____"
]
],
[
[
"init_op = tf.global_variables_initializer()\n\nsess = tf.Session()\nsess.run(init_op)",
"_____no_output_____"
]
],
[
[
"100번의 반복을 하여 센트로이드를 계산하고 결과를 출력합니다.",
"_____no_output_____"
]
],
[
[
"for step in range(num_steps):\n _, centroid_values, assignment_values = sess.run([update_centroids, centroids, assignments])\n\nprint(\"centroids\")\nprint(centroid_values)",
"centroids\n[[ 0.38575467 0.2242536 ]\n [ 0.5428277 1.3617442 ]\n [ 2.4923067 0.82333124]\n [ 0.6738804 -0.9226212 ]]\n"
]
],
[
[
"vector_values 데이터를 클러스터에 따라 색깔을 구분하여 산포도를 그립니다.",
"_____no_output_____"
]
],
[
[
"data = {\"x\": [], \"y\": [], \"cluster\": []}\nfor i in range(len(assignment_values)):\n data[\"x\"].append(vector_values[i][0])\n data[\"y\"].append(vector_values[i][1])\n data[\"cluster\"].append(assignment_values[i])\ndf = pd.DataFrame(data)\nsns.lmplot(\"x\", \"y\", data=df, \n fit_reg=False, height=7, \n hue=\"cluster\", legend=False)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74236024f0ef120b249de737d0a0e7e1ed6df5b | 37,516 | ipynb | Jupyter Notebook | pytorch/02. linear regression.ipynb | zzsza/TIL | 8e623ebcbeca0f3fe1acbc2900f992a3c462a50b | [
"MIT"
] | 22 | 2017-10-30T06:47:12.000Z | 2020-04-15T11:50:31.000Z | pytorch/02. linear regression.ipynb | zzsza/TIL | 8e623ebcbeca0f3fe1acbc2900f992a3c462a50b | [
"MIT"
] | null | null | null | pytorch/02. linear regression.ipynb | zzsza/TIL | 8e623ebcbeca0f3fe1acbc2900f992a3c462a50b | [
"MIT"
] | 17 | 2017-10-30T01:30:51.000Z | 2021-08-31T18:41:15.000Z | 58.436137 | 12,762 | 0.716041 | [
[
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.init as init\nfrom visdom import Visdom\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"[Pytorch Document](http://pytorch.org/docs/master/optim.html)",
"_____no_output_____"
]
],
[
[
"# hyper parameters\ninput_size = 2\noutput_size = 2\nnum_epochs = 60\nlearning_rate = 0.001\n",
"_____no_output_____"
],
[
"# toy dataset\nx_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168], \n [9.779], [6.182], [7.59], [2.167], [7.042], \n [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)\n\ny_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573], \n [3.366], [2.596], [2.53], [1.221], [2.827], \n [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)",
"_____no_output_____"
],
[
"x_train.shape",
"_____no_output_____"
],
[
"y_train.shape",
"_____no_output_____"
],
[
"# lenear regression model\nclass LinearRegression(nn.Module):\n def __init__(self, input_size, output_size):\n super(LinearRegression, self).__init__()\n self.linear = nn.Linear(input_size, output_size)\n self.linear2 = nn.Linear(2,1)\n self.sigmoid = nn.Sigmoid()\n \n def forward(self, x):\n out = self.linear(x)\n return out\n\n ",
"_____no_output_____"
],
[
"# super 는 다중상속 환경에서 빛을 발함",
"_____no_output_____"
]
],
[
[
"[super](https://docs.python.org/3/library/functions.html#super)",
"_____no_output_____"
]
],
[
[
"model = LinearRegression(input_size, output_size)",
"_____no_output_____"
],
[
"model",
"_____no_output_____"
],
[
"for i in model.parameters():\n print(i)",
"Parameter containing:\n 0.1058 0.0408\n 0.3093 0.7037\n[torch.FloatTensor of size 2x2]\n\nParameter containing:\n 0.0542\n 0.3221\n[torch.FloatTensor of size 2]\n\nParameter containing:\n-0.6821 -0.4828\n[torch.FloatTensor of size 1x2]\n\nParameter containing:\n1.00000e-02 *\n 5.7143\n[torch.FloatTensor of size 1]\n\n"
],
[
"# loss and optimizer\ncriterion = nn.MSELoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)",
"_____no_output_____"
],
[
"for epoch in range(num_epochs):\n # convert numpy array to torch variable\n inputs = Variable(torch.from_numpy(x_train))\n targets = Variable(torch.from_numpy(y_train))\n \n optimizer.zero_grad()\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n \n if (epoch+1) % 5 == 0:\n print('Epoch [{}.{}], Loss: {:.7f}'.format(epoch+1, num_epochs, loss.data[0]))",
"_____no_output_____"
],
[
"# plot the graph\npredicted = model(Variable(torch.from_numpy(x_train))).data.numpy()\nplt.plot(x_train, y_train, 'ro', label='Original data')\nplt.plot(x_train, predicted, label='Fitted Line')\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"%%time\n# save the model\ntorch.save(model.state_dict(), 'model.pkl')",
"CPU times: user 599 µs, sys: 788 µs, total: 1.39 ms\nWall time: 767 µs\n"
]
],
[
[
"# Other Linear Regression",
"_____no_output_____"
]
],
[
[
"vis = Visdom()",
"_____no_output_____"
],
[
"# make dataset\nnum_data = 1000\n\nnoise = init.normal(torch.FloatTensor(num_data, 1), std=0.2)\nx = init.uniform(torch.Tensor(num_data, 1), -10, 10)\ny = 2*x + 3\ny_noise = 2*(x+noise)+3",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
],
[
"y",
"_____no_output_____"
],
[
"model = nn.Linear(1,1)",
"_____no_output_____"
],
[
"model",
"_____no_output_____"
],
[
"output = model(Variable(x))",
"_____no_output_____"
],
[
"loss_func = nn.L1Loss()",
"_____no_output_____"
],
[
"optimizer = optim.SGD(model.parameters(), lr=1)",
"_____no_output_____"
],
[
"# train\nloss_arr = []\nlabel = Variable(y_noise)\nfor i in range(1000):\n output = model(Variable(x))\n optimizer.zero_grad()\n \n loss = loss_func(output, label)\n loss.backward()\n optimizer.step()\n loss_arr.append(loss.data.numpy()[0])\n \nparam_list = list(model.parameters())\nprint(param_list[0].data, param_list[1].data)",
"\n-0.6497\n[torch.FloatTensor of size 1x1]\n \n 2.9504\n[torch.FloatTensor of size 1]\n\n"
]
],
[
[
"optimizer.zero_grad : Clears the gradients of all optimized Variable s.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7423637ea4bdb59ece14c8d78558fc5b6d5aedb | 6,283 | ipynb | Jupyter Notebook | examples/notebook/contrib/max_flow_taha.ipynb | tias/or-tools | b37d9c786b69128f3505f15beca09e89bf078a89 | [
"Apache-2.0"
] | 1 | 2021-05-25T01:42:03.000Z | 2021-05-25T01:42:03.000Z | examples/notebook/contrib/max_flow_taha.ipynb | tias/or-tools | b37d9c786b69128f3505f15beca09e89bf078a89 | [
"Apache-2.0"
] | null | null | null | examples/notebook/contrib/max_flow_taha.ipynb | tias/or-tools | b37d9c786b69128f3505f15beca09e89bf078a89 | [
"Apache-2.0"
] | 1 | 2021-07-24T22:52:41.000Z | 2021-07-24T22:52:41.000Z | 32.723958 | 251 | 0.548623 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e74244bf2124845d34f949fb61a9cb30d0c99351 | 204,350 | ipynb | Jupyter Notebook | experiments/experiment_3/community_assignment.ipynb | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 3 | 2020-05-17T21:56:52.000Z | 2020-12-09T04:27:31.000Z | experiments/experiment_3/community_assignment.ipynb | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 2 | 2020-08-06T04:58:37.000Z | 2020-08-06T05:02:37.000Z | experiments/experiment_3/community_assignment.ipynb | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 1 | 2020-08-12T02:29:11.000Z | 2020-08-12T02:29:11.000Z | 336.655684 | 58,036 | 0.886073 | [
[
[
"%load_ext nb_black",
"_____no_output_____"
],
[
"from itertools import product\n\nimport numpy as np\nfrom graspy.embed import MultipleASE, OmnibusEmbed\nfrom graspy.cluster import KMeansCluster, GaussianCluster\nfrom graspy.simulations import er_np, sbm\nfrom graspy.models import SBMEstimator\nfrom graspy.plot import heatmap\nfrom scipy.stats import ttest_ind\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom joblib import Parallel, delayed\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n%matplotlib inline",
"_____no_output_____"
],
[
"def generate_data(m, n=[5, 15], p=0.5, delta=0):\n np.random.seed(None)\n\n if (p + delta > 1) or (p + delta < 1):\n assert ValueError(\"p + delta must be between [0, 1]\")\n\n p1 = [[p, p], [p, p]]\n p2 = [[p + delta, p], [p, p]]\n\n X = np.stack([sbm(n, p1) for _ in np.arange(m)])\n Y = np.stack([sbm(n, p2) for _ in np.arange(m)])\n\n return X, Y\n\n\ndef experiment(m, num_sig_nodes, num_non_sig_nodes, delta, reps):\n mase_aris = np.zeros(reps)\n omni_aris = np.zeros(reps)\n\n true_labels = [0] * num_sig_nodes + [1] * num_non_sig_nodes\n\n for i in range(reps):\n X, Y = generate_data(m=m, n=[num_sig_nodes, num_non_sig_nodes], delta=delta)\n stacked = np.vstack([X, Y])\n\n mase = MultipleASE(2)\n embeddings = mase.fit_transform(stacked)\n gclust = GaussianCluster(2, 2)\n mase_results = gclust.fit_predict(embeddings, true_labels)\n mase_aris[i] = mase_results[1]\n\n omni = OmnibusEmbed(2)\n embeddings = omni.fit_transform(stacked).mean(axis=0)\n gclust = GaussianCluster(2, 2)\n omni_results = gclust.fit_predict(embeddings, true_labels)\n omni_aris[i] = omni_results[1]\n\n return m, num_sig_nodes, mase_aris.mean(), omni_aris.mean()",
"_____no_output_____"
],
[
"X, Y = generate_data(50, n=[5, 15], delta=0.5)\nsns.set_context(\"talk\")\nfig, ax = plt.subplots(3, 2, sharex=\"col\", constrained_layout=True, figsize=(12, 10))\n\nfor i, test in enumerate([\"MASE\", \"Omni\"]):\n if test == \"MASE\":\n embedder = MultipleASE(2)\n embeddings = embedder.fit_transform(np.vstack([X, Y]))\n else:\n embedder = OmnibusEmbed(2)\n embeddings = embedder.fit_transform(np.vstack([X, Y])).mean(axis=0)\n gclust = GaussianCluster(2, 2)\n labels_gmm = gclust.fit_predict(embeddings)\n\n k = KMeansCluster(2)\n labels_kmeans = k.fit_predict(embeddings)\n\n sns.scatterplot(embeddings[:, 0], embeddings[:, 1], ax=ax[0, i])\n ax[0, i].set_title(f\"{test} Embeddings\")\n\n one_gmm = embeddings[labels_gmm == 0]\n two_gmm = embeddings[labels_gmm == 1]\n sns.scatterplot(one_gmm[:, 0], one_gmm[:, 1], ax=ax[1, i])\n sns.scatterplot(two_gmm[:, 0], two_gmm[:, 1], ax=ax[1, i])\n ax[1, i].set_title(f\"GMM o {test}\")\n\n one_km = embeddings[labels_kmeans == 0]\n two_km = embeddings[labels_kmeans == 1]\n sns.scatterplot(one_km[:, 0], one_km[:, 1], ax=ax[2, i])\n sns.scatterplot(two_km[:, 0], two_km[:, 1], ax=ax[2, i])\n ax[2, i].set_title(f\"KMeans o {test}\")\n\nfig.savefig(\"./figures/20200203_20_node_embeddings.png\", dpi=300, bbox_inches=\"tight\")",
"_____no_output_____"
]
],
[
[
"1. 1st vary the number of significant nodes and sample size (fix non-sig nodes = 15, fix delta=0.25, p = 0.5)\n\n2. 2nd vary the number of non-significant nodes and sample size (fix sig nodes = 5, fix delta=0.25, p = 0.5)",
"_____no_output_____"
]
],
[
[
"# Experiment 1\nspacing = 20\nms = np.linspace(0, 50, spacing + 1)[1:].astype(int)\nnum_sig_nodes = np.linspace(0, 100, spacing + 1)[1:].astype(int)\nnum_non_sig_nodes = 15\ndelta = 0.25\nreps = 100\n\nres = Parallel(n_jobs=-1, verbose=1)(\n delayed(experiment)(\n m=m,\n num_sig_nodes=n,\n num_non_sig_nodes=num_non_sig_nodes,\n delta=delta,\n reps=reps,\n )\n for m, n in product(ms, num_sig_nodes)\n)\n\nres_df = pd.DataFrame(res, columns=[\"m\", \"num_sig_nodes\", \"mase_ari\", \"omni_ari\"])\n\nres_df.to_csv(\"./results/20200130_vary_sig_nodes.csv\", index=False)",
"[Parallel(n_jobs=-1)]: Using backend LokyBackend with 96 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 8 tasks | elapsed: 14.2s\n[Parallel(n_jobs=-1)]: Done 400 out of 400 | elapsed: 28.2min finished\n"
],
[
"fmt = lambda x: \"{:.2f}\".format(x)\n\nwith sns.plotting_context(\"talk\", font_scale=1.25):\n fig, ax = plt.subplots(\n 1,\n 3,\n gridspec_kw={\"width_ratios\": [1, 1, 0.05]},\n figsize=(16, 8),\n constrained_layout=True,\n )\n\n sns.heatmap(\n np.flipud(res_df.mase_ari.values.reshape(spacing, -1)),\n ax=ax[0],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws=dict(shrink=0.7),\n xticklabels=num_sig_nodes,\n yticklabels=ms[::-1] * 2,\n cbar_ax=ax[-1],\n vmin=0,\n vmax=1,\n )\n ax[0].set_title(\"MASE Average ARI\")\n\n sns.heatmap(\n np.flipud(res_df.omni_ari.values.reshape(spacing, -1)),\n ax=ax[1],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws=dict(shrink=0.7),\n cbar=False,\n xticklabels=num_sig_nodes,\n yticklabels=[],\n # cbar_ax=ax[-1],\n vmin=0,\n vmax=1,\n )\n ax[1].set_title(\"Omni Average ARI\")\n\n fig.text(-0.03, 0.5, \"Sample Size\", va=\"center\", rotation=\"vertical\")\n fig.text(0.5, -0.03, \"Number of Significant Nodes\", va=\"center\", ha=\"center\")\n\n fig.savefig(\"./figures/20200130_vary_sig_nodes.png\", dpi=300, bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"# Experiment 2\nspacing = 20\nms = np.linspace(0, 50, spacing + 1)[1:].astype(int)\nnum_sig_nodes = 15\nnum_non_sig_nodes = np.linspace(0, 100, spacing + 1)[1:].astype(int)\ndelta = 0.25\nreps = 100\n\nres = Parallel(n_jobs=-1, verbose=1)(\n delayed(experiment)(\n m=m, num_sig_nodes=num_sig_nodes, num_non_sig_nodes=n, delta=delta, reps=reps\n )\n for m, n in product(ms, num_non_sig_nodes)\n)\n\nres_df = pd.DataFrame(res, columns=[\"m\", \"num_sig_nodes\", \"mase_ari\", \"omni_ari\"])\n\nres_df.to_csv(\"./results/20200130_vary_non_sig_nodes.csv\", index=False)",
"[Parallel(n_jobs=-1)]: Using backend LokyBackend with 96 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 8 tasks | elapsed: 14.3s\n[Parallel(n_jobs=-1)]: Done 400 out of 400 | elapsed: 28.1min finished\n"
],
[
"fmt = lambda x: \"{:.2f}\".format(x)\n\nwith sns.plotting_context(\"talk\", font_scale=1.25):\n fig, ax = plt.subplots(\n 1,\n 3,\n gridspec_kw={\"width_ratios\": [1, 1, 0.05]},\n figsize=(16, 8),\n constrained_layout=True,\n )\n\n sns.heatmap(\n np.flipud(res_df.mase_ari.values.reshape(spacing, -1)),\n ax=ax[0],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws=dict(shrink=0.7),\n xticklabels=num_non_sig_nodes,\n yticklabels=ms[::-1] * 2,\n cbar_ax=ax[-1],\n vmin=0,\n vmax=1,\n )\n ax[0].set_title(\"MASE Average ARI\")\n\n sns.heatmap(\n np.flipud(res_df.omni_ari.values.reshape(spacing, -1)),\n ax=ax[1],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws=dict(shrink=0.7),\n cbar=False,\n xticklabels=num_non_sig_nodes,\n yticklabels=[],\n # cbar_ax=ax[-1],\n vmin=0,\n vmax=1,\n )\n ax[1].set_title(\"Omni Average ARI\")\n\n fig.text(-0.03, 0.5, \"Sample Size\", va=\"center\", rotation=\"vertical\")\n fig.text(0.5, -0.03, \"Number of Non-Significant Nodes\", va=\"center\", ha=\"center\")\n\n fig.savefig(\n \"./figures/20200130_vary_non_sig_nodes.png\", dpi=300, bbox_inches=\"tight\"\n )",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e742521c04fa1a5728998278634bb4499696eb16 | 4,136 | ipynb | Jupyter Notebook | actividades/01_match_filter/actividad.ipynb | phuijse/UACH-INFO185 | db46e6c1cad7926031afce2b34003862c287a302 | [
"MIT"
] | 6 | 2019-03-19T19:19:50.000Z | 2020-05-04T16:11:08.000Z | actividades/01_match_filter/actividad.ipynb | phuijse/UACH-INFO185 | db46e6c1cad7926031afce2b34003862c287a302 | [
"MIT"
] | null | null | null | actividades/01_match_filter/actividad.ipynb | phuijse/UACH-INFO185 | db46e6c1cad7926031afce2b34003862c287a302 | [
"MIT"
] | 6 | 2019-04-10T14:01:12.000Z | 2022-03-22T02:07:41.000Z | 31.333333 | 267 | 0.606625 | [
[
[
"%matplotlib notebook\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"# Transformada de Fourier 1D\n\n1. Use scipy.fftpack para calcular la transformada de Fourier de la señal con valores reales guardada en `data.pkl`\n1. Muestre el espectro de magnitud y el espectro de fase.\n1. Determine las frecuencias más relevantes estudiando el espectro de magnitud y extraiga sus amplitudes y ángulos asociados\n1. Reconstruya la señal \"limpia\" usando:\n\n$$\ny = \\sum_{k=1}^K A_k \\cos(2\\pi t f_k + \\phi_k) \n$$\n\ndonde $f_k, A_k, \\phi_k$ son las $K$ frecuencias, amplitudes y ángulos que usted seleccionó",
"_____no_output_____"
]
],
[
[
"import pickle\nt, s, Fs = pickle.load(open(\"data.pkl\", \"rb\"))\n\nfig, ax = plt.subplots(figsize=(6, 3), tight_layout=True)\nax.plot(t, s, '.');\nax.set_xlabel('Tiempo [s]')\nax.set_ylabel('Señal');",
"_____no_output_____"
]
],
[
[
"# Match filter\n\nUn *match filter* es un filtro convolucional cuyo objetivo es detectar la presencia de una señal modelo o *template* dentro de otra señal\n\nEn este experimento usaremos imágenes en escala de grises\n\n- La imagen denominada `template` corresponde al modelo\n- La imagen denominada `data` corresponde a la imagen de prueba, es decir la señal donde queremos detectar la presencia o ausencia de nuestro modelo\n\n",
"_____no_output_____"
]
],
[
[
"template = plt.imread(\"template.png\")\n\ndef get_data(s_noise=0.1):\n escena = plt.imread(\"mario1.png\")\n return escena + s_noise*np.random.randn(*escena.shape)\n\ndata = get_data(s_noise=0.1)\n\nfig, ax = plt.subplots(1, 2, figsize=(10, 4))\nax[0].imshow(template, cmap=plt.cm.Greys_r);\nax[1].imshow(data, cmap=plt.cm.Greys_r);",
"_____no_output_____"
]
],
[
[
"> El objetivo es programar un match filter que actue como un detector de monedas para Mario\n\nPara esto utilice la función de scipy [`correlate2d`](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.correlate2d.html). Instrucciones detalladas:\n\n1. Obtenga versiones centradas de `data` y `template` restandoles sus respectivas medias media (puede usar `np.mean`)\n1. Obtenga la correlación cruzada entre las señales usando `correlate2d`. Estudie el rango de valore que toma la correlación cruzada\n1. En base al resultado anterior construya y muestre una máscara binaria que sea 1 donde hay una moneda y 0 donde no hay una moneda revele sólo la posición del template en la imagen. Puede usar el operador `>` con un valor umbral adecuadamente seleccionado. \n1. Encuentre el valor máximo de `s_noise` con el cual es posible detectar el template usando el umbral seleccionado anteriormente",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74257cdf1ffd9cc93f0df1610400b0173a0c409 | 181,178 | ipynb | Jupyter Notebook | Data_Visualization.ipynb | Bethinadileep/Backend-Development | c8d9b64c6471a24009b9e800258fe66580e4c5a4 | [
"MIT"
] | null | null | null | Data_Visualization.ipynb | Bethinadileep/Backend-Development | c8d9b64c6471a24009b9e800258fe66580e4c5a4 | [
"MIT"
] | null | null | null | Data_Visualization.ipynb | Bethinadileep/Backend-Development | c8d9b64c6471a24009b9e800258fe66580e4c5a4 | [
"MIT"
] | null | null | null | 247.173261 | 37,274 | 0.899083 | [
[
[
"<a href=\"https://colab.research.google.com/github/Bethinadileep/Backend-Development/blob/main/Data_Visualization.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"\n\n\n\n---\n\n",
"_____no_output_____"
],
[
"Visualising Data\n================\n\nThe purpose of scientific computation is insight not numbers: To understand the meaning of the (many) numbers we compute, we often need postprocessing, statistical analysis and graphical visualisation of our data. \n\nMatplotlib (Pylab) – plotting y=f(x), (and a bit more)\n------------------------------------------------------\n\nThe Python library *Matplotlib* is a python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments. Matplotlib tries to make easy things easy and hard things possible. You can generate plots, histograms, power spectra, bar charts, errorcharts, scatterplots, etc, with just a few lines of code.\n\n### Matplotlib and Pylab\n\nMatplotlib as *an object oriented plotting library*. Pylab is an interface to the same set of functions that imitates the (state-driven) Matlab plotting interface.\n\nPylab is slightly more convenient to use for easy plots, and Matplotlib gives far more detailed control over how plots are created. If you use Matplotlib routinely to produce figures, you are well advised to learn about the object oriented matplotlib interface (instead of the pylab interface).\n",
"_____no_output_____"
]
],
[
[
"#importing matplotlib\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"**Plotting a simple plot**",
"_____no_output_____"
]
],
[
[
"x=list(range(0,10,1))\n#default value is 1\ny=list(range(0,10,1))\nprint(\"x = \",x)\nprint(\"y = \",y)",
"x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\ny = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
],
[
"plt.figure(figsize=(5,5))\n\nplt.xlabel('X Axis ')\n\nplt.ylabel('Y Axis ')\nplt.title('Simple plot')\n#plt.rcParams[\"font_size\"]=15.0\n\nplt.bar(x,y)\nplt.show()\n",
"_____no_output_____"
],
[
"# x-axis values \nx = [5, 2, 9, 4, 7] \n#x.sort()\n \n# Y-axis values \ny = [10, 5, 8, 4, 2] \n\n# Function to plot \nplt.plot(x,y,\"o\",color='red') \n#plt.rcParams['font.size']=10.0\n# function to show the plot \nplt.show() ",
"[2, 4, 5, 8, 10]\n"
],
[
"plt.scatter(x,y)\nplt.show()",
"_____no_output_____"
],
[
"x=[2,3,4,6,6,8,9,9]\nplt.hist(x)\nplt.show()\n",
"_____no_output_____"
],
[
"activities = ['eat', 'sleep', 'work', 'code'] \nslices = [3, 7, 8, 6] \ncolors = ['r', 'y', 'g', 'b'] \n \n# plotting the pie chart \nplt.pie(slices, labels = activities, colors=colors, \n startangle=90, shadow = True, explode = (0, 0, 0.1, 0), \n radius = 1.2, autopct = '%.1f%%') \n\nplt.legend() \n \n# showing the plot \nplt.show()\n",
"_____no_output_____"
],
[
"import numpy as np\n\n# Compute the x and y coordinates for points on a sine curve\nx = np.arange(0, 3 * np.pi, 0.1)\ny = np.sin(x)\n\n# Plot the points using matplotlib\nplt.plot(x, y)\nplt.show() # You must call plt.show() to make graphics appear.",
"_____no_output_____"
]
],
[
[
"### Fine tuning your plot\n\nMatplotlib allows us to fine tune our plots in great detail. Here is an example:",
"_____no_output_____"
]
],
[
[
"\n\nx = np.arange(-3.14, 3.14, 0.01)\ny1 = np.sin(x)\ny2 = np.cos(x)\nplt.figure(figsize =(5 , 5))\nplt.plot(x, y1, label='sin(x)')\nplt.plot(x, y2, label='cos(x)')\nplt.legend()\nplt.grid()\nplt.xlabel('x')\nplt.title('This is the title of the graph')",
"_____no_output_____"
]
],
[
[
"showing some other useful commands:\n\n- `figure(figsize=(5,5))` sets the figure size to 5inch by 5inch\n\n- `plot(x,y1,label=’sin(x)’)` The “label” keyword defines the name of this line. The line label will be shown in the legend if the `legend()` command is used later.\n\n- Note that calling the `plot` command repeatedly, allows you to overlay a number of curves.\n\n- `axis([-2,2,-1,1])` This fixes the displayed area to go from xmin=-2 to xmax=2 in x-direction, and from ymin=-1 to ymax=1 in y-direction\n\n- `legend()` This command will display a legend with the labels as defined in the plot command. Try `help(pylab.legend)` to learn more about the placement of the legend.\n\n- `grid()` This command will display a grid on the backdrop.\n\n- `xlabel(’...’)` and `ylabel(’...’)` allow labelling the axes.\n\nNote further than you can chose different line styles, line thicknesses, symbols and colours for the data to be plotted. (The syntax is very similar to MATLAB.) For example:\n\n- `plot(x,y,’og’)` will plot circles (`o`) in green (`g`)\n\n- `plot(x,y,’-r’)` will plot a line (`-`) in red (`r`)\n\n- `plot(x,y,’-b’,linewidth=2)` will plot a blue line (`b`) with two two pixel thickness `linewidth=2` which is twice as wide as the default.\n\nThe full list of options can be found when typing `help(pylab.plot)` at the Python prompt. Because this documentation is so useful, we repeat parts of it here:",
"_____no_output_____"
],
[
" plot(*args, **kwargs)\n Plot lines and/or markers to the\n :class:`~matplotlib.axes.Axes`. *args* is a variable length\n argument, allowing for multiple *x*, *y* pairs with an\n optional format string. For example, each of the following is\n legal::\n\n plot(x, y) # plot x and y using default line style and color\n plot(x, y, 'bo') # plot x and y using blue circle markers\n plot(y) # plot y using x as index array 0..N-1\n plot(y, 'r+') # ditto, but with red plusses\n\n If *x* and/or *y* is 2-dimensional, then the corresponding columns\n will be plotted.\n\n An arbitrary number of *x*, *y*, *fmt* groups can be\n specified, as in::\n\n a.plot(x1, y1, 'g^', x2, y2, 'g-')\n\n Return value is a list of lines that were added.\n\n The following format string characters are accepted to control\n the line style or marker:\n\n ================ ===============================\n character description\n ================ ===============================\n '-' solid line style\n '--' dashed line style\n '-.' dash-dot line style\n ':' dotted line style\n '.' 
point marker\n ',' pixel marker\n 'o' circle marker\n 'v' triangle_down marker\n '^' triangle_up marker\n '<' triangle_left marker\n '>' triangle_right marker\n '1' tri_down marker\n '2' tri_up marker\n '3' tri_left marker\n '4' tri_right marker\n 's' square marker\n 'p' pentagon marker\n '*' star marker\n 'h' hexagon1 marker\n 'H' hexagon2 marker\n '+' plus marker\n 'x' x marker\n 'D' diamond marker\n 'd' thin_diamond marker\n '|' vline marker\n '_' hline marker\n ================ ===============================\n\n The following color abbreviations are supported:\n\n ========== ========\n character color\n ========== ========\n 'b' blue\n 'g' green\n 'r' red\n 'c' cyan\n 'm' magenta\n 'y' yellow\n 'k' black\n 'w' white\n ========== ========\n\n In addition, you can specify colors in many weird and\n wonderful ways, including full names (``'green'``), hex\n strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or\n grayscale intensities as a string (``'0.8'``). Of these, the\n string specifications can be used in place of a ``fmt`` group,\n but the tuple forms can be used only as ``kwargs``.\n\n Line styles and colors are combined in a single format string, as in\n ``'bo'`` for blue circles.\n\n The *kwargs* can be used to set line properties (any property that has\n a ``set_*`` method). You can use this to set a line label (for auto\n legends), linewidth, anitialising, marker face color, etc. Here is an\n example::\n\n plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)\n plot([1,2,3], [1,4,9], 'rs', label='line 2')\n axis([0, 4, 0, 10])\n legend()\n\n If you make multiple lines with one plot command, the kwargs\n apply to all those lines, e.g.::\n\n plot(x1, y1, x2, y2, antialised=False)\n\n Neither line will be antialiased.\n\n You do not need to use format strings, which are just\n abbreviations. All of the line properties can be controlled\n by keyword arguments. 
For example, you can set the color,\n marker, linestyle, and markercolor with::\n\n plot(x, y, color='green', linestyle='dashed', marker='o',\n markerfacecolor='blue', markersize=12). See\n :class:`~matplotlib.lines.Line2D` for details.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"#### Two (or more graphs) in one figure window\n\nThe `pylab.subplot` command allows to arrange several graphs within one figure window. The general syntax is\n\n subplot(numRows, numCols, plotNum)",
"_____no_output_____"
],
[
"For example, to arrange 4 graphs in a 2-by-2 matrix, and to select the first graph for the next plot command, one can use:\n\n```python\nsubplot(2, 2, 1)\n```",
"_____no_output_____"
],
[
"Here is a complete example plotting the sine and cosine curves in two graphs that are aligned underneath each other within the same window:",
"_____no_output_____"
]
],
[
[
"\nt = np.arange (0 , 2 * N . pi , 0.01)\n\n\n\nplt.subplot(2, 1, 1)\nplt.plot(t, N.sin(t))\nplt.xlabel('t')\nplt.ylabel('sin(t)')\n\nplt.subplot(2, 1, 2)\nplt.plot(t, N.cos(t))\nplt.xlabel('t')\nplt.ylabel('cos(t)')",
"_____no_output_____"
]
],
[
[
"#### Two (or more) figure windows",
"_____no_output_____"
]
],
[
[
"\nplt.figure(1)\nplt.plot(range(10),'o')\n\nplt.figure(2)\nplt.plot(range(100),'x')",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74268c9f23ce507092a567f79faf0b98d03a3f4 | 188,093 | ipynb | Jupyter Notebook | iot23_ML_training.ipynb | LRAbbade/IoT_anomaly_detection | 515f2c1f93f327b794a5e2d45d31b1ee2025894f | [
"MIT"
] | 1 | 2020-11-15T18:23:58.000Z | 2020-11-15T18:23:58.000Z | iot23_ML_training.ipynb | LRAbbade/IoT_anomaly_detection | 515f2c1f93f327b794a5e2d45d31b1ee2025894f | [
"MIT"
] | null | null | null | iot23_ML_training.ipynb | LRAbbade/IoT_anomaly_detection | 515f2c1f93f327b794a5e2d45d31b1ee2025894f | [
"MIT"
] | null | null | null | 83.485575 | 11,888 | 0.823428 | [
[
[
"import os\nimport random\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, f1_score, ConfusionMatrixDisplay, precision_score, recall_score\nfrom keras.models import Sequential\nfrom keras.layers import InputLayer, Dense, Flatten, Reshape, Dropout, Conv1D\nfrom keras.regularizers import L1L2\nfrom sklearn.utils import resample",
"_____no_output_____"
],
[
"sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))",
"Device mapping:\n/job:localhost/replica:0/task:0/device:GPU:0 -> device: 0, name: NVIDIA GeForce RTX 3070, pci bus id: 0000:06:00.0, compute capability: 8.6\n\n"
],
[
"shuffled_folder = 'dataset/shuffled/'\nshuffled_files = [shuffled_folder + file for file in os.listdir(shuffled_folder)]\nrandom.shuffle(shuffled_files)",
"_____no_output_____"
],
[
"num_files = len(shuffled_files)\ntraining_cut = round(0.4 * num_files)\ncross_cut = round(0.7 * num_files)",
"_____no_output_____"
],
[
"training_files = shuffled_files[:training_cut]\ncross_validation_files = shuffled_files[training_cut:cross_cut]\ntest_files = shuffled_files[cross_cut:]",
"_____no_output_____"
],
[
"def get_train_test_dfs(file):\n df = pd.read_parquet(file)\n df = df.fillna(0)\n X = df[[i for i in df.columns if not i == 'label']]\n y = df[['label']]\n return train_test_split(X, y, test_size=0.3, stratify=y)",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = get_train_test_dfs('dataset/shuffled/part_6')",
"_____no_output_____"
],
[
"def check_distrib(df):\n df = df[['label']]\n ones = len(df[df['label'] == 1])\n return ones / len(df)",
"_____no_output_____"
],
[
"len(X_train), len(y_train), len(X_test), len(y_test)",
"_____no_output_____"
],
[
"check_distrib(y_train), check_distrib(y_test)",
"_____no_output_____"
],
[
"def plot_confusion_matrix(y_true, y_pred):\n conf_mat = ConfusionMatrixDisplay(confusion_matrix(y_true, y_pred))\n conf_mat.plot()",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
]
],
[
[
"model = Sequential()\nmodel.add(Dense(units=50, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=200, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\n# model.add(Dropout(0.2))\nmodel.add(Dense(units=100, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.FalsePositives(), tf.keras.metrics.TrueNegatives()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"205857/205857 [==============================] - 1329s 6ms/step - loss: 0.5440 - precision_9: 0.7402 - false_positives_9: 428465.3804 - true_negatives_9: 0.0000e+00\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.8505211735867056\nPrecision = 0.7399189563504854 \n\n"
],
[
"model = Sequential()\nmodel.add(Dense(units=1, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.FalsePositives(), tf.keras.metrics.TrueNegatives()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"205857/205857 [==============================] - 1000s 5ms/step - loss: 0.5434 - precision_10: 0.7406 - false_positives_10: 427910.3043 - true_negatives_10: 123.5893\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.8506587435005278\nPrecision = 0.74014111430911 \n\n"
]
],
[
[
"---",
"_____no_output_____"
],
[
"### Try undersampling",
"_____no_output_____"
]
],
[
[
"def shuffle_df(df):\n return df.sample(frac=1).reset_index(drop=True)",
"_____no_output_____"
],
[
"def get_undersampled_df(file):\n X_train, X_test, y_train, y_test = get_train_test_dfs(file)\n train = pd.concat([X_train, y_train], axis=1)\n negative = train[train['label'] == 0]\n positive = train[train['label'] == 1]\n us_positive = shuffle_df(positive).iloc[:len(negative)]\n train = pd.concat([us_positive, negative])\n train = shuffle_df(train)\n X_train = train[[i for i in train.columns if i != 'label']]\n y_train = train[['label']]\n return X_train, X_test, y_train, y_test",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = get_undersampled_df('dataset/shuffled/part_6')",
"_____no_output_____"
],
[
"len(X_train), len(y_train), len(X_test), len(y_test)",
"_____no_output_____"
],
[
"check_distrib(y_train), check_distrib(y_test)",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Dense(units=1, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.FalseNegatives(), tf.keras.metrics.Precision()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"107079/107079 [==============================] - 537s 5ms/step - loss: 0.6484 - recall_3: 0.1939 - false_negatives: 367743.4495 - precision_5: 0.8413\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.23103900739399005\nPrecision = 0.999875396714871 \n\nRecall = 0.13060930616199687 \n\n"
],
[
"model = Sequential()\nmodel.add(Dense(units=50, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=200, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\n# model.add(Dropout(0.2))\nmodel.add(Dense(units=100, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.FalseNegatives(), tf.keras.metrics.Precision()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"107079/107079 [==============================] - 698s 6ms/step - loss: 0.6472 - recall_4: 0.1375 - false_negatives_1: 371654.1647 - precision_6: 0.9643\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.23101133034482205\nPrecision = 0.999989002690675 \n\nRecall = 0.13058967876655703 \n\n"
]
],
[
[
"---",
"_____no_output_____"
],
[
"### Try oversampling",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = get_train_test_dfs('dataset/shuffled/part_6')",
"_____no_output_____"
],
[
"len(X_train), len(y_train), len(X_test), len(y_test)",
"_____no_output_____"
],
[
"check_distrib(y_train), check_distrib(y_test)",
"_____no_output_____"
],
[
"train = pd.concat([X_train, y_train], axis=1)\nnegative = train[train['label'] == 0]\npositive = train[train['label'] == 1]",
"_____no_output_____"
],
[
"len(negative), len(positive)",
"_____no_output_____"
],
[
"neg_upsampled = resample(negative, n_samples=len(positive))",
"_____no_output_____"
],
[
"len(neg_upsampled), len(positive)",
"_____no_output_____"
],
[
"train = pd.concat([neg_upsampled, positive])\ntrain = shuffle_df(train)\nX_train = train[[i for i in train.columns if i != 'label']]\ny_train = train[['label']]",
"_____no_output_____"
],
[
"len(X_train)",
"_____no_output_____"
],
[
"check_distrib(y_train)",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Dense(units=1, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.FalseNegatives(), tf.keras.metrics.Precision()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"304634/304634 [==============================] - 1516s 5ms/step - loss: 0.6931 - recall_5: 0.5493 - false_negatives_2: 574033.9203 - precision_7: 0.4996\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.0\n"
],
[
"model = Sequential()\nmodel.add(Dense(units=50, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=200, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\n# model.add(Dropout(0.2))\nmodel.add(Dense(units=100, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.FalseNegatives(), tf.keras.metrics.Precision()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"304634/304634 [==============================] - 2028s 7ms/step - loss: 0.6467 - recall_7: 0.1323 - false_negatives_4: 1059197.5009 - precision_9: 0.9916\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.2314173085682645\nPrecision = 0.9999926829714453\nRecall = 0.13084914335993245 \n\n"
]
],
[
[
"### Trying both again with 60% of positives",
"_____no_output_____"
],
[
"#### Undersampling",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = get_train_test_dfs('dataset/shuffled/part_6')",
"_____no_output_____"
],
[
"len(X_train), len(y_train), len(X_test), len(y_test)",
"_____no_output_____"
],
[
"check_distrib(y_train), check_distrib(y_test)",
"_____no_output_____"
],
[
"train = pd.concat([X_train, y_train], axis=1)\nnegative = train[train['label'] == 0]\npositive = train[train['label'] == 1]\nlen(negative), len(positive)",
"_____no_output_____"
],
[
"positive = shuffle_df(positive).iloc[:int(len(negative) * 1.5)]\ntrain = pd.concat([positive, negative])\ntrain = shuffle_df(train)\nX_train = train[[i for i in train.columns if i != 'label']]\ny_train = train[['label']]",
"_____no_output_____"
],
[
"len(X_train)",
"_____no_output_____"
],
[
"check_distrib(y_train)",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Dense(units=1, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.FalsePositives()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"133849/133849 [==============================] - 585s 4ms/step - loss: 0.6433 - precision_12: 0.5971 - false_positives_2: 427015.1576\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.8506150641390436\nPrecision = 0.7400728851714885\nRecall = 0.9999784577367123 \n\n"
],
[
"model = Sequential()\nmodel.add(Dense(units=50, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=200, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\n# model.add(Dropout(0.2))\nmodel.add(Dense(units=100, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.FalsePositives()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"133849/133849 [==============================] - 815s 6ms/step - loss: 0.6327 - precision_13: 0.5992 - false_positives_3: 428830.3528\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.8505211735867056\nPrecision = 0.7399189563504854\nRecall = 1.0 \n\n"
]
],
[
[
"#### Oversampling",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = get_train_test_dfs('dataset/shuffled/part_6')",
"_____no_output_____"
],
[
"len(X_train), len(y_train), len(X_test), len(y_test)",
"_____no_output_____"
],
[
"check_distrib(y_train), check_distrib(y_test)",
"_____no_output_____"
],
[
"train = pd.concat([X_train, y_train], axis=1)\nnegative = train[train['label'] == 0]\npositive = train[train['label'] == 1]\nlen(negative), len(positive)",
"_____no_output_____"
],
[
"neg_upsampled = resample(negative, n_samples=int(0.6666 * len(positive)))",
"_____no_output_____"
],
[
"len(neg_upsampled), len(positive)",
"_____no_output_____"
],
[
"train = pd.concat([neg_upsampled, positive])\ntrain = shuffle_df(train)\nX_train = train[[i for i in train.columns if i != 'label']]\ny_train = train[['label']]",
"_____no_output_____"
],
[
"len(X_train)",
"_____no_output_____"
],
[
"check_distrib(y_train)",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Dense(units=1, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.FalsePositives()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"253852/253852 [==============================] - 1100s 4ms/step - loss: 0.6731 - precision_14: 0.6006 - false_positives_4: 812435.4165\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.8505211735867056\nPrecision = 0.7399189563504854\nRecall = 1.0 \n\n"
],
[
"model = Sequential()\nmodel.add(Dense(units=50, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=200, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\n# model.add(Dropout(0.2))\nmodel.add(Dense(units=100, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.FalsePositives()])",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"253852/253852 [==============================] - 1482s 6ms/step - loss: 0.6320 - precision_15: 0.6003 - false_positives_5: 812276.5018\n"
],
[
"y_pred = model.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.8505211735867056\nPrecision = 0.7399189563504854\nRecall = 1.0 \n\n"
]
],
[
[
"---",
"_____no_output_____"
],
[
"### Try Grid search on undersampled df",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = get_undersampled_df('dataset/shuffled/part_6')",
"_____no_output_____"
],
[
"len(X_train), len(y_train), len(X_test), len(y_test)",
"_____no_output_____"
],
[
"check_distrib(y_train), check_distrib(y_test)",
"_____no_output_____"
],
[
"X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.3, stratify=y_train)",
"_____no_output_____"
],
[
"len(X_train), len(y_train), len(X_val), len(y_val)",
"_____no_output_____"
],
[
"check_distrib(y_train), check_distrib(y_val)",
"_____no_output_____"
],
[
"def make_model(layers, activation, regularizer, optimizer, loss, metrics):\n model = Sequential([\n Dense(units=n, activation=activation, kernel_regularizer=regularizer)\n for n in layers\n ])\n model.add(Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))\n model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n return model",
"_____no_output_____"
],
[
"def get_scores(model):\n y_pred = model.predict(X_val)\n y_pred = np.round(y_pred)\n return f1_score(y_val, y_pred), precision_score(y_val, y_pred), recall_score(y_val, y_pred)",
"_____no_output_____"
],
[
"def grid_search(params_list):\n best_model = None\n df = pd.DataFrame()\n for params in params_list:\n print('running params:', params)\n model = make_model(*params, metrics=[tf.keras.metrics.Recall(), tf.keras.metrics.FalseNegatives(), tf.keras.metrics.Precision()])\n model.fit(X_train, y_train)\n f1, precision, recall = get_scores(model)\n df = df.append(pd.DataFrame(data={\n 'layers': len(params[0]),\n 'nodes': params[0],\n 'activation': params[1],\n 'regulatrizer': params[2],\n 'optimizer': params[3],\n 'loss': params[4],\n 'f1': f1,\n 'precision': precision,\n 'recall': recall\n }))\n if best_model is None or f1 > df['f1'].max():\n best_model = model\n \n return df, best_model",
"_____no_output_____"
],
[
"res_df, best = grid_search([\n [[1], 'relu', L1L2(1e-5, 1e-5), 'adam', 'binary_crossentropy'],\n [[50, 50], 'relu', L1L2(1e-5, 1e-5), 'adam', 'binary_crossentropy'],\n [[100, 100], 'relu', L1L2(1e-5, 1e-5), 'adam', 'binary_crossentropy'],\n [[200, 200], 'relu', L1L2(1e-5, 1e-5), 'adam', 'binary_crossentropy'],\n [[10, 10, 10, 10], 'relu', L1L2(1e-5, 1e-5), 'adam', 'binary_crossentropy'],\n [[20, 20, 20], 'relu', L1L2(1e-5, 1e-5), 'adam', 'binary_crossentropy'],\n])",
"running params: [[1], 'relu', <keras.regularizers.L1L2 object at 0x00000197D9E7B430>, 'adam', 'binary_crossentropy']\n74955/74955 [==============================] - 367s 5ms/step - loss: 0.6599 - recall_4: 0.1238 - false_negatives_4: 260759.3819 - precision_4: 0.9779\nrunning params: [[50, 50], 'relu', <keras.regularizers.L1L2 object at 0x00000197D9E7B8B0>, 'adam', 'binary_crossentropy']\n74955/74955 [==============================] - 412s 5ms/step - loss: 0.6470 - recall_5: 0.1644 - false_negatives_5: 257997.8997 - precision_5: 0.8641\nrunning params: [[100, 100], 'relu', <keras.regularizers.L1L2 object at 0x00000197D9E7BE80>, 'adam', 'binary_crossentropy']\n74955/74955 [==============================] - 408s 5ms/step - loss: 0.6469 - recall_6: 0.1572 - false_negatives_6: 259446.8309 - precision_6: 0.9047\nrunning params: [[200, 200], 'relu', <keras.regularizers.L1L2 object at 0x00000197D9E7B3D0>, 'adam', 'binary_crossentropy']\n74955/74955 [==============================] - 432s 6ms/step - loss: 0.6470 - recall_7: 0.1409 - false_negatives_7: 259634.9176 - precision_7: 0.9414\nrunning params: [[10, 10, 10, 10], 'relu', <keras.regularizers.L1L2 object at 0x00000197D9E7BF10>, 'adam', 'binary_crossentropy']\n74955/74955 [==============================] - 501s 7ms/step - loss: 0.6470 - recall_8: 0.1402 - false_negatives_8: 260044.5841 - precision_8: 0.9387\nrunning params: [[20, 20, 20], 'relu', <keras.regularizers.L1L2 object at 0x00000197DA00D0A0>, 'adam', 'binary_crossentropy']\n74955/74955 [==============================] - 458s 6ms/step - loss: 0.6471 - recall_9: 0.1583 - false_negatives_9: 258817.3943 - precision_9: 0.8778\n"
],
[
"res_df.sort_values('f1', ascending=False)",
"_____no_output_____"
],
[
"y_pred = best.predict(X_test)\ny_pred = np.round(y_pred)",
"_____no_output_____"
],
[
"print(f'F1 =', f1_score(y_test, y_pred))\nprint(f'Precision =', precision_score(y_test, y_pred))\nprint(f'Recall =', recall_score(y_test, y_pred), '\\n')\nplot_confusion_matrix(y_test, y_pred)",
"F1 = 0.2303335905517547\nPrecision = 0.9988466101726045\nRecall = 0.13017606731143458 \n\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7426d2326594238022397f925ab34c778003d8b | 11,044 | ipynb | Jupyter Notebook | Code/Optimization model.ipynb | PEESEgroup/Virtual_Con | 308528f675703e6b0e1aef4cff46b6167f8a0a83 | [
"MIT"
] | null | null | null | Code/Optimization model.ipynb | PEESEgroup/Virtual_Con | 308528f675703e6b0e1aef4cff46b6167f8a0a83 | [
"MIT"
] | null | null | null | Code/Optimization model.ipynb | PEESEgroup/Virtual_Con | 308528f675703e6b0e1aef4cff46b6167f8a0a83 | [
"MIT"
] | null | null | null | 44 | 205 | 0.64569 | [
[
[
"import pandas as pd\nfrom itertools import product\nimport gurobipy as gp # Installation and license for Gurobi is required for the execution of optimization model\nfrom gurobipy import GRB ",
"_____no_output_____"
],
[
"# Step 1a: Data processing for 1-hub scenarios\n# 'Distance_matrix_1hub.csv' is an ixj distance matrix exported from 'Distance computing.ipynb'; i represents the number of participants and j denotes the number of alternative conference hubs\nDistance_matrix_1hub = pd.read_csv('Distance_matrix_1hub.csv')\nDistance_matrix_1hub = Distance_matrix_1hub.set_index('Unnamed: 0')\n\nairport_rank = Distance_matrix_1hub.columns.values\n\nnum_airport_1hub = 30\nnum_participant = 383\ncartesian_prod_1hub = list(product(range(num_participant), range(num_airport_1hub)))\nDistance_matrix_1hub = {(i,j): Distance_matrix_1hub.iloc[i,j] for i, j in cartesian_prod_1hub}",
"_____no_output_____"
],
[
"# Step 1b: Data processing for multi-hub scenarios\n# 'Distance_matrix_multihub.csv' is an ixj distance matrix exported from 'Distance computing.ipynb'; i represents the number of participants and j denotes the number of alternative conference hubs\nDistance_matrix_multihub = pd.read_csv('Distance_matrix_multihub.csv')\nDistance_matrix_multihub = Distance_matrix_multihub.set_index('Unnamed: 0')\n\nairport_rank = Distance_matrix_multihub.columns.values\n\nnum_airport_multihub = 36\nnum_participant = 383\ncartesian_prod_multihub = list(product(range(num_participant), range(num_airport_multihub)))\nDistance_matrix_multihub = {(i,j): Distance_matrix_multihub.iloc[i,j] for i, j in cartesian_prod_multihub}",
"_____no_output_____"
],
[
"# Step 1c: Data processing for \"maximum travel distance\" scenarios, additional to Step 1a/1b\n# Dist_threshold: 1000, 3000, 5000, 10000 km\nDist_threshold = 1000\nfor key, value in Distance_matrix_1hub.items():\n if Distance_matrix_1hub[key] > Dist_threshold:\n Distance_matrix_1hub[key] = 1e8\n \nfor key, value in Distance_matrix_multihub.items():\n if Distance_matrix_multihub[key] > Dist_threshold:\n Distance_matrix_multihub[key] = 1e8 ",
"_____no_output_____"
],
[
"# Step 2a: Optimization model for 1-hub in-person and \"maximum travel distance\" scenarios\nm = gp.Model('participant_airport')\n# big M\nM = 1e8\n# number of hubs: can be any interger in [1,6]\nN = 1\n\n# define variable y[j]: Constraint (10) in manuscript\nselect_airport = m.addVars(num_airport_1hub, vtype=GRB.BINARY, name='select_airport')\n# define variable x[i,j]: # Constraint (9) in manuscript\nassign_participant = m.addVars(cartesian_prod_1hub, vtype=GRB.BINARY, name='assign_participant')\n\n# Constraint (5) in manuscript\nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for j in range(num_airport_1hub)) == 1 for i in range(num_participant)), name='Eq1')\n# Constraint (6) in manuscript \nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for i in range(num_participant)) <= M*select_airport[j] for j in range(num_airport_1hub)), name='Eq2')\n# Constraint (7) in manuscript\nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for i in range(num_participant)) >= 15*select_airport[j] for j in range(num_airport_1hub)), name='Eq3')\n# Constraint (8) in manuscript\nm.addConstr((gp.quicksum(select_airport[j] for j in range(num_airport_1hub)) == N), name='Eq4')\n# Equation (4) in manuscript\nm.setObjective(assign_participant.prod(Distance_matrix_1hub), GRB.MINIMIZE)\n\nm.optimize()",
"_____no_output_____"
],
[
"# Step 2b: Optimization model for 1-hub \"maximum virtual participation\" scenarios\nm = gp.Model('participant_airport')\n# big M\nM = 1e8\n# number of hubs: can be any interger in [1,6]\nN = 1\n# percentage of in-person participation (i.e., 10%, 30%, 50%, or 70%) \nalpha = 0.1\n\n# define variable y[j]: Constraint (10) in manuscript\nselect_airport = m.addVars(num_airport_1hub, vtype=GRB.BINARY, name='select_airport')\n# define variable x[i,j]: # Constraint (9) in manuscript\nassign_participant = m.addVars(cartesian_prod_1hub, vtype=GRB.BINARY, name='assign_participant')\n\n# Constraint (12) in manuscript\nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for j in range(num_airport_1hub)) <= 1 for i in range(num_participant)), name='Eq1')\n# Constraint (6) in manuscript \nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for i in range(num_participant)) <= M*select_airport[j] for j in range(num_airport_1hub)), name='Eq2')\n# Constraint (7) in manuscript\nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for i in range(num_participant)) >= 15*select_airport[j] for j in range(num_airport_1hub)), name='Eq3')\n# Constraint (8) in manuscript\nm.addConstr((gp.quicksum(select_airport[j] for j in range(num_airport_1hub)) == N), name='Eq4')\n# Constraint (13) in manuscript\nm.addConstr((gp.quicksum(assign_participant[(i,j)] for i,j in cartesian_prod_1hub) >= alpha*num_participant), name='Eq5')\n# Equation (4) in manuscript\nm.setObjective(assign_participant.prod(Distance_matrix_1hub), GRB.MINIMIZE)\n\nm.optimize()",
"_____no_output_____"
],
[
"# Step 2c: Optimization model for multi-hub in-person and \"maximum travel distance\" scenarios\nm = gp.Model('participant_airport')\n# big M\nM = 1e8\n# number of hubs: can be any interger in [1,6]\nN = 1\n\n# define variable y[j]: Constraint (10) in manuscript\nselect_airport = m.addVars(num_airport_multihub, vtype=GRB.BINARY, name='select_airport')\n# define variable x[i,j]: # Constraint (9) in manuscript\nassign_participant = m.addVars(cartesian_prod_multihub, vtype=GRB.BINARY, name='assign_participant')\n\n# Constraint (5) in manuscript\nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for j in range(num_airport_multihub)) == 1 for i in range(num_participant)), name='Eq1')\n# Constraint (6) in manuscript \nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for i in range(num_participant)) <= M*select_airport[j] for j in range(num_airport_multihub)), name='Eq2')\n# Constraint (7) in manuscript\nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for i in range(num_participant)) >= 15*select_airport[j] for j in range(num_airport_multihub)), name='Eq3')\n# Constraint (8) in manuscript\nm.addConstr((gp.quicksum(select_airport[j] for j in range(num_airport_multihub)) == N), name='Eq4')\n# Equation (4) in manuscript\nm.setObjective(assign_participant.prod(Distance_matrix_multihub), GRB.MINIMIZE)\n\nm.optimize()",
"_____no_output_____"
],
[
"# Step 2d: Optimization model for multi-hub in-person and \"maximum travel distance\" scenarios\nm = gp.Model('participant_airport')\n# big M\nM = 1e8\n# number of hubs: can be any interger in [1,6]\nN = 1\n# percentage of in-person participation (i.e., 10%, 30%, 50%, or 70%) \nalpha = 0.1\n\n# define variable y[j]: Constraint (10) in manuscript\nselect_airport = m.addVars(num_airport_multihub, vtype=GRB.BINARY, name='select_airport')\n# define variable x[i,j]: # Constraint (9) in manuscript\nassign_participant = m.addVars(cartesian_prod_multihub, vtype=GRB.BINARY, name='assign_participant')\n\n# Constraint (12) in manuscript\nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for j in range(num_airport_multihub)) <= 1 for i in range(num_participant)), name='Eq1')\n# Constraint (6) in manuscript \nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for i in range(num_participant)) <= M*select_airport[j] for j in range(num_airport_multihub)), name='Eq2')\n# Constraint (7) in manuscript\nm.addConstrs((gp.quicksum(assign_participant[(i,j)] for i in range(num_participant)) >= 15*select_airport[j] for j in range(num_airport_multihub)), name='Eq3')\n# Constraint (8) in manuscript\nm.addConstr((gp.quicksum(select_airport[j] for j in range(num_airport_multihub)) == N), name='Eq4')\n# Constraint (13) in manuscript\nm.addConstr((gp.quicksum(assign_participant[(i,j)] for i,j in cartesian_prod_multihub) >= alpha*num_participant), name='Eq5')\n# Equation (4) in manuscript\nm.setObjective(assign_participant.prod(Distance_matrix_multihub), GRB.MINIMIZE)\n\nm.optimize()",
"_____no_output_____"
],
[
"# print optimization results for the assignment of each participant to their optimized conference hubs\ndf_result = Distance_matrix_multihub.copy()\ndf_result = 0\ndf_result = {(i,j): abs(assign_participant[i, j].x) for i, j in assign_participant.keys()}\ndf_result_linear = [None] * num_participant\nk=0\nfor i,j in assign_participant.keys():\n if (abs(assign_participant[i,j].x) > 0.99):\n df_result_linear[i] = int(airport_rank[j])+1\n k=k+1\n\ndf_result_final = pd.DataFrame(df_result_linear)\nprint(df_result_final)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74279200477efed7d4905c8c32d663bb7c8cdaf | 3,975 | ipynb | Jupyter Notebook | Tarea1-sol.ipynb | diegotg2000/Tarea1-Lineal | 70c4bcd70d9ca484b64ade59d755de4389e3e1ee | [
"MIT"
] | null | null | null | Tarea1-sol.ipynb | diegotg2000/Tarea1-Lineal | 70c4bcd70d9ca484b64ade59d755de4389e3e1ee | [
"MIT"
] | null | null | null | Tarea1-sol.ipynb | diegotg2000/Tarea1-Lineal | 70c4bcd70d9ca484b64ade59d755de4389e3e1ee | [
"MIT"
] | null | null | null | 18.661972 | 103 | 0.445031 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7428f78da44aa95898814af0dd2180c3892e27f | 578,726 | ipynb | Jupyter Notebook | 1-) Image Processing with Cv2/9- Image Histograms with CV2.ipynb | AkifCanSonmez/ImageProccessingCourse | 15e9f5dea03c8da348890b71bbfc997ff7e1b4b8 | [
"MIT"
] | null | null | null | 1-) Image Processing with Cv2/9- Image Histograms with CV2.ipynb | AkifCanSonmez/ImageProccessingCourse | 15e9f5dea03c8da348890b71bbfc997ff7e1b4b8 | [
"MIT"
] | null | null | null | 1-) Image Processing with Cv2/9- Image Histograms with CV2.ipynb | AkifCanSonmez/ImageProccessingCourse | 15e9f5dea03c8da348890b71bbfc997ff7e1b4b8 | [
"MIT"
] | 1 | 2022-02-18T12:06:50.000Z | 2022-02-18T12:06:50.000Z | 1,059.937729 | 144,076 | 0.956525 | [
[
[
"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"#resmi içe aktar\nimg = cv2.imread(\"red-blue.jpg\")\nimg_vis = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)",
"_____no_output_____"
],
[
"plt.figure(),plt.imshow(img_vis)",
"_____no_output_____"
],
[
"print(img_vis.shape)",
"(200, 400, 3)\n"
],
[
"img_hist = cv2.calcHist([img],channels=[0],mask = None, histSize = [256], ranges = [0,256])\nprint(img_hist.shape)\nplt.figure(),plt.plot(img_hist)\n",
"(256, 1)\n"
],
[
"\ncolor = (\"b\",\"g\",\"r\")\nplt.figure()\nfor i,c in enumerate(color):\n hist = cv2.calcHist([img],channels = [i], mask = None, histSize = [256], ranges = [0,256])\n plt.plot(hist,color=c)",
"_____no_output_____"
]
],
[
[
"#### ",
"_____no_output_____"
]
],
[
[
"golden_gate = cv2.imread(\"goldengate.jfif\")\ngolden_gate_vis = cv2.cvtColor(golden_gate,cv2.COLOR_BGR2RGB)\nplt.figure(),plt.imshow(golden_gate_vis),plt.title(\"Orijinal\")\n\nprint(golden_gate.shape)\n\nmask = np.zeros(golden_gate.shape[:2],np.uint8)\nplt.figure(),plt.imshow(mask,cmap=\"gray\"),plt.title(\"MASK\")\n\n",
"(194, 259, 3)\n"
],
[
"mask[150:200,0:150] = 255\nplt.figure(),plt.imshow(mask,cmap=\"gray\"),plt.title(\"Mask Size\")\n\n",
"_____no_output_____"
],
[
"masked_img_vis = cv2.bitwise_and(golden_gate,golden_gate,mask=mask)\nplt.figure(),plt.imshow(masked_img_vis,cmap=\"gray\")",
"_____no_output_____"
],
[
"masked_img = cv2.bitwise_and(golden_gate,golden_gate,mask = mask)\n\nmasked_img_hist = cv2.calcHist([golden_gate],channels = [0], mask=mask,histSize=[256],ranges=[0,256])\n\nplt.figure(),plt.plot(masked_img_hist)",
"_____no_output_____"
]
],
[
[
"## Histogram Eşitleme\n\n## Karşıtlık Arttırma (Kontraslık)",
"_____no_output_____"
]
],
[
[
"img = cv2.imread(\"histogram.jpg\",0)\nplt.figure(),plt.imshow(img,cmap=\"gray\")",
"_____no_output_____"
],
[
"img_hist = cv2.calcHist([img],channels=[0],mask=None,histSize=[256],ranges=[0,256])\nplt.figure(), plt.plot(img_hist)",
"_____no_output_____"
],
[
"eq_hist = cv2.equalizeHist(img)\nplt.figure(),plt.imshow(eq_hist,cmap=\"gray\")",
"_____no_output_____"
]
],
[
[
"#### Açık renkliler 255'e koyu renkliler 0'a çekildi. ",
"_____no_output_____"
]
],
[
[
"eq_img_hist = cv2.calcHist([eq_hist],channels=[0],mask=None,histSize=[256],ranges=[0,256])\nplt.figure(),plt.plot(eq_img_hist)\nplt.figure(),plt.plot(img_hist)",
"_____no_output_____"
]
],
[
[
"#### 100,200 arasındaki bölgeyi 0 ile 255 arasına genişletip kontrast uygulandı. Yani açık olanlar daha açık koyu olanlar daha koyulaştırıldı",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74298573e8117fba27f5e20f4c1e11e83a43560 | 16,228 | ipynb | Jupyter Notebook | 03_control_flow.ipynb | glow-mdsol/phuse_eu_connect_python | 9bab0ea24cd737eb62eacce21427a330edf9335a | [
"MIT"
] | null | null | null | 03_control_flow.ipynb | glow-mdsol/phuse_eu_connect_python | 9bab0ea24cd737eb62eacce21427a330edf9335a | [
"MIT"
] | null | null | null | 03_control_flow.ipynb | glow-mdsol/phuse_eu_connect_python | 9bab0ea24cd737eb62eacce21427a330edf9335a | [
"MIT"
] | 1 | 2019-11-14T10:23:13.000Z | 2019-11-14T10:23:13.000Z | 26.603279 | 812 | 0.516761 | [
[
[
"## Control Flow\n\nTo be able to do anything useful we need to control the flow of data by applying logic to the program. This section will look at the common control flow directives available in Python. As you read through this, try to compare to existing languages and tools.",
"_____no_output_____"
],
[
"### Conditional Statements: if, elif and else\n\nWe can use the `if` keyword to conditionally execute code; \n* the action of `if`, `elif` and `else` are the same as those in other languages ",
"_____no_output_____"
]
],
[
[
"# Setup the scenario\n\nvalue = 6\n\n# Note: the execution will only take one of the paths here\nif value < 5:\n print(\"Value is less than 5\")\nelif 6 < value <= 10:\n # this is short-hand for if value > 6 and value <= 10\n print(\"Value is between 5 and 10\")\nelse:\n print(\"Value is greater than 10\")\n",
"_____no_output_____"
]
],
[
[
"This sample code has a bug - can you spot it and fix it?\n\nA comment on indentation:\n* Python uses indentation to designate code blocks, this is usually managed by the tools you use so it is much less of an issue than it once was.\n* Indentation controls scope; variables introduced and used within an indented block do **not** exist in the *global* scope.\n* If you need to modify a variable in a indented block and use the modified value elsewhere, just declare it outside. \n\n",
"_____no_output_____"
]
],
[
[
"# Setup the scenario\ntonk = 1\n# change this value\nblue = 1\n\nif blue == 1:\n new_tonk = tonk + 2\n tonk += 2\n print(\"new_tonk:\", new_tonk, \"tonk:\", tonk)\nelse:\n tonk += 3\n\nprint(tonk)\nprint(blue)\nprint(new_tonk)\n# Scope in Jupyter is a little more global than in a program\ndel(new_tonk)\n",
"4\n2\n"
]
],
[
[
"### Loops\n\nLoops in python operate in the same way as other languages; the keywords are `for` and `while`. Both keywords operate on `iterable` types\n",
"_____no_output_____"
]
],
[
[
"# Lists are iterable\nfor x in [1, 2, 3]:\n print(\"x is\", x)\n\n# Tuples are iterable\nfor y in (1, 2, 3):\n print(\"y is\", y)\n\nd = dict(a=1, b=2, c=3)\n# Dictionaries are also iterable\nfor p in d:\n print(\"p is \", p)",
"x is 1\nx is 2\nx is 3\ny is 1\ny is 2\ny is 3\np is a\np is b\np is c\n"
],
[
"# Setup scenario\nc = 0\n\nfor i in range(25):\n c += i\n print(c)\nelse:\n # else is a statement that is automatically run on the last iteration\n print(\"Total for {}! is {}\".format(i, c))",
"0\n1\n3\n6\n10\n15\n21\n28\n36\n45\n55\n66\n78\n91\n105\n120\n136\n153\n171\n190\n210\n231\n253\n276\n300\nTotal for 24! is 300\n"
]
],
[
[
"Check out the `help` for the `range` function\n\n`while` loops execute until the test defined on the loop is satisfied",
"_____no_output_____"
]
],
[
[
"# Setup the test\n\n# Carry over the totals\ntotal = 0 \n# use a counter for the factorial\ncounter = 0\n# define our threshold value\nthreshold = 200\n\nwhile total < threshold:\n total += counter\n counter += 1\nprint(\"The last factorial less than\",threshold,\"is\",counter)",
"The last factorial less than 200 is 21\n"
]
],
[
[
"You can use the `continue` keyword to skip onto the next iteration, and `break` to leave the loop ",
"_____no_output_____"
]
],
[
[
"import random\n\nsamples = [random.randint(-100, 100) for x in range(100)]\n\nfor smp in samples:\n if smp < 0:\n continue\n elif smp > 50:\n print(\"Exiting as over 50\")\n break\n else:\n print(smp)\n ",
"19\n5\n33\nExiting\n"
]
],
[
[
"## Uses of Control Flow and Operators ",
"_____no_output_____"
],
[
"### Searching for a value in a Tuple or List\n\nBoth lists and tuples are iterable elements. This means you can iterate over the set of values. \n\nLet's use this to check and see if a value is in a list (for the purposes of this exercise we'll consider tuples and lists interchangeably) using our control loops ",
"_____no_output_____"
]
],
[
[
"# define our list to search\nl = [1, 3, 4, 7, 12, 19, 25]\n\n# initialise our variable \nfound = False\nsearch_value = 12\n\n# now, iterate over the values using a for loop\nfor value in l:\n if value == search_value:\n found = True # found our value, mark the search as a success\n# use a conditional statement to trigger the switch\n\nif found is True: # comparison in the case of boolean variables should use is rather than ==\n print(\"Found value\", search_value, \"in\", l)\nelse: # didn't find the value, report that\n print(\"Didn't find value\", search_value, \"in\", l)",
"_____no_output_____"
]
],
[
[
"We can short circuit the search somewhat!",
"_____no_output_____"
]
],
[
[
"# define our list to search\nl = [1, 3, 4, 7, 12, 19, 25]\n\n# initialise our variable \nfound = False\nsearch_value = 12\n\n# now, iterate over the values using a for loop\nfor value in l:\n if value == search_value:\n found = True # found our value, mark the search as a success\n break # break stops the iteration\n\nif found is True: # comparison in the case of boolean variables should use is rather than ==\n print(\"Found value\", search_value, \"in\", l)\nelse: # didn't find the value, report that\n print(\"Didn't find value\", search_value, \"in\", l)",
"_____no_output_____"
]
],
[
[
"And for the simplest",
"_____no_output_____"
]
],
[
[
"# define our list to search\nl = [1, 3, 4, 7, 12, 19, 25]\n\n# initialise our variable \nsearch_value = 12\n\n# now, iterate over the values using a for loop\nfor value in l:\n if value == search_value:\n print(\"Found value\", search_value, \"in\", l)\n break # break stops the iteration\nelse:\n # else runs at the end of the iteration\n print(\"Didn't find value\", search_value, \"in\", l)",
"_____no_output_____"
]
],
[
[
"Say we wanted to know whereabouts the value we searched for is; we can use the `enumerate` function ",
"_____no_output_____"
]
],
[
[
"# define our list to search\nl = [1, 3, 4, 7, 12, 19, 25]\n\n# initialise our variable \nsearch_value = 12\n\n# the enumerate function wraps the iteration, and returns a tuple; the index of the current value and the value\nfor i, value in enumerate(l):\n if value == search_value:\n print(\"Found value\", search_value, \"at position\", i)\n break # break stops the iteration\nelse:\n # else runs at the end of the iteration\n print(\"Didn't find value\", search_value, \"in\", l)",
"Found value 12 at position 4\n"
]
],
[
[
"`enumerate` takes a `start` argument, which tells the interpreter what value to start on - by default it is 0\n\nThose of you who have read ahead will know an easier way...",
"_____no_output_____"
]
],
[
[
"# define our list to search\nl = [1, 3, 4, 7, 12, 19, 25]\n\n# the in operator implements a search for a value\nif 12 in l:\n # the index accessor on an iterable returns the first location the value is found\n print(\"Found value\", search_value, \"at position\", l.index(12)) \nelse:\n print(\"Didn't find value\", search_value, \"in\", l)",
"Found value 12 at 4\n"
]
],
[
[
"Now, an exercise for you! Using what we've discussed prior, create the code to work out the mean for the following list:",
"_____no_output_____"
]
],
[
[
"c = [23, -57, -87, -17, 29, -5, 22, 66, -52, -9, 63, -47, 64, -83, 55, -15, 91, 39, -66, -28, 34, -65, 42, -94, 62, 1, 71, -79, -29, -32, 45, -50, -51, 5, -39, 45, -29, -38, -70, -58, -57, 35, -18, -72, -43, -34, -63, 74, -36, 70]\n",
"_____no_output_____"
]
],
[
[
"### List comprehensions\nList comprehensions are a bit of **syntatic sugar**, it allows you to create a list according to a function. As an example; if we wanted to get all the positive values for `c` we can use the following list comprehension",
"_____no_output_____"
]
],
[
[
"positive_c = [x for x in c if x >= 0]",
"_____no_output_____"
]
],
[
[
"Or, get the absolute values for all the elements (note, this doesn't change the original list)",
"_____no_output_____"
]
],
[
[
"abs_val = [abs(x) for x in c] # abs is a python builtin to take the absolute value",
"_____no_output_____"
]
],
[
[
"Loops with dictionaries are a little different",
"_____no_output_____"
]
],
[
[
"# items returns a tuple of key and value pairs\nfor category, values in t.items():\n print(category, \"->\", values)\n\n# keys returns the list of keys\nfor category in t.keys():\n print(category)\n\n# values returns a list of the values\nfor values in t.values():\n print(values)\n",
"Fruit -> ['Tomato', 'Pear', 'Apple']\nVegetable -> ['Carrot', 'Parsnip']\nPet -> ['Dog', 'Cat', 'Budgie', 'Dog', 'Cat', 'Budgie']\nComputer -> ['Mac', 'PC', 'Commodore64']\nFruit\nVegetable\nPet\nComputer\n['Tomato', 'Pear', 'Apple']\n['Carrot', 'Parsnip']\n['Dog', 'Cat', 'Budgie', 'Dog', 'Cat', 'Budgie']\n['Mac', 'PC', 'Commodore64']\n"
]
],
[
[
"The `in` accessor defaults to using the keys",
"_____no_output_____"
]
],
[
[
"print(\"Computer\" in t)\nprint(\"Astronaut\" in t)\nprint(\"Parsnip\" in t)",
"True\nFalse\nFalse\n"
]
],
[
[
"## Next\n\nNext, we're going to look at functions and libraries. Click [here](./04_functions_and_libraries.ipynb) to continue.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7429d44364a657dbbac24e61ea03f2f73cc2303 | 86,348 | ipynb | Jupyter Notebook | logistic_regression/multivariate2.ipynb | Redcxx/LearnML | 6b26dcdd1ac52e0a4d26372cd1a8bd56b83f9b6d | [
"BSD-3-Clause"
] | 1 | 2021-08-18T05:36:21.000Z | 2021-08-18T05:36:21.000Z | logistic_regression/multivariate2.ipynb | Redcxx/LearnML | 6b26dcdd1ac52e0a4d26372cd1a8bd56b83f9b6d | [
"BSD-3-Clause"
] | null | null | null | logistic_regression/multivariate2.ipynb | Redcxx/LearnML | 6b26dcdd1ac52e0a4d26372cd1a8bd56b83f9b6d | [
"BSD-3-Clause"
] | null | null | null | 294.703072 | 64,932 | 0.918307 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport time",
"_____no_output_____"
],
[
"data = pd.read_csv('fashion-mnist.csv')\ndata.head()\n\ntrain_data = data.sample(frac=0.8)\ntest_data = data.drop(train_data.index)\n\ntrain_labels = train_data['label'].values\ntrain_data = train_data.drop('label', axis=1).values\ntest_labels = test_data['label'].values\ntest_data = test_data.drop('label', axis=1).values",
"_____no_output_____"
],
[
"num_im = 25\n\nnum_cells = math.ceil(math.sqrt(num_im))\nplt.figure(figsize=(10, 10))\n\nfor i in range(num_im):\n pixels = train_data[i]\n size = math.ceil(math.sqrt(pixels.size))\n pixels = pixels.reshape(size, size)\n \n plt.subplot(num_cells, num_cells, i+1)\n plt.title(train_labels[i])\n plt.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)\n plt.imshow(pixels)",
"_____no_output_____"
],
[
"def sigmoid(x):\n return 1 / (1 + np.e ** -x)\n\nx = np.linspace(-10, 10, 50)\ny = sigmoid(x)\nplt.plot(x, y)\nplt.title('Sigmoid Function')\nplt.show()",
"_____no_output_____"
],
[
"num_samples, num_features = train_data.shape\nW = None\n\ntrain_data = (train_data - train_data.min()) / (train_data.max() - train_data.min())\nX = np.c_[np.ones(num_samples), train_data]\n\nnum_iters = 5000\nlr = 0.001\nlambda_ = 0.01\n\nstart = time.time()\n\n\n# train each label using one vs all\nfor label in range(10):\n w = np.random.rand(num_features+1)\n y = (train_labels == label).astype(float)\n for i in range(num_iters):\n diff = sigmoid(X @ w) - y\n for wi in range(len(w)):\n if wi == 0:\n t = diff\n reg_term = 0\n else:\n t = diff @ X[:, wi]\n reg_term = lambda_ * X[:, wi]\n \n w[wi] -= lr * np.sum(t - reg_term) / num_samples\n \n if (i + 1) % 1000 == 0:\n reg_term = lambda_ * np.sum(w[1:] @ w[1:].T) / num_samples\n t = sigmoid(X @ w)\n cost = np.sum(-y * np.log(t) - (1-y) * np.log(1-t)) / 2 / num_samples - reg_term\n print(f'label={label} iteration={i+1} cost={cost:.5f}')\n \n W = w if W is None else np.vstack((W, w))\n \nprint(f'Training Finished | Time Taken = {time.time() - start:.2f}s')",
"C:\\Users\\wweilue\\AppData\\Local\\Programs\\Python\\Python37\\lib\\site-packages\\ipykernel_launcher.py:33: RuntimeWarning: divide by zero encountered in log\nC:\\Users\\wweilue\\AppData\\Local\\Programs\\Python\\Python37\\lib\\site-packages\\ipykernel_launcher.py:33: RuntimeWarning: invalid value encountered in multiply\n"
],
[
"train_pred = np.argmax(X @ W.T, axis=1)\ntrain_acc = np.sum(train_pred == train_labels) / num_samples\nprint(f'Train Accuracy = {train_acc:.5f}')\n\ntest_X = np.c_[np.ones(len(test_data)), test_data]\ntest_pred = np.argmax(test_X @ W.T, axis=1)\ntest_acc = np.sum(test_pred == test_labels) / len(test_data)\nprint(f'Test Accuracy = {test_acc:.5f}')",
"_____no_output_____"
],
[
"num_im = 9\n\nnum_cells = math.ceil(math.sqrt(num_im))\nplt.figure(figsize=(10, 10))\n\nfor i in range(num_im):\n pixels = W[i][1:]\n size = math.ceil(math.sqrt(pixels.size))\n pixels = pixels.reshape(size, size)\n \n plt.subplot(num_cells, num_cells, i+1)\n plt.title(i)\n plt.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)\n plt.imshow(pixels, cmap='Greys')",
"_____no_output_____"
],
[
"num_im = 64\n\nnum_cells = math.ceil(math.sqrt(num_im))\nplt.figure(figsize=(15, 15))\n\nfor i in range(num_im):\n label = test_labels[i]\n pixels = test_data[i]\n size = math.ceil(math.sqrt(pixels.size))\n pixels = pixels.reshape(size, size)\n x = np.concatenate(([1], test_data[i]))\n pred = np.argmax(x @ W.T)\n \n plt.subplot(num_cells, num_cells, i+1)\n plt.title(pred)\n plt.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)\n plt.imshow(pixels, cmap='Greens' if pred == label else 'Reds')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7429f02d3b59b6c324f356e8aef52618b8957ee | 7,324 | ipynb | Jupyter Notebook | CH_16_machine_learning/T_07_scikit_lego.ipynb | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | CH_16_machine_learning/T_07_scikit_lego.ipynb | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | CH_16_machine_learning/T_07_scikit_lego.ipynb | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | 109.313433 | 6,068 | 0.895003 | [
[
[
"import collections\n\nfrom matplotlib import pyplot as plt\nfrom sklego import datasets\n\nX, y = datasets.load_penguins(return_X_y=True)\ncounter = collections.Counter(y)\nplt.bar(counter.keys(), counter.values())\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e742b0ce8a7d14378bc1cbe810286bf5c3328020 | 53,848 | ipynb | Jupyter Notebook | lectures/intro_to_tensorflow/intro_to_tensorflow.ipynb | mohnkhan/deep-learning-nano-foundation | 286aa6594c660e8dc07b6bc71c00e115af55e25c | [
"MIT"
] | 419 | 2017-02-01T18:24:24.000Z | 2022-03-05T16:26:41.000Z | lectures/intro_to_tensorflow/intro_to_tensorflow.ipynb | mohnkhan/deep-learning-nano-foundation | 286aa6594c660e8dc07b6bc71c00e115af55e25c | [
"MIT"
] | 2 | 2017-03-16T07:44:10.000Z | 2018-05-10T16:37:15.000Z | lectures/intro_to_tensorflow/intro_to_tensorflow.ipynb | mohnkhan/deep-learning-nano-foundation | 286aa6594c660e8dc07b6bc71c00e115af55e25c | [
"MIT"
] | 185 | 2017-02-01T18:24:26.000Z | 2022-02-18T20:05:22.000Z | 69.213368 | 26,206 | 0.7591 | [
[
[
"<h1 align=\"center\">TensorFlow Neural Network Lab</h1>",
"_____no_output_____"
],
[
"<img src=\"image/notmnist.png\">\nIn this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, <a href=\"http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html\">notMNIST</a>, consists of images of a letter from A to J in different fonts.\n\nThe above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!",
"_____no_output_____"
],
[
"To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print \"`All modules imported`\".",
"_____no_output_____"
]
],
[
[
"import hashlib\nimport os\nimport pickle\nfrom urllib.request import urlretrieve\n\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.utils import resample\nfrom tqdm import tqdm\nfrom zipfile import ZipFile\n\nprint('All modules imported.')",
"All modules imported.\n"
]
],
[
[
"The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).",
"_____no_output_____"
]
],
[
[
"def download(url, file):\n \"\"\"\n Download file from <url>\n :param url: URL to file\n :param file: Local file path\n \"\"\"\n if not os.path.isfile(file):\n print('Downloading ' + file + '...')\n urlretrieve(url, file)\n print('Download Finished')\n\n# Download the training and test dataset.\ndownload('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')\ndownload('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')\n\n# Make sure the files aren't corrupted\nassert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\\\n 'notMNIST_train.zip file is corrupted. Remove the file and try again.'\nassert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\\\n 'notMNIST_test.zip file is corrupted. Remove the file and try again.'\n\n# Wait until you see that all files have been downloaded.\nprint('All files downloaded.')",
"All files downloaded.\n"
],
[
"def uncompress_features_labels(file):\n \"\"\"\n Uncompress features and labels from a zip file\n :param file: The zip file to extract the data from\n \"\"\"\n features = []\n labels = []\n\n with ZipFile(file) as zipf:\n # Progress Bar\n filenames_pbar = tqdm(zipf.namelist(), unit='files')\n \n # Get features and labels from all files\n for filename in filenames_pbar:\n # Check if the file is a directory\n if not filename.endswith('/'):\n with zipf.open(filename) as image_file:\n image = Image.open(image_file)\n image.load()\n # Load image data as 1 dimensional array\n # We're using float32 to save on memory space\n feature = np.array(image, dtype=np.float32).flatten()\n\n # Get the the letter from the filename. This is the letter of the image.\n label = os.path.split(filename)[1][0]\n\n features.append(feature)\n labels.append(label)\n return np.array(features), np.array(labels)\n\n# Get the features and labels from the zip files\ntrain_features, train_labels = uncompress_features_labels('notMNIST_train.zip')\ntest_features, test_labels = uncompress_features_labels('notMNIST_test.zip')\n\n# Limit the amount of data to work with a docker container\ndocker_size_limit = 150000\ntrain_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)\n\n# Set flags for feature engineering. This will prevent you from skipping an important step.\nis_features_normal = False\nis_labels_encod = False\n\n# Wait until you see that all features and labels have been uncompressed.\nprint('All features and labels uncompressed.')",
"100%|██████████| 210001/210001 [00:43<00:00, 4773.45files/s]\n100%|██████████| 10001/10001 [00:02<00:00, 4577.99files/s]\n"
]
],
[
[
"<img src=\"image/Mean Variance - Image.png\" style=\"height: 75%;width: 75%; position: relative; right: 5%\">\n## Problem 1\nThe first problem involves normalizing the features for your training and test data.\n\nImplement Min-Max scaling in the `normalize_grayscale()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.\n\nSince the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255.\n\nMin-Max Scaling:\n$\nX'=a+{\\frac {\\left(X-X_{\\min }\\right)\\left(b-a\\right)}{X_{\\max }-X_{\\min }}}\n$\n\n*If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/deep-learning/blob/master/intro-to-tensorFlow/intro_to_tensorflow_solution.ipynb).*",
"_____no_output_____"
]
],
[
[
"# Problem 1 - Implement Min-Max scaling for grayscale image data\ndef normalize_grayscale(image_data):\n \"\"\"\n Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]\n :param image_data: The image data to be normalized\n :return: Normalized image data\n \"\"\"\n a = 0.1\n b = 0.9\n X_min = 0\n X_max = 255\n return a + (((image_data - X_min)*(b - a)) / (X_max - X_min))\n\n# Test Cases\nnp.testing.assert_array_almost_equal(\n normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),\n [0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,\n 0.125098039216, 0.128235294118, 0.13137254902, 0.9],\n decimal=3)\nnp.testing.assert_array_almost_equal(\n normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),\n [0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,\n 0.896862745098, 0.9])\n\nif not is_features_normal:\n train_features = normalize_grayscale(train_features)\n test_features = normalize_grayscale(test_features)\n is_features_normal = True\n\nprint('Tests Passed!')",
"Tests Passed!\n"
],
[
"if not is_labels_encod:\n # Turn labels into numbers and apply One-Hot Encoding\n encoder = LabelBinarizer()\n encoder.fit(train_labels)\n train_labels = encoder.transform(train_labels)\n test_labels = encoder.transform(test_labels)\n\n # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32\n train_labels = train_labels.astype(np.float32)\n test_labels = test_labels.astype(np.float32)\n is_labels_encod = True\n\nprint('Labels One-Hot Encoded')",
"Labels One-Hot Encoded\n"
],
[
"assert is_features_normal, 'You skipped the step to normalize the features'\nassert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'\n\n# Get randomized datasets for training and validation\ntrain_features, valid_features, train_labels, valid_labels = train_test_split(\n train_features,\n train_labels,\n test_size=0.05,\n random_state=832289)\n\nprint('Training features and labels randomized and split.')",
"Training features and labels randomized and split.\n"
],
[
"# Save the data for easy access\npickle_file = 'notMNIST.pickle'\nif not os.path.isfile(pickle_file):\n print('Saving data to pickle file...')\n try:\n with open('notMNIST.pickle', 'wb') as pfile:\n pickle.dump(\n {\n 'train_dataset': train_features,\n 'train_labels': train_labels,\n 'valid_dataset': valid_features,\n 'valid_labels': valid_labels,\n 'test_dataset': test_features,\n 'test_labels': test_labels,\n },\n pfile, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n\nprint('Data cached in pickle file.')",
"Saving data to pickle file...\nData cached in pickle file.\n"
]
],
[
[
"# Checkpoint\nAll your progress is now saved to the pickle file. If you need to leave and come back to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\n# Load the modules\nimport pickle\nimport math\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n# Reload the data\npickle_file = 'notMNIST.pickle'\nwith open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f)\n train_features = pickle_data['train_dataset']\n train_labels = pickle_data['train_labels']\n valid_features = pickle_data['valid_dataset']\n valid_labels = pickle_data['valid_labels']\n test_features = pickle_data['test_dataset']\n test_labels = pickle_data['test_labels']\n del pickle_data # Free up memory\n\nprint('Data and modules loaded.')",
"Data and modules loaded.\n"
]
],
[
[
"\n## Problem 2\n\nNow it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.\n\n<img src=\"image/network_diagram.png\" style=\"height: 40%;width: 40%; position: relative; right: 10%\">\n\nFor the input here the images have been flattened into a vector of $28 \\times 28 = 784$ features. Then, we're trying to predict the image digit so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network. \n\nFor the neural network to train on your data, you need the following <a href=\"https://www.tensorflow.org/resources/dims_types.html#data-types\">float32</a> tensors:\n - `features`\n - Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`)\n - `labels`\n - Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`)\n - `weights`\n - Variable Tensor with random numbers from a truncated normal distribution.\n - See <a href=\"https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal\">`tf.truncated_normal()` documentation</a> for help.\n - `biases`\n - Variable Tensor with all zeros.\n - See <a href=\"https://www.tensorflow.org/api_docs/python/constant_op.html#zeros\"> `tf.zeros()` documentation</a> for help.\n\n*If you're having trouble solving problem 2, review \"TensorFlow Linear Function\" section of the class. If that doesn't help, the solution for this problem is available [here](intro_to_tensorflow_solution.ipynb).*",
"_____no_output_____"
]
],
[
[
"# All the pixels in the image (28 * 28 = 784)\nfeatures_count = 784\n# All the labels\nlabels_count = 10\n\n# Set the features and labels tensors\nfeatures = tf.placeholder(tf.float32)\nlabels = tf.placeholder(tf.float32)\n\n# Set the weights and biases tensors\nweights = tf.Variable(tf.truncated_normal((features_count, labels_count)))\nbiases = tf.Variable(tf.zeros(labels_count))\n\n# Test Cases\nfrom tensorflow.python.ops.variables import Variable\n\nassert features._op.name.startswith('Placeholder'), 'features must be a placeholder'\nassert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'\nassert isinstance(weights, Variable), 'weights must be a TensorFlow variable'\nassert isinstance(biases, Variable), 'biases must be a TensorFlow variable'\n\nassert features._shape == None or (\\\n features._shape.dims[0].value is None and\\\n features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'\nassert labels._shape == None or (\\\n labels._shape.dims[0].value is None and\\\n labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'\nassert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'\nassert biases._variable._shape == (10), 'The shape of biases is incorrect'\n\nassert features._dtype == tf.float32, 'features must be type float32'\nassert labels._dtype == tf.float32, 'labels must be type float32'\n\n# Feed dicts for training, validation, and test session\ntrain_feed_dict = {features: train_features, labels: train_labels}\nvalid_feed_dict = {features: valid_features, labels: valid_labels}\ntest_feed_dict = {features: test_features, labels: test_labels}\n\n# Linear Function WX + b\nlogits = tf.matmul(features, weights) + biases\n\nprediction = tf.nn.softmax(logits)\n\n# Cross entropy\ncross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)\n\n# Training loss\nloss = tf.reduce_mean(cross_entropy)\n\n# Create an operation that initializes all 
variables\ninit = tf.global_variables_initializer()\n\n# Test Cases\nwith tf.Session() as session:\n session.run(init)\n session.run(loss, feed_dict=train_feed_dict)\n session.run(loss, feed_dict=valid_feed_dict)\n session.run(loss, feed_dict=test_feed_dict)\n biases_data = session.run(biases)\n\nassert not np.count_nonzero(biases_data), 'biases must be zeros'\n\nprint('Tests Passed!')",
"Tests Passed!\n"
],
[
"# Determine if the predictions are correct\nis_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))\n# Calculate the accuracy of the predictions\naccuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))\n\nprint('Accuracy function created.')",
"Accuracy function created.\n"
]
],
[
[
"<img src=\"image/Learn Rate Tune - Image.png\" style=\"height: 70%;width: 70%\">\n## Problem 3\nBelow are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.\n\nParameter configurations:\n\nConfiguration 1\n* **Epochs:** 1\n* **Learning Rate:**\n * 0.8\n * 0.5\n * 0.1\n * 0.05\n * 0.01\n\nConfiguration 2\n* **Epochs:**\n * 1\n * 2\n * 3\n * 4\n * 5\n* **Learning Rate:** 0.2\n\nThe code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.\n\n*If you're having trouble solving problem 3, you can view the solution [here](intro_to_tensorflow_solution.ipynb).*",
"_____no_output_____"
]
],
[
[
"# Change if you have memory restrictions\nbatch_size = 128\n\n# epochs = 1\n# learning_rate = 0.1\nepochs = 5\nlearning_rate = 0.2\n\n# Gradient Descent\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) \n\n# The accuracy measured against the validation set\nvalidation_accuracy = 0.0\n\n# Measurements use for graphing loss and accuracy\nlog_batch_step = 50\nbatches = []\nloss_batch = []\ntrain_acc_batch = []\nvalid_acc_batch = []\n\nwith tf.Session() as session:\n session.run(init)\n batch_count = int(math.ceil(len(train_features)/batch_size))\n\n for epoch_i in range(epochs):\n \n # Progress bar\n batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')\n \n # The training cycle\n for batch_i in batches_pbar:\n # Get a batch of training features and labels\n batch_start = batch_i * batch_size\n batch_features = train_features[batch_start:batch_start + batch_size]\n batch_labels = train_labels[batch_start:batch_start + batch_size]\n\n # Run optimizer and get loss\n _, l = session.run(\n [optimizer, loss],\n feed_dict={features: batch_features, labels: batch_labels})\n\n # Log every 50 batches\n if not batch_i % log_batch_step:\n # Calculate Training and Validation accuracy\n training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)\n validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)\n\n # Log batches\n previous_batch = batches[-1] if batches else 0\n batches.append(log_batch_step + previous_batch)\n loss_batch.append(l)\n train_acc_batch.append(training_accuracy)\n valid_acc_batch.append(validation_accuracy)\n\n # Check accuracy against Validation data\n validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)\n\nloss_plot = plt.subplot(211)\nloss_plot.set_title('Loss')\nloss_plot.plot(batches, loss_batch, 'g')\nloss_plot.set_xlim([batches[0], batches[-1]])\nacc_plot = plt.subplot(212)\nacc_plot.set_title('Accuracy')\nacc_plot.plot(batches, 
train_acc_batch, 'r', label='Training Accuracy')\nacc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')\nacc_plot.set_ylim([0, 1.0])\nacc_plot.set_xlim([batches[0], batches[-1]])\nacc_plot.legend(loc=4)\nplt.tight_layout()\nplt.show()\n\nprint('Validation accuracy at {}'.format(validation_accuracy))",
"Epoch 1/5: 100%|██████████| 1114/1114 [00:15<00:00, 71.51batches/s]\nEpoch 2/5: 100%|██████████| 1114/1114 [00:17<00:00, 64.98batches/s]\nEpoch 3/5: 100%|██████████| 1114/1114 [00:15<00:00, 70.32batches/s]\nEpoch 4/5: 100%|██████████| 1114/1114 [00:16<00:00, 69.16batches/s]\nEpoch 5/5: 100%|██████████| 1114/1114 [00:14<00:00, 76.61batches/s]\n"
]
],
[
[
"## Test\nYou're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.",
"_____no_output_____"
]
],
[
[
"# The accuracy measured against the test set\ntest_accuracy = 0.0\n\nwith tf.Session() as session:\n \n session.run(init)\n batch_count = int(math.ceil(len(train_features)/batch_size))\n\n for epoch_i in range(epochs):\n \n # Progress bar\n batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')\n \n # The training cycle\n for batch_i in batches_pbar:\n # Get a batch of training features and labels\n batch_start = batch_i * batch_size\n batch_features = train_features[batch_start:batch_start + batch_size]\n batch_labels = train_labels[batch_start:batch_start + batch_size]\n\n # Run optimizer\n _ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})\n\n # Check accuracy against Test data\n test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)\n\n\nassert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)\nprint('Nice Job! Test Accuracy is {}'.format(test_accuracy))",
"Epoch 1/5: 100%|██████████| 1114/1114 [00:01<00:00, 857.59batches/s]\nEpoch 2/5: 100%|██████████| 1114/1114 [00:01<00:00, 859.69batches/s]\nEpoch 3/5: 100%|██████████| 1114/1114 [00:01<00:00, 861.03batches/s]\nEpoch 4/5: 100%|██████████| 1114/1114 [00:01<00:00, 860.91batches/s]\nEpoch 5/5: 100%|██████████| 1114/1114 [00:01<00:00, 833.97batches/s]"
]
],
[
[
"# Multiple layers\nGood job! You built a one layer TensorFlow network! However, you might want to build more than one layer. This is deep learning after all! In the next section, you will start to satisfy your need for more layers.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e742b395e18777d06a0c42eaea3ebbc515521c30 | 569,711 | ipynb | Jupyter Notebook | assignments/ndvi_subscene_assignment-PA_long_way.ipynb | Pearl-Ayem/ATSC_Course_Work | c075d166c235ac4e68a4b77750e02b2a5e77abd0 | [
"MIT"
] | null | null | null | assignments/ndvi_subscene_assignment-PA_long_way.ipynb | Pearl-Ayem/ATSC_Course_Work | c075d166c235ac4e68a4b77750e02b2a5e77abd0 | [
"MIT"
] | null | null | null | assignments/ndvi_subscene_assignment-PA_long_way.ipynb | Pearl-Ayem/ATSC_Course_Work | c075d166c235ac4e68a4b77750e02b2a5e77abd0 | [
"MIT"
] | null | null | null | 1,700.629851 | 551,556 | 0.963034 | [
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Introduction\" data-toc-modified-id=\"Introduction-1\"><span class=\"toc-item-num\">1 </span>Introduction</a></span></li><li><span><a href=\"#Get-bands-3,-4,-5-fullsize-(green,-red,-near-ir)\" data-toc-modified-id=\"Get-bands-3,-4,-5-fullsize-(green,-red,-near-ir)-2\"><span class=\"toc-item-num\">2 </span>Get bands 3, 4, 5 fullsize (green, red, near-ir)</a></span></li><li><span><a href=\"#This-cell-reads-in-your-affine-transform,-metadata-and-profile\" data-toc-modified-id=\"This-cell-reads-in-your-affine-transform,-metadata-and-profile-3\"><span class=\"toc-item-num\">3 </span>This cell reads in your affine transform, metadata and profile</a></span></li><li><span><a href=\"#This-cell-gets-the-right-reflection-function-for-your-satellite\" data-toc-modified-id=\"This-cell-gets-the-right-reflection-function-for-your-satellite-4\"><span class=\"toc-item-num\">4 </span>This cell gets the right reflection function for your satellite</a></span></li><li><span><a href=\"#Read-only-the-window-pixels-from-the-band-3,-4-files\" data-toc-modified-id=\"Read-only-the-window-pixels-from-the-band-3,-4-files-5\"><span class=\"toc-item-num\">5 </span>Read only the window pixels from the band 3, 4 files</a></span></li><li><span><a href=\"#In-the-next-cell-plot-a-mapped-ndvi-image-with-a-red-dot-in-your-ul-corner-and-a-white-dot-in-your-lr-corner\" data-toc-modified-id=\"In-the-next-cell-plot-a-mapped-ndvi-image-with-a-red-dot-in-your-ul-corner-and-a-white-dot-in-your-lr-corner-6\"><span class=\"toc-item-num\">6 </span>In the next cell plot a mapped ndvi image with a red dot in your ul corner and a white dot in your lr corner</a></span></li></ul></div>",
"_____no_output_____"
],
[
"# Introduction\n\nThere are 4 cells that ask for changes below, the rest should run as long as you\nuse the variable names I ask for in the questions.",
"_____no_output_____"
]
],
[
[
"import rasterio\nimport a301\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import Normalize\nfrom a301.landsat.landsat_metadata import landsat_metadata\nimport cartopy\nfrom rasterio import windows\nfrom pyproj import transform as proj_transform\nfrom pyproj import Proj\nfrom a301.landsat.toa_reflectance import calc_reflc_8\nimport pprint\nfrom a301.utils.data_read import download\nfrom pathlib import Path\nfrom affine import Affine\nfrom IPython.display import Image\nfrom a301.landsat.toa_reflectance import calc_refl_457, calc_reflc_8\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable",
"_____no_output_____"
]
],
[
[
"# Get bands 3, 4, 5 fullsize (green, red, near-ir)\n\nAt the end of this cell you shiould have the following path objects for your spring scene:\n\nmeta_bigfile, band3_bigfile, band4_bigfile, band5_bigfile\n\nthat point to your landsat TIF and mtl.txt files.",
"_____no_output_____"
]
],
[
[
"filenames=[\"LC08_L1TP_190031_20170528_20170615_01_T1_B3.TIF\",\n \"LC08_L1TP_190031_20170528_20170615_01_T1_B4.TIF\",\n \"LC08_L1TP_190031_20170528_20170615_01_T1_B5.TIF\",\n \"LC08_L1TP_190031_20170528_20170615_01_T1_MTL.txt\"]\ndest_folder=a301.data_dir / Path(\"landsat8/italy\")\n\nband3_bigfile=list(dest_folder.glob(\"*_B3.TIF\"))[0]\nband4_bigfile=list(dest_folder.glob(\"*_B4.TIF\"))[0]\nband5_bigfile=list(dest_folder.glob(\"*_B5.TIF\"))[0]\nmeta_bigfile=list(dest_folder.glob(\"*MTL.txt\"))[0]",
"_____no_output_____"
]
],
[
[
"# This cell reads in your affine transform, metadata and profile\n\nUsing band4_bigfile (arbitrary)",
"_____no_output_____"
]
],
[
[
"metadata=landsat_metadata(meta_bigfile)\nwith rasterio.open(str(band4_bigfile)) as raster:\n big_transform=raster.affine\n big_profile=raster.profile\n\nzone = metadata.UTM_ZONE \ncrs = cartopy.crs.UTM(zone, southern_hemisphere=False)\np_utm=Proj(crs.proj4_init)\np_lonlat=Proj(proj='latlong',datum='WGS84')",
"Scene LC81900312017148LGN00 center time is 2017-05-28 09:46:46\n"
]
],
[
[
"# This cell gets the right reflection function for your satellite",
"_____no_output_____"
]
],
[
[
"refl_dict={'LANDSAT_7':calc_refl_457,'LANDSAT_8':calc_reflc_8} \nsatellite=metadata.SPACECRAFT_ID\nrefl_fun=refl_dict[satellite]",
"_____no_output_____"
]
],
[
[
"# Define a subscene window and a transform\n\nIn the cell below, get the upper left col,row (ul_col,ul_row) and upper left and lower\nright x,y (ul_x,ul_y,lr_x,lr_y)\ncoordinates the upper left corner of \nyour subscene as in the image_zoom notebook. Use ul_col, ul_row, ul_x, ul_y plus your subscene\nwidth and height to make a rasterio window and new transform.\n\n window=Window(ul_col, ul_row, small_width, small_height)\n new_affine=Affine(30.,0.,ul_x,0.,-30.,ul_y)\n extent = [ul_x,lr_x,lr_y,ul_y]\n",
"_____no_output_____"
]
],
[
[
"italy_lon = 13.66477\nitaly_lat = 41.75983\nitaly_x, italy_y =proj_transform(p_lonlat,p_utm,italy_lon, italy_lat) \n\n\nfull_ul_xy=np.array(big_transform*(0,0))\nprint(f\"orig ul corner x,y (km)={full_ul_xy*1.e-3}\")\n\nul_col, ul_row = ~big_transform*(italy_x,italy_y)\nul_col, ul_row = int(ul_col), int(ul_row)\n\nl_col_offset= -1300\nr_col_offset= +2000\nb_row_offset= +2600\nt_row_offset= -100\ncol_slice=slice(ul_col+l_col_offset,ul_col+r_col_offset)\nrow_slice=slice(ul_row + t_row_offset, ul_row + b_row_offset)\nitaly_ul_xy = big_transform*(col_slice.start,row_slice.start)\nitaly_lr_xy = big_transform*(col_slice.stop,row_slice.stop)\n\nsmall_height, small_width = 2700,3300\nul_x, ul_y = italy_ul_xy[0], italy_ul_xy[1]\n\n# window=Window(ul_col, ul_row, small_width, small_height)\nnew_affine=Affine(30.,0.,ul_x,0.,-30.,ul_y)\nimage_extent=[italy_ul_xy[0],italy_lr_xy[0],italy_ul_xy[1],italy_lr_xy[1]]",
"orig ul corner x,y (km)=[ 271.185 4742.715]\n"
]
],
[
[
"# Read only the window pixels from the band 3, 4, 5 files",
"_____no_output_____"
]
],
[
[
"from a301.landsat.toa_reflectance import toa_reflectance_8\nrefl_vals=toa_reflectance_8([3,4,5],meta_bigfile)\nrefl_dict=dict()\nfor bandnum,filepath in zip([3,4,5],[band3_bigfile,band4_bigfile,band5_bigfile]):\n with rasterio.open(str(filepath)) as src:\n refl_dict[bandnum]=refl_vals",
"Scene LC81900312017148LGN00 center time is 2017-05-28 09:46:46\n"
]
],
[
[
"# In the next cell calculate your ndvi\n\nSave it in a variable called ndvi",
"_____no_output_____"
]
],
[
[
"# YOUR CODE HERE\nndvi = (refl_vals[5] - refl_vals[4])/(refl_vals[5] + refl_vals[4])",
"_____no_output_____"
],
[
"plt.hist(ndvi[~np.isnan(ndvi)].flat);\nplt.title('spring ndvi')\nplt.savefig('spring_ndvi.png')",
"_____no_output_____"
]
],
[
[
"# In the next cell plot a mapped ndvi image with a red dot in your ul corner and a white dot in your lr corner\n\nAdjust this plot to fit your image. Just delete the bottom line and work with the provided commands",
"_____no_output_____"
]
],
[
[
"vmin=0.0\nvmax=0.8\nthe_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)\npalette='viridis'\npal = plt.get_cmap(palette)\npal.set_bad('0.75') #75% grey for out-of-map cells\npal.set_over('w') #color cells > vmax red\npal.set_under('k') #color cells < vmin black\nfig, ax = plt.subplots(1, 1,figsize=[10,15],\n subplot_kw={'projection': crs})\ncol=ax.imshow(ndvi,origin=\"upper\",\n extent=image_extent,transform=crs)\nax.plot(italy_ul_xy[0],italy_ul_xy[1],'wo',markersize=50)\nax.plot(italy_lr_xy[0],italy_lr_xy[1],'ro',markersize=50)\nax.set(title=\"spring ndvi\")\ncbar_ax = fig.add_axes([0.95, 0.2, 0.05, 0.6])\ncbar=ax.figure.colorbar(col,extend='both',cax=cbar_ax,orientation='vertical')\ncbar.set_label('ndvi index')\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e742d1ba4d861209ec25d4e12fa3afc827eae75b | 24,415 | ipynb | Jupyter Notebook | notebooks/boto3.ipynb | nhm-usgs/S2S_Explore | b80968744dac56732cf12bb61843d427fe825920 | [
"MIT"
] | null | null | null | notebooks/boto3.ipynb | nhm-usgs/S2S_Explore | b80968744dac56732cf12bb61843d427fe825920 | [
"MIT"
] | null | null | null | notebooks/boto3.ipynb | nhm-usgs/S2S_Explore | b80968744dac56732cf12bb61843d427fe825920 | [
"MIT"
] | null | null | null | 262.526882 | 1,668 | 0.710178 | [
[
[
"import boto3",
"_____no_output_____"
],
[
"BUCKETNAME = 'noaa-gefs-retrospective'\nOBJECT_NAME = 'GEFSv12/reforecast/2000/2000010100/c00/Days:1-10/tmin_2m_2000010100_c00.grib2'\nFILE_NAME = 'tmin_2m_2000010100_c00.grib2'",
"_____no_output_____"
],
[
"s3 = boto3.client('s3')\n# Pass the variables defined above, not their names as string literals\ns3.download_file(BUCKETNAME, OBJECT_NAME, FILE_NAME)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e742d5558c24845b81a3e919fe1ca525ae486ecb | 2,317 | ipynb | Jupyter Notebook | cs224w/chapter15-3.ipynb | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | 1 | 2022-01-18T01:53:34.000Z | 2022-01-18T01:53:34.000Z | cs224w/chapter15-3.ipynb | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | cs224w/chapter15-3.ipynb | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | 28.604938 | 162 | 0.572292 | [
[
[
"## Speeding-up Hill-Climbing Lazy Evaluations\n - 위에서 너무 느리다는 단점이 있었다.\n - 이를 극복하기 위해 사용하는 방법을 소개\n \n ### Speeding up Hill-Climbing\n - <img src = \"attachment:image.png\" width = 40%>\n - 기존의 방법은 i 번째 라운드에서 센서 집합 S가 있을 때, marginal gain을 최대화하는 센서로 다음 센서를 골랐다.\n - 하지만 여기서 submodularity property를 다시 살펴보면 A가 B의 subset일 때 A의 margin gain은 항상 크다는 공식이 있다. 그러니까 라운드가 흐를수록 센서 u를 뭘 고르든 marginal benefit은 항상 작아진다는 점에 주목하자\n - <img src = \"attachment:image-2.png\" width = 40%>\n - 이진 탐색에서 사용되는 upper bound 원리를 사용\n - 먼저 S1 = {a}인 상태에서 gain을 구했고, 나머지에서 탐색을 할 차례이다. 남은 것들 중에서 내림차순으로 sort하자.\n - 그리고 첫 째 노드의 margin을 구한다.\n - d노드까지 S에 포함시킬 때 실제 효용량 다른 노드들의 gain보다 작아졌다. 이러면 알고리즘의 해가 아니게 되었으니 지금 구한 gain을 기준으로 다시 sort하자. 이번엔 b가 제일 위에 올라왔다.\n - gain을 구해보니 b의 값이 다른 노드들과 비교했을 때 가장 컸다. 아까 말한 upper-bound를 따져보면 분명 gain은 무조건 감소하는 값이다. 따라서 다른 후보군 중에서 b보다 gain이 클 경우는 아예 없기 때문에 b를 채택하여 S에 추가한다. \n - 위의 과정을 반복하면 계산량이 줄어든다.",
"_____no_output_____"
],
[
"## Data Dependent Bound on the Solution Quality\nGreedy 알고리즘의 solution quality를 얘기할 차례이다. 이 알고리즘은 항상 정답이 아닌 근사치를 제공하곤 한다. submodular의 경우 (1-1/e) bound가 존재한다. ",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"증명에 따라서 f(OPT)는 항상 f(S) + 델타 i들의 합이다. 포인트는 우항의 값들은 실제로 우리가 구할 수 있으니까 최적해는 무조건 이 값보다 같거나 작을 것이라고 추론할 수 있다.",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e742d99c928b48e173bc0e74c5ffa6d1e24edd08 | 438,180 | ipynb | Jupyter Notebook | content/ch-states/representing-qubit-states.ipynb | achieveordie/qiskit-textbook | ff380913b413b9095e51502653b32457a2fbd875 | [
"Apache-2.0"
] | null | null | null | content/ch-states/representing-qubit-states.ipynb | achieveordie/qiskit-textbook | ff380913b413b9095e51502653b32457a2fbd875 | [
"Apache-2.0"
] | null | null | null | content/ch-states/representing-qubit-states.ipynb | achieveordie/qiskit-textbook | ff380913b413b9095e51502653b32457a2fbd875 | [
"Apache-2.0"
] | null | null | null | 49.714091 | 81,303 | 0.607618 | [
[
[
"# Representing Qubit States",
"_____no_output_____"
],
[
"You now know something about bits, and about how our familiar digital computers work. All the complex variables, objects and data structures used in modern software are basically all just big piles of bits. Those of us who work on quantum computing call these *classical variables.* The computers that use them, like the one you are using to read this article, we call *classical computers*.\n\nIn quantum computers, our basic variable is the _qubit:_ a quantum variant of the bit. These have exactly the same restrictions as normal bits do: they can store only a single binary piece of information, and can only ever give us an output of `0` or `1`. However, they can also be manipulated in ways that can only be described by quantum mechanics. This gives us new gates to play with, allowing us to find new ways to design algorithms.\n\nTo fully understand these new gates, we first need understand how to write down qubit states. For this we will use the mathematics of vectors, matrices and complex numbers. Though we will introduce these concepts as we go, it would be best if you are comfortable with them already. If you need a more in-depth explanation or refresher, you can find a guide [here](../ch-prerequisites/linear_algebra.html).\n\n\n\n\n## Contents\n\n1. [Classical vs Quantum Bits](#cvsq) \n 1.1 [Statevectors](#statevectors) \n 1.2 [Qubit Notation](#notation) \n 1.3 [Exploring Qubits with Qiskit](#exploring-qubits) \n2. [The Rules of Measurement](#rules-measurement) \n 2.1 [A Very Important Rule](#important-rule) \n 2.2 [The Implications of this Rule](#implications)\n3. [The Bloch Sphere](#bloch-sphere) \n 3.1 [Describing the Restricted Qubit State](#bloch-sphere-1) \n 3.2 [Visually Representing a Qubit State](#bloch-sphere-2) \n",
"_____no_output_____"
],
[
"## 1. Classical vs Quantum Bits <a id=\"cvsq\"></a>\n\n### 1.1 Statevectors<a id=\"statevectors\"></a>\n\nIn quantum physics we use _statevectors_ to describe the state of our system. Say we wanted to describe the position of a car along a track, this is a classical system so we could use a number $x$:\n\n\n\n$$ x=4 $$\n\nAlternatively, we could instead use a collection of numbers in a vector called a _statevector._ Each element in the statevector contains the probability of finding the car in a certain place:\n\n\n\n$$\n|x\\rangle = \\begin{bmatrix} 0\\\\ \\vdots \\\\ 0 \\\\ 1 \\\\ 0 \\\\ \\vdots \\\\ 0 \\end{bmatrix} \n \\begin{matrix} \\\\ \\\\ \\\\ \\leftarrow \\\\ \\\\ \\\\ \\\\ \\end{matrix}\n \\begin{matrix} \\\\ \\\\ \\text{Probability of} \\\\ \\text{car being at} \\\\ \\text{position 4} \\\\ \\\\ \\\\ \\end{matrix} \n$$\n\nThis isn’t limited to position, we could also keep a statevector of all the possible speeds the car could have, and all the possible colours the car could be. With classical systems (like the car example above), this is a silly thing to do as it requires keeping huge vectors when we only really need one number. But as we will see in this chapter, statevectors happen to be a very good way of keeping track of quantum systems, including quantum computers.\n\n\n### 1.2 Qubit Notation <a id=\"notation\"></a>\n\nClassical bits always have a completely well-defined state: they are either `0` or `1` at every point during a computation. There is no more detail we can add to the state of a bit than this. So to write down the state of a of classical bit (`c`), we can just use these two binary values. For example:\n\n c = 0\n\nThis restriction is lifted for quantum bits. Whether we get a `0` or a `1` from a qubit only needs to be well-defined when a measurement is made to extract an output. At that point, it must commit to one of these two options. 
At all other times, its state will be something more complex than can be captured by a simple binary value.\n\nTo see how to describe these, we can first focus on the two simplest cases. As we saw in the last section, it is possible to prepare a qubit in a state for which it definitely gives the outcome `0` when measured.\n\nWe need a name for this state. Let's be unimaginative and call it $0$ . Similarly, there exists a qubit state that is certain to output a `1`. We'll call this $1$. These two states are completely mutually exclusive. Either the qubit definitely outputs a ```0```, or it definitely outputs a ```1```. There is no overlap. One way to represent this with mathematics is to use two orthogonal vectors.\n\n$$\n|0\\rangle = \\begin{bmatrix} 1 \\\\ 0 \\end{bmatrix} \\, \\, \\, \\, |1\\rangle =\\begin{bmatrix} 0 \\\\ 1 \\end{bmatrix}.\n$$\n\nThis is a lot of notation to take in all at once. First, let's unpack the weird $|$ and $\\rangle$. Their job is essentially just to remind us that we are talking about the vectors that represent qubit states labelled $0$ and $1$. This helps us distinguish them from things like the bit values ```0``` and ```1``` or the numbers 0 and 1. It is part of the bra-ket notation, introduced by Dirac.\n\nIf you are not familiar with vectors, you can essentially just think of them as lists of numbers which we manipulate using certain rules. If you are familiar with vectors from your high school physics classes, you'll know that these rules make vectors well-suited for describing quantities with a magnitude and a direction. For example, the velocity of an object is described perfectly with a vector. However, the way we use vectors for quantum states is slightly different to this, so don't hold on too hard to your previous intuition. It's time to do something new!\n\nWith vectors we can describe more complex states than just $|0\\rangle$ and $|1\\rangle$. 
For example, consider the vector\n\n$$\n|q_0\\rangle = \\begin{bmatrix} \\tfrac{1}{\\sqrt{2}} \\\\ \\tfrac{i}{\\sqrt{2}} \\end{bmatrix} .\n$$\n\nTo understand what this state means, we'll need to use the mathematical rules for manipulating vectors. Specifically, we'll need to understand how to add vectors together and how to multiply them by scalars.\n\n<p>\n <details>\n <summary>Reminder: Matrix Addition and Multiplication by Scalars (Click here to expand)</summary>\n <p>To add two vectors, we add their elements together:\n $$|a\\rangle = \\begin{bmatrix}a_0 \\\\ a_1 \\\\ \\vdots \\\\ a_n \\end{bmatrix}, \\quad\n |b\\rangle = \\begin{bmatrix}b_0 \\\\ b_1 \\\\ \\vdots \\\\ b_n \\end{bmatrix}$$\n $$|a\\rangle + |b\\rangle = \\begin{bmatrix}a_0 + b_0 \\\\ a_1 + b_1 \\\\ \\vdots \\\\ a_n + b_n \\end{bmatrix} $$\n </p>\n <p>And to multiply a vector by a scalar, we multiply each element by the scalar:\n $$x|a\\rangle = \\begin{bmatrix}x \\times a_0 \\\\ x \\times a_1 \\\\ \\vdots \\\\ x \\times a_n \\end{bmatrix}$$\n </p>\n <p>These two rules are used to rewrite the vector $|q_0\\rangle$ (as shown above):\n $$\n \\begin{aligned} \n |q_0\\rangle & = \\tfrac{1}{\\sqrt{2}}|0\\rangle + \\tfrac{i}{\\sqrt{2}}|1\\rangle \\\\\n & = \\tfrac{1}{\\sqrt{2}}\\begin{bmatrix}1\\\\0\\end{bmatrix} + \\tfrac{i}{\\sqrt{2}}\\begin{bmatrix}0\\\\1\\end{bmatrix}\\\\\n & = \\begin{bmatrix}\\tfrac{1}{\\sqrt{2}}\\\\0\\end{bmatrix} + \\begin{bmatrix}0\\\\\\tfrac{i}{\\sqrt{2}}\\end{bmatrix}\\\\\n & = \\begin{bmatrix}\\tfrac{1}{\\sqrt{2}} \\\\ \\tfrac{i}{\\sqrt{2}} \\end{bmatrix}\\\\\n \\end{aligned}\n $$\n </details>\n</p>\n<p>\n <details>\n <summary>Reminder: Orthonormal Bases (Click here to expand)</summary>\n <p>\n It was stated before that the two vectors $|0\\rangle$ and $|1\\rangle$ are orthonormal, this means they are both <i>orthogonal</i> and <i>normalised</i>. 
Orthogonal means the vectors are at right angles:\n </p><p><img src=\"images/basis.svg\"></p>\n <p>And normalised means their magnitudes (length of the arrow) is equal to 1. The two vectors $|0\\rangle$ and $|1\\rangle$ are <i>linearly independent</i>, which means we cannot describe $|0\\rangle$ in terms of $|1\\rangle$, and vice versa. However, using both the vectors $|0\\rangle$ and $|1\\rangle$, and our rules of addition and multiplication by scalars, we can describe all possible vectors in 2D space:\n </p><p><img src=\"images/basis2.svg\"></p>\n <p>Because the vectors $|0\\rangle$ and $|1\\rangle$ are linearly independent, and can be used to describe any vector in 2D space using vector addition and scalar multiplication, we say the vectors $|0\\rangle$ and $|1\\rangle$ form a <i>basis</i>. In this case, since they are both orthogonal and normalised, we call it an <i>orthonormal basis</i>.\n </details>\n</p>\n\nSince the states $|0\\rangle$ and $|1\\rangle$ form an orthonormal basis, we can represent any 2D vector with a combination of these two states. This allows us to write the state of our qubit in the alternative form:\n\n$$ |q_0\\rangle = \\tfrac{1}{\\sqrt{2}}|0\\rangle + \\tfrac{i}{\\sqrt{2}}|1\\rangle $$\n\nThis vector, $|q_0\\rangle$ is called the qubit's _statevector,_ it tells us everything we could possibly know about this qubit. For now, we are only able to draw a few simple conclusions about this particular example of a statevector: it is not entirely $|0\\rangle$ and not entirely $|1\\rangle$. Instead, it is described by a linear combination of the two. In quantum mechanics, we typically describe linear combinations such as this using the word 'superposition'.\n\nThough our example state $|q_0\\rangle$ can be expressed as a superposition of $|0\\rangle$ and $|1\\rangle$, it is no less a definite and well-defined qubit state than they are. 
To see this, we can begin to explore how a qubit can be manipulated.\n\n### 1.3 Exploring Qubits with Qiskit <a id=\"exploring-qubits\"></a>\n\nFirst, we need to import all the tools we will need:",
"_____no_output_____"
]
],
[
[
"from qiskit import QuantumCircuit, execute, Aer\nfrom qiskit.visualization import plot_histogram, plot_bloch_vector\nfrom math import sqrt, pi",
"_____no_output_____"
]
],
[
[
"In Qiskit, we use the `QuantumCircuit` object to store our circuits, this is essentially a list of the quantum gates in our circuit and the qubits they are applied to.",
"_____no_output_____"
]
],
[
[
"qc = QuantumCircuit(1) # Create a quantum circuit with one qubit",
"_____no_output_____"
]
],
[
[
"In our quantum circuits, our qubits always start out in the state $|0\\rangle$. We can use the `initialize()` method to transform this into any state. We give `initialize()` the vector we want in the form of a list, and tell it which qubit(s) we want to initialise in this state:",
"_____no_output_____"
]
],
[
[
"qc = QuantumCircuit(1) # Create a quantum circuit with one qubit\ninitial_state = [0,1] # Define initial_state as |1>\nqc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit\nqc.draw() # Let's view our circuit",
"_____no_output_____"
]
],
[
[
"We can then use one of Qiskit’s simulators to view the resulting state of our qubit. To begin with we will use the statevector simulator, but we will explain the different simulators and their uses later.",
"_____no_output_____"
]
],
[
[
"backend = Aer.get_backend('statevector_simulator') # Tell Qiskit how to simulate our circuit",
"_____no_output_____"
]
],
[
[
"To get the results from our circuit, we use `execute` to run our circuit, giving the circuit and the backend as arguments. We then use `.result()` to get the result of this:",
"_____no_output_____"
]
],
[
[
"qc = QuantumCircuit(1) # Create a quantum circuit with one qubit\ninitial_state = [0,1] # Define initial_state as |1>\nqc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit\nresult = execute(qc,backend).result() # Do the simulation, returning the result",
"_____no_output_____"
]
],
[
[
"from `result`, we can then get the final statevector using `.get_statevector()`:",
"_____no_output_____"
]
],
[
[
"qc = QuantumCircuit(1) # Create a quantum circuit with one qubit\ninitial_state = [0,1] # Define initial_state as |1>\nqc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit\nresult = execute(qc,backend).result() # Do the simulation, returning the result\nout_state = result.get_statevector()\nprint(out_state) # Display the output state vector",
"[0.+0.j 1.+0.j]\n"
]
],
[
[
"**Note:** Python uses `j` to represent $i$ in complex numbers. We see a vector with two complex elements: `0.+0.j` = 0, and `1.+0.j` = 1.\n\nLet’s now measure our qubit as we would in a real quantum computer and see the result:",
"_____no_output_____"
]
],
[
[
"qc.measure_all()\nqc.draw()",
"_____no_output_____"
]
],
[
[
"This time, instead of the statevector we will get the counts for the `0` and `1` results using `.get_counts()`:",
"_____no_output_____"
]
],
[
[
"result = execute(qc,backend).result()\ncounts = result.get_counts()\nplot_histogram(counts)",
"_____no_output_____"
]
],
[
[
"We can see that we (unsurprisingly) have a 100% chance of measuring $|1\\rangle$. This time, let’s instead put our qubit into a superposition and see what happens. We will use the state $|q_0\\rangle$ from earlier in this section:\n\n$$ |q_0\\rangle = \\tfrac{1}{\\sqrt{2}}|0\\rangle + \\tfrac{i}{\\sqrt{2}}|1\\rangle $$\n\nWe need to add these amplitudes to a python list. To add a complex amplitude we use `complex`, giving the real and imaginary parts as arguments:",
"_____no_output_____"
]
],
[
[
"initial_state = [1/sqrt(2), 1j/sqrt(2)] # Define state |q>",
"_____no_output_____"
]
],
[
[
"And we then repeat the steps for initialising the qubit as before:",
"_____no_output_____"
]
],
[
[
"qc = QuantumCircuit(1) # Must redefine qc\nqc.initialize(initial_state, 0) # Initialise the 0th qubit in the state `initial_state`\nstate = execute(qc,backend).result().get_statevector() # Execute the circuit\nprint(state) # Print the result",
"[0.70710678+0.j 0. +0.70710678j]\n"
],
[
"results = execute(qc,backend).result().get_counts()\nplot_histogram(results)",
"_____no_output_____"
]
],
[
[
"We can see we have equal probability of measuring either $|0\\rangle$ or $|1\\rangle$. To explain this, we need to talk about measurement.\n\n## 2. The Rules of Measurement <a id=\"rules-measurement\"></a>\n### 2.1 A Very Important Rule <a id=\"important-rule\"></a>\n\nThere is a simple rule for measurement. To find the probability of measuring a state $|\\psi \\rangle$ in the state $|x\\rangle$ we do:\n\n$$p(|x\\rangle) = | \\langle \\psi| x \\rangle|^2$$\n\nThe symbols $\\langle$ and $|$ tell us $\\langle \\psi |$ is a row vector. In quantum mechanics we call the column vectors _kets_ and the row vectors _bras._ Together they make up _bra-ket_ notation. Any ket $|a\\rangle$ has a corresponding bra $\\langle a|$, and we convert between them using the conjugate transpose.\n\n<details>\n <summary>Reminder: The Inner Product (Click here to expand)</summary>\n <p>There are different ways to multiply vectors, here we use the <i>inner product</i>. The inner product is a generalisation of the <i>dot product</i> which you may already be familiar with. In this guide, we use the inner product between a bra (row vector) and a ket (column vector), and it follows this rule:\n \n$$\\langle a| = \\begin{bmatrix}a_0^*, & a_1^*, & \\dots & a_n^* \\end{bmatrix}, \\quad\n |b\\rangle = \\begin{bmatrix}b_0 \\\\ b_1 \\\\ \\vdots \\\\ b_n \\end{bmatrix}$$\n $$\\langle a|b\\rangle = a_0^* b_0 + a_1^* b_1 \\dots a_n^* b_n$$\n </p>\n <p>We can see that the inner product of two vectors always gives us a scalar. 
A useful thing to remember is that the inner product of two orthogonal vectors is 0, for example if we have the orthogonal vectors $|0\\rangle$ and $|1\\rangle$:\n $$\\langle1|0\\rangle = \\begin{bmatrix} 0 , & 1\\end{bmatrix}\\begin{bmatrix}1 \\\\ 0\\end{bmatrix} = 0$$\n </p>\n <p>Additionally, remember that the vectors $|0\\rangle$ and $|1\\rangle$ are also normalised (magnitudes are equal to 1):\n \n$$\n \\begin{aligned} \n \\langle0|0\\rangle & = \\begin{bmatrix} 1 , & 0\\end{bmatrix}\\begin{bmatrix}1 \\\\ 0\\end{bmatrix} = 1 \\\\\n \\langle1|1\\rangle & = \\begin{bmatrix} 0 , & 1\\end{bmatrix}\\begin{bmatrix}0 \\\\ 1\\end{bmatrix} = 1\n \\end{aligned}\n$$\n </p>\n</details>\n\nIn the equation above, $|x\\rangle$ can be any qubit state. To find the probability of measuring $|x\\rangle$, we take the inner product of $|x\\rangle$ and the state we are measuring (in this case $|\\psi\\rangle$), then square the magnitude. This may seem a little convoluted, but it will soon become second nature.\n\nIf we look at the state $|q_0\\rangle$ from before, we can see the probability of measuring $|0\\rangle$ is indeed $0.5$:\n\n$$\n\\begin{aligned}\n|q_0\\rangle & = \\tfrac{1}{\\sqrt{2}}|0\\rangle + \\tfrac{i}{\\sqrt{2}}|1\\rangle \\\\\n\\langle q_0| & = \\tfrac{1}{\\sqrt{2}}\\langle0| - \\tfrac{i}{\\sqrt{2}}\\langle 1| \\\\\n\\langle q_0| 0 \\rangle & = \\tfrac{1}{\\sqrt{2}}\\langle 0|0\\rangle - \\tfrac{i}{\\sqrt{2}}\\langle 1|0\\rangle \\\\\n\\langle q_0| 0 \\rangle & = \\tfrac{1}{\\sqrt{2}}\\cdot 1 - \\tfrac{i}{\\sqrt{2}} \\cdot 0\\\\\n\\langle q_0| 0 \\rangle & = \\tfrac{1}{\\sqrt{2}}\\\\\n|\\langle q_0| 0 \\rangle|^2 & = \\tfrac{1}{2}\n\\end{aligned}\n$$\n\nYou should verify the probability of measuring $|1\\rangle$ as an exercise.\n\nThis rule governs how we get information out of quantum states. It is therefore very important for everything we do in quantum computation. 
It also immediately implies several important facts.\n\n### 2.2 The Implications of this Rule <a id=\"implications\"></a>\n### #1 Normalisation\n\nThe rule shows us that amplitudes are related to probabilities. If we want the probabilities to add up to 1 (which they should!), we need to ensure that the statevector is properly normalized. Specifically, we need the magnitude of the state vector to be 1.\n\n$$ \\langle\\psi|\\psi\\rangle = 1 \\\\ $$\n\nThus if:\n\n$$ |\\psi\\rangle = \\alpha|0\\rangle + \\beta|1\\rangle $$\n\nThen:\n\n$$ \\sqrt{|\\alpha|^2 + |\\beta|^2} = 1 $$\n\nThis explains the factors of $\\sqrt{2}$ you have seen throughout this chapter. In fact, if we try to give `initialize()` a vector that isn’t normalised, it will give us an error:",
"_____no_output_____"
]
],
[
[
"vector = [1,1]\nqc.initialize(vector, 0)",
"_____no_output_____"
]
],
[
[
"#### Quick Exercise\n1. Create a state vector that will give a $1/3$ probability of measuring $|0\\rangle$.\n2. Create a different state vector that will give the same measurement probabilities.\n3. Verify that the probability of measuring $|1\\rangle$ for these two states is $2/3$.",
"_____no_output_____"
],
[
"You can check your answer in the widget below (you can use 'pi' and 'sqrt' in the vector):",
"_____no_output_____"
]
],
[
[
"# Run the code in this cell to interact with the widget\nfrom qiskit_textbook.widgets import state_vector_exercise\nstate_vector_exercise(target=1/3)",
"_____no_output_____"
]
],
[
[
"### #2 Alternative measurement\n\nThe measurement rule gives us the probability $p(|x\\rangle)$ that a state $|\\psi\\rangle$ is measured as $|x\\rangle$. Nowhere does it tell us that $|x\\rangle$ can only be either $|0\\rangle$ or $|1\\rangle$.\n\nThe measurements we have considered so far are in fact only one of an infinite number of possible ways to measure a qubit. For any orthogonal pair of states, we can define a measurement that would cause a qubit to choose between the two.\n\nThis possibility will be explored more in the next section. For now, just bear in mind that $|x\\rangle$ is not limited to being simply $|0\\rangle$ or $|1\\rangle$.",
"_____no_output_____"
],
[
"### #3 Global Phase\n\nWe know that measuring the state $|1\\rangle$ will give us the output `1` with certainty. But we are also able to write down states such as \n\n$$\\begin{bmatrix}0 \\\\ i\\end{bmatrix} = i|1\\rangle.$$\n\nTo see how this behaves, we apply the measurement rule.\n\n$$ |\\langle x| (i|1\\rangle) |^2 = | i \\langle x|1\\rangle|^2 = |\\langle x|1\\rangle|^2 $$\n\nHere we find that the factor of $i$ disappears once we take the magnitude of the complex number. This effect is completely independent of the measured state $|x\\rangle$. It does not matter what measurement we are considering, the probabilities for the state $i|1\\rangle$ are identical to those for $|1\\rangle$. Since measurements are the only way we can extract any information from a qubit, this implies that these two states are equivalent in all ways that are physically relevant.\n\nMore generally, we refer to any overall factor $\\gamma$ on a state for which $|\\gamma|=1$ as a 'global phase'. States that differ only by a global phase are physically indistinguishable.\n\n$$ |\\langle x| ( \\gamma |a\\rangle) |^2 = | \\gamma \\langle x|a\\rangle|^2 = |\\langle x|a\\rangle|^2 $$\n\nNote that this is distinct from the phase difference _between_ terms in a superposition, which is known as the 'relative phase'. This becomes relevant once we consider different types of measurement and multiple qubits.\n\n\n### #4 The Observer Effect\n\nWe know that the amplitudes contain information about the probability of us finding the qubit in a specific state, but once we have measured the qubit, we know with certainty what the state of the qubit is. For example, if we measure a qubit in the state:\n\n$$ |q\\rangle = \\alpha|0\\rangle + \\beta|1\\rangle$$\n\nAnd find it in the state $|0\\rangle$, if we measure again, there is a 100% chance of finding the qubit in the state $|0\\rangle$. 
This means the act of measuring _changes_ the state of our qubits.\n\n$$ |q\\rangle = \\begin{bmatrix} \\alpha \\\\ \\beta \\end{bmatrix} \\xrightarrow{\\text{Measure }|0\\rangle} |q\\rangle = |0\\rangle = \\begin{bmatrix} 1 \\\\ 0 \\end{bmatrix}$$\n\nWe sometimes refer to this as _collapsing_ the state of the qubit. It is a potent effect, and so one that must be used wisely. For example, were we to constantly measure each of our qubits to keep track of their value at each point in a computation, they would always simply be in a well-defined state of either $|0\\rangle$ or $|1\\rangle$. As such, they would be no different from classical bits and our computation could be easily replaced by a classical computation. To acheive truly quantum computation we must allow the qubits to explore more complex states. Measurements are therefore only used when we need to extract an output. This means that we often place the all measurements at the end of our quantum circuit. \n\nWe can demonstrate this using Qiskit’s statevector simulator. Let's initialise a qubit in superposition:",
"_____no_output_____"
]
],
[
[
"qc = QuantumCircuit(1) # Redefine qc\ninitial_state = [0.+1.j/sqrt(2),1/sqrt(2)+0.j]\nqc.initialize(initial_state, 0)\nqc.draw()",
"_____no_output_____"
]
],
[
[
"This should initialise our qubit in the state:\n\n$$ |q\\rangle = \\tfrac{i}{\\sqrt{2}}|0\\rangle + \\tfrac{1}{\\sqrt{2}}|1\\rangle $$\n\nWe can verify this using the simulator:",
"_____no_output_____"
]
],
[
[
"state = execute(qc, backend).result().get_statevector()\nprint(\"Qubit State = \" + str(state))",
"Qubit State = [0. +0.70710678j 0.70710678+0.j ]\n"
]
],
[
[
"We can see here the qubit is initialised in the state `[0.+0.70710678j 0.70710678+0.j]`, which is the state we expected.\n\nLet’s now measure this qubit:",
"_____no_output_____"
]
],
[
[
"qc.measure_all()\nqc.draw()",
"_____no_output_____"
]
],
[
[
"When we simulate this entire circuit, we can see that one of the amplitudes is _always_ 0:",
"_____no_output_____"
]
],
[
[
"state = execute(qc, backend).result().get_statevector()\nprint(\"State of Measured Qubit = \" + str(state))",
"State of Measured Qubit = [0.+0.j 1.+0.j]\n"
]
],
[
[
"You can re-run this cell a few times to reinitialise the qubit and measure it again. You will notice that either outcome is equally probable, but that the state of the qubit is never a superposition of $|0\\rangle$ and $|1\\rangle$. Somewhat interestingly, the global phase on the state $|0\\rangle$ survives, but since this is global phase, we can never measure it on a real quantum computer.\n\n### A Note about Quantum Simulators\n\nWe can see that writing down a qubit’s state requires keeping track of two complex numbers, but when using a real quantum computer we will only ever receive a yes-or-no (`0` or `1`) answer for each qubit. The output of a 10-qubit quantum computer will look like this:\n\n`0110111110`\n\nJust 10 bits, no superposition or complex amplitudes. When using a real quantum computer, we cannot see the states of our qubits mid-computation, as this would destroy them! This behaviour is not ideal for learning, so Qiskit provides different quantum simulators: The `qasm_simulator` behaves as if you are interacting with a real quantum computer, and will not allow you to use `.get_statevector()`. Alternatively, `statevector_simulator`, (which we have been using in this chapter) does allow peeking at the quantum states before measurement, as we have seen. \n\n\n",
"_____no_output_____"
],
[
"## 3. The Bloch Sphere <a id=\"bloch-sphere\"></a>\n### 3.1 Describing the Restricted Qubit State <a id=\"bloch-sphere-1\"></a>\n\nWe saw earlier in this chapter that the general state of a qubit ($|q\\rangle$) is:\n\n$$\n|q\\rangle = \\alpha|0\\rangle + \\beta|1\\rangle\n$$\n\n$$\n\\alpha, \\beta \\in \\mathbb{C}\n$$\n\n(The second line tells us $\\alpha$ and $\\beta$ are complex numbers). The first two implications in section 2 tell us that we cannot differentiate between some of these states. This means we can be more specific in our description of the qubit. \n\nFirstly, since we cannot measure global phase, we can only measure the difference in phase between the states $|0\\rangle$ and $|1\\rangle$. Instead of having $\\alpha$ and $\\beta$ be complex, we can confine them to the real numbers and add a term to tell us the relative phase between them:\n\n$$\n|q\\rangle = \\alpha|0\\rangle + e^{i\\phi}\\beta|1\\rangle\n$$\n\n$$\n\\alpha, \\beta, \\phi \\in \\mathbb{R}\n$$\n\nFinally, since the qubit state must be normalised, i.e.\n\n$$\n\\sqrt{\\alpha^2 + \\beta^2} = 1\n$$\n\nwe can use the trigonometric identity:\n\n$$\n\\sqrt{\\sin^2{x} + \\cos^2{x}} = 1\n$$\n\nto describe the real $\\alpha$ and $\\beta$ in terms of one variable, $\\theta$:\n\n$$\n\\alpha = \\cos{\\tfrac{\\theta}{2}}, \\quad \\beta=\\sin{\\tfrac{\\theta}{2}}\n$$\n\nFrom this we can describe the state of any qubit using the two variables $\\phi$ and $\\theta$:\n\n$$\n|q\\rangle = \\cos{\\tfrac{\\theta}{2}}|0\\rangle + e^{i\\phi}\\sin{\\tfrac{\\theta}{2}}|1\\rangle\n$$\n\n$$\n\\theta, \\phi \\in \\mathbb{R}\n$$\n\n### 3.2 Visually Representing a Qubit State <a id=\"bloch-sphere-2\"></a>\n\nWe want to plot our general qubit state:\n\n$$\n|q\\rangle = \\cos{\\tfrac{\\theta}{2}}|0\\rangle + e^{i\\phi}\\sin{\\tfrac{\\theta}{2}}|1\\rangle\n$$\n\nIf we interpret $\\theta$ and $\\phi$ as spherical co-ordinates ($r = 1$, since the magnitude of the qubit state is $1$), we can plot any qubit state on the 
surface of a sphere, known as the _Bloch sphere._\n\nBelow we have plotted a qubit in the state $|{+}\\rangle$. In this case, $\\theta = \\pi/2$ and $\\phi = 0$.\n\n(Qiskit has a function to plot a bloch sphere, `plot_bloch_vector()`, but at the time of writing it only takes cartesian coordinates. We have included a function that does the conversion automatically).\n",
"_____no_output_____"
]
],
[
[
"from qiskit_textbook.widgets import plot_bloch_vector_spherical\ncoords = [pi/2,0,1] # [Theta, Phi, Radius]\nplot_bloch_vector_spherical(coords) # Bloch Vector with spherical coordinates",
"_____no_output_____"
]
],
[
[
"#### Warning!\nWhen first learning about qubit states, it's easy to confuse the qubit's _statevector_ with its _Bloch vector_. Remember the statevector is the vector discussed in [1.1](#notation), that holds the amplitudes for the two states our qubit can be in. The Bloch vector is a visualisation tool that maps the 2D, complex statevector onto real, 3D space.",
"_____no_output_____"
],
[
"#### Quick Exercise\nUse `plot_bloch_vector()` or `plot_bloch_sphere_spherical()` to plot a qubit in the states:\n1. $|0\\rangle$\n2. $|1\\rangle$\n3. $\\tfrac{1}{\\sqrt{2}}(|0\\rangle + |1\\rangle)$\n4. $\\tfrac{1}{\\sqrt{2}}(|0\\rangle - i|1\\rangle)$\n5. $\\tfrac{1}{\\sqrt{2}}\\begin{bmatrix}i\\\\1\\end{bmatrix}$",
"_____no_output_____"
],
[
"We have also included below a widget that converts from spherical co-ordinates to cartesian, for use with `plot_bloch_vector()`:",
"_____no_output_____"
]
],
[
[
"from qiskit_textbook.widgets import bloch_calc\nbloch_calc()",
"_____no_output_____"
],
[
"import qiskit\nqiskit.__qiskit_version__",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e742de8faeff1d0a05d01e00e725f3138bc4ee0c | 876,044 | ipynb | Jupyter Notebook | nbs/dl1/lesson3-planet_20181210.ipynb | cedrickchee/fastai-course-v3 | 2db01356254cc3cdbc0e954ed7a7357d09becf94 | [
"Apache-2.0"
] | 5 | 2019-01-12T08:31:47.000Z | 2019-06-05T03:58:14.000Z | nbs/dl1/lesson3-planet_20181210.ipynb | cedrickchee/fastai-course-v3 | 2db01356254cc3cdbc0e954ed7a7357d09becf94 | [
"Apache-2.0"
] | null | null | null | nbs/dl1/lesson3-planet_20181210.ipynb | cedrickchee/fastai-course-v3 | 2db01356254cc3cdbc0e954ed7a7357d09becf94 | [
"Apache-2.0"
] | 3 | 2019-01-24T17:20:39.000Z | 2020-02-07T19:02:13.000Z | 328.845345 | 434,588 | 0.91533 | [
[
[
"## Multi-label prediction with Planet Amazon dataset",
"_____no_output_____"
]
],
[
[
"%reload_ext autoreload\n%autoreload 2\n%matplotlib inline",
"_____no_output_____"
],
[
"from fastai import *\nfrom fastai.vision import *",
"_____no_output_____"
]
],
[
[
"## Getting the data",
"_____no_output_____"
],
[
"The planet dataset isn't available on the [fastai dataset page](https://course.fast.ai/datasets) due to copyright restrictions. You can download it from Kaggle however. Let's see how to do this by using the [Kaggle API](https://github.com/Kaggle/kaggle-api) as it's going to be pretty useful to you if you want to join a competition or use other Kaggle datasets later on.\n\nFirst, install the Kaggle API by uncommenting the following line and executing it, or by executing it in your terminal (depending on your platform you may need to modify this slightly to either add `source activate fastai` or similar, or prefix `pip` with a path. Have a look at how `conda install` is called for your platform in the appropriate *Returning to work* section of https://course-v3.fast.ai/. (Depending on your environment, you may also need to append \"--user\" to the command.)",
"_____no_output_____"
]
],
[
[
"! pip install kaggle --upgrade",
"Collecting kaggle\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9e/94/5370052b9cbc63a927bda08c4f7473a35d3bb27cc071baa1a83b7f783352/kaggle-1.5.1.1.tar.gz (53kB)\n\u001b[K 100% |████████████████████████████████| 61kB 2.6MB/s ta 0:00:01\n\u001b[?25hCollecting urllib3<1.23.0,>=1.15 (from kaggle)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/63/cb/6965947c13a94236f6d4b8223e21beb4d576dc72e8130bd7880f600839b8/urllib3-1.22-py2.py3-none-any.whl (132kB)\n\u001b[K 100% |████████████████████████████████| 133kB 8.3MB/s ta 0:00:01\n\u001b[?25hRequirement not upgraded as not directly required: six>=1.10 in /home/cedric/anaconda3/lib/python3.7/site-packages (from kaggle) (1.11.0)\nRequirement not upgraded as not directly required: certifi in /home/cedric/anaconda3/lib/python3.7/site-packages (from kaggle) (2018.8.24)\nRequirement not upgraded as not directly required: python-dateutil in /home/cedric/anaconda3/lib/python3.7/site-packages (from kaggle) (2.7.3)\nRequirement not upgraded as not directly required: requests in /home/cedric/anaconda3/lib/python3.7/site-packages (from kaggle) (2.19.1)\nRequirement not upgraded as not directly required: tqdm in /home/cedric/anaconda3/lib/python3.7/site-packages (from kaggle) (4.26.0)\nCollecting python-slugify (from kaggle)\n Downloading https://files.pythonhosted.org/packages/00/ad/c778a6df614b6217c30fe80045b365bfa08b5dd3cb02e8b37a6d25126781/python-slugify-1.2.6.tar.gz\nRequirement not upgraded as not directly required: chardet<3.1.0,>=3.0.2 in /home/cedric/anaconda3/lib/python3.7/site-packages (from requests->kaggle) (3.0.4)\nRequirement not upgraded as not directly required: idna<2.8,>=2.5 in /home/cedric/anaconda3/lib/python3.7/site-packages (from requests->kaggle) (2.7)\nCollecting Unidecode>=0.04.16 (from python-slugify->kaggle)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/31/39/53096f9217b057cb049fe872b7fc7ce799a1a89b76cf917d9639e7a558b5/Unidecode-1.0.23-py2.py3-none-any.whl 
(237kB)\n\u001b[K 100% |████████████████████████████████| 245kB 35.5MB/s ta 0:00:01\n\u001b[?25hBuilding wheels for collected packages: kaggle, python-slugify\n Running setup.py bdist_wheel for kaggle ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /home/cedric/.cache/pip/wheels/5a/2d/0c/9fc539e558586b9ed9127916a7f4e620163c24cc97460b1188\n Running setup.py bdist_wheel for python-slugify ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /home/cedric/.cache/pip/wheels/e3/65/da/2045deea3098ed7471eca0e2460cfbd3fdfe8c1d6fa6fcac92\nSuccessfully built kaggle python-slugify\n\u001b[31mtwisted 18.7.0 requires PyHamcrest>=1.9.0, which is not installed.\u001b[0m\nInstalling collected packages: urllib3, Unidecode, python-slugify, kaggle\n Found existing installation: urllib3 1.23\n Uninstalling urllib3-1.23:\n Successfully uninstalled urllib3-1.23\nSuccessfully installed Unidecode-1.0.23 kaggle-1.5.1.1 python-slugify-1.2.6 urllib3-1.22\n\u001b[33mYou are using pip version 10.0.1, however version 18.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n"
]
],
[
[
"Then you need to upload your credentials from Kaggle on your instance. Login to kaggle and click on your profile picture on the top left corner, then 'My account'. Scroll down until you find a button named 'Create New API Token' and click on it. This will trigger the download of a file named 'kaggle.json'.\n\nUpload this file to the directory this notebook is running in, by clicking \"Upload\" on your main Jupyter page, then uncomment and execute the next two commands (or run them in a terminal).",
"_____no_output_____"
]
],
[
[
"! mkdir -p ~/.kaggle/\n! mv kaggle.json ~/.kaggle/",
"_____no_output_____"
]
],
[
[
"You're all set to download the data from [planet competition](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space). You **first need to go to its main page and accept its rules**, and run the two cells below (uncomment the shell commands to download and unzip the data). If you get a `403 forbidden` error it means you haven't accepted the competition rules yet (you have to go to the competition page, click on *Rules* tab, and then scroll to the bottom to find the *accept* button).",
"_____no_output_____"
]
],
[
[
"path = Config.data_path()/'planet'\npath.mkdir(parents=True, exist_ok=True)\npath",
"_____no_output_____"
],
[
"! kaggle --version",
"Kaggle API 1.5.1.1\r\n"
],
[
"! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train-jpg.tar.7z -p {path}\n! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train_v2.csv -p {path}",
"Downloading train-jpg.tar.7z to /home/cedric/.fastai/data/planet\n100%|█████████████████████████████████████████| 600M/600M [00:04<00:00, 140MB/s]\n\nDownloading train_v2.csv.zip to /home/cedric/.fastai/data/planet\n 0%| | 0.00/159k [00:00<?, ?B/s]\n100%|████████████████████████████████████████| 159k/159k [00:00<00:00, 70.7MB/s]\n"
],
[
"! unzip -q -n {path}/train_v2.csv.zip -d {path}",
"_____no_output_____"
]
],
[
[
"To extract the content of this file, we'll need 7zip, so uncomment the following line if you need to install it (or run `sudo apt install p7zip` in your terminal).",
"_____no_output_____"
]
],
[
[
"! conda install -y -c haasad eidl7zip",
"Solving environment: done\n\n## Package Plan ##\n\n environment location: /home/cedric/anaconda3\n\n added / updated specs: \n - eidl7zip\n\n\nThe following packages will be downloaded:\n\n package | build\n ---------------------------|-----------------\n eidl7zip-1.0.0 | 1 565 KB haasad\n\nThe following NEW packages will be INSTALLED:\n\n eidl7zip: 1.0.0-1 haasad\n\nThe following packages will be UPDATED:\n\n certifi: 2018.8.24-py37_1 --> 2018.10.15-py37_0\n\n\nDownloading and Extracting Packages\neidl7zip-1.0.0 | 565 KB | ##################################### | 100% \nPreparing transaction: done\nVerifying transaction: done\nExecuting transaction: done\n"
]
],
[
[
"And now we can unpack the data (uncomment to run - this might take a few minutes to complete).",
"_____no_output_____"
]
],
[
[
"! 7za -bd -y -so x {path}/train-jpg.tar.7z | tar xf - -C {path}",
"_____no_output_____"
],
[
"!ls {path}/train-jpg | head -n10",
"train_0.jpg\r\ntrain_1.jpg\r\ntrain_10.jpg\r\ntrain_100.jpg\r\ntrain_1000.jpg\r\ntrain_10000.jpg\r\ntrain_10001.jpg\r\ntrain_10002.jpg\r\ntrain_10003.jpg\r\ntrain_10004.jpg\r\nls: write error: Broken pipe\r\n"
]
],
[
[
"## Multiclassification",
"_____no_output_____"
],
[
"Contrary to the pets dataset studied in last lesson, here each picture can have multiple labels. If we take a look at the csv file containing the labels (in 'train_v2.csv' here) we see that each 'image_name' is associated with several tags separated by spaces.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(path/'train_v2.csv')\ndf.head()",
"_____no_output_____"
]
],
[
[
"To put this in a `DataBunch` while using the [data block API](https://docs.fast.ai/data_block.html), we then need to using `ImageMultiDataset` (and not `ImageClassificationDataset`). This will make sure the model created has the proper loss function to deal with the multiple classes.",
"_____no_output_____"
]
],
[
[
"# This is a set of transformation which is pretty good for satellite images\ntfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)",
"_____no_output_____"
]
],
[
[
"We use parentheses around the data block pipeline below, so that we can use a multiline statement without needing to add '\\\\'.",
"_____no_output_____"
]
],
[
[
"np.random.seed(42)\nsrc = (ImageItemList.from_csv(path, 'train_v2.csv', folder='train-jpg', suffix='.jpg')\n .random_split_by_pct(0.2)\n .label_from_df(sep=' '))",
"_____no_output_____"
],
[
"data = (src.transform(tfms, size=128)\n .databunch().normalize(imagenet_stats))",
"_____no_output_____"
]
],
[
[
"`show_batch` still works, and show us the different labels separated by `;`.",
"_____no_output_____"
]
],
[
[
"data.show_batch(rows=3, figsize=(12,9))",
"_____no_output_____"
]
],
[
[
"### Initial Model",
"_____no_output_____"
],
[
"To create a `Learner` we use the same function as in lesson 1. Our base architecture is resnet50, but the metrics are a little bit different: we use `accuracy_thresh` instead of `accuracy`. In lesson 1, we determined the prediction for a given class by picking the final activation that was the biggest, but here, each activation can be 0. or 1. `accuracy_thresh` selects the ones that are above a certain threshold (0.5 by default) and compares them to the ground truth.\n\nAs for Fbeta, it's the metric that was used by Kaggle on this competition. See [here](https://en.wikipedia.org/wiki/F1_score) for more details.",
"_____no_output_____"
]
],
[
[
"arch = models.resnet50",
"_____no_output_____"
],
[
"acc_02 = partial(accuracy_thresh, thresh=0.2)\nf_score = partial(fbeta, thresh=0.2)",
"_____no_output_____"
],
[
"learn = create_cnn(data, arch, metrics=[acc_02, f_score])",
"_____no_output_____"
]
],
[
[
"We use the LR Finder to pick a good learning rate.",
"_____no_output_____"
]
],
[
[
"learn.lr_find()",
"LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.\n"
],
[
"learn.recorder.plot()",
"_____no_output_____"
]
],
[
[
"Then we can fit the head of our network.",
"_____no_output_____"
]
],
[
[
"lr = 0.01",
"_____no_output_____"
],
[
"learn.fit_one_cycle(5, slice(lr))",
"_____no_output_____"
],
[
"learn.save('stage-1-rn50')",
"_____no_output_____"
]
],
[
[
"...And fine-tune the whole model:",
"_____no_output_____"
]
],
[
[
"learn.unfreeze()",
"_____no_output_____"
],
[
"learn.lr_find()",
"LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.\n"
],
[
"learn.recorder.plot()",
"_____no_output_____"
],
[
"learn.fit_one_cycle(5, slice(1e-5, lr/5))",
"_____no_output_____"
],
[
"learn.save('stage-2-rn50')",
"_____no_output_____"
],
[
"learn.load('stage-2-rn50')",
"_____no_output_____"
]
],
[
[
"### Use Full Size Images",
"_____no_output_____"
],
[
"We've used the image size of 128px in the initial model. That's simply because we want to try it out very quickly.\n\nNow, let's try to use the full size images.",
"_____no_output_____"
]
],
[
[
"data = (src.transform(tfms, size=256)\n .databunch(bs=32).normalize(imagenet_stats))",
"_____no_output_____"
],
[
"learn.data = data\ndata.train_ds[0][0].shape",
"_____no_output_____"
],
[
"learn.freeze()",
"_____no_output_____"
]
],
[
[
"Notice that we are using **transfer learning**. Instead of training from the beginning, we just start from model we trained with smaller images.",
"_____no_output_____"
]
],
[
[
"learn.lr_find()\nlearn.recorder.plot()",
"LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.\n"
]
],
[
[
"**Training Stage 1 - Freeze**",
"_____no_output_____"
]
],
[
[
"lr = 1e-3/2",
"_____no_output_____"
],
[
"learn.fit_one_cycle(5, slice(lr))",
"_____no_output_____"
],
[
"learn.save('stage-1-256-rn50')",
"_____no_output_____"
],
[
"learn.recorder.plot_losses()",
"_____no_output_____"
],
[
"learn.recorder.plot_lr()",
"_____no_output_____"
]
],
[
[
"**Training Stage 2 - Unfreeze**",
"_____no_output_____"
]
],
[
[
"learn.unfreeze()",
"_____no_output_____"
],
[
"learn.lr_find()\nlearn.recorder.plot()",
"LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.\n"
],
[
"learn.fit_one_cycle(5, slice(1e-5, lr/5))",
"_____no_output_____"
],
[
"learn.recorder.plot_losses()",
"_____no_output_____"
],
[
"learn.save('stage-2-256-rn50')",
"_____no_output_____"
]
],
[
[
"You won't really know how you're going until you submit to Kaggle, since the leaderboard isn't using the same subset as we have for training. But as a guide, 50th place (out of 938 teams) on the private leaderboard was a score of `0.930`.",
"_____no_output_____"
],
[
"## fin",
"_____no_output_____"
],
[
"(We'll look at this section later - please don't ask about it just yet! :) )",
"_____no_output_____"
]
],
[
[
"# ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p {path} \n# ! 7za -bd -y -so x {path}/test-jpg.tar.7z | tar xf - -C {path}",
"Downloading test-jpg.tar.7z to /home/jhoward/.fastai/data/planet\n 99%|███████████████████████████████████████▋| 599M/603M [00:11<00:00, 88.6MB/s]\n100%|████████████████████████████████████████| 603M/603M [00:11<00:00, 53.2MB/s]\n"
],
[
"learn.load('stage-2-256-rn50')",
"_____no_output_____"
]
],
[
[
"## Test",
"_____no_output_____"
],
[
"### Download test dataset",
"_____no_output_____"
],
[
"Use Kaggle API to download the test dataset:",
"_____no_output_____"
]
],
[
[
"! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p {path}\n! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg-additional.tar.7z -p {path}",
"Downloading test-jpg.tar.7z to /home/cedric/.fastai/data/planet\n 99%|████████████████████████████████████████▋| 598M/603M [00:04<00:00, 131MB/s]\n100%|█████████████████████████████████████████| 603M/603M [00:04<00:00, 142MB/s]\nDownloading test-jpg-additional.tar.7z to /home/cedric/.fastai/data/planet\n 98%|████████████████████████████████████████ | 297M/304M [00:02<00:00, 110MB/s]\n100%|█████████████████████████████████████████| 304M/304M [00:02<00:00, 115MB/s]\n"
],
[
"! 7za -bd -y -so x {path}/test-jpg.tar.7z | tar xf - -C {path}",
"_____no_output_____"
],
[
"! 7za -bd -y -so x {path}/test-jpg-additional.tar.7z | tar xf - -C {path}",
"_____no_output_____"
],
[
"! mv {path}/test-jpg-additional/* {path}/test-jpg",
"_____no_output_____"
],
[
"! ls {path}/test-jpg | wc -l",
"61191\r\n"
],
[
"! rm -rf {path}/test-jpg-additional",
"_____no_output_____"
]
],
[
[
"### Add test data to ImageItemList and ImageDataBunch",
"_____no_output_____"
]
],
[
[
"type(src)",
"_____no_output_____"
],
[
"learn.data =(src.add_test_folder('test-jpg')\n .transform(tfms, size=256)\n .databunch(bs=8).normalize(imagenet_stats))",
"_____no_output_____"
],
[
"# Sanity check\nlen(learn.data.train_ds), len(learn.data.valid_ds), len(learn.data.test_ds)",
"_____no_output_____"
],
[
"# Sanity check\nlen(learn.data.train_dl), len(learn.data.valid_dl), len(learn.data.test_dl)",
"_____no_output_____"
],
[
"# Sanity check\nlearn.data.test_ds",
"_____no_output_____"
]
],
[
[
"### Kaggle Submission",
"_____no_output_____"
],
[
"Applies fastai Test-Time-Augmentation ([TTA](https://docs.fast.ai/tta.html)) to predict on test set:",
"_____no_output_____"
]
],
[
[
"preds = learn.TTA(ds_type=DatasetType.Test) # TTA brings test time functionality to the Learner class.",
"_____no_output_____"
],
[
"torch.save(preds, path/'preds-tta-256-rn50.pt')",
"_____no_output_____"
]
],
[
[
"Get final predictions:",
"_____no_output_____"
]
],
[
[
"final_preds = preds[0] # note, preds[1] is y, which is the ground truth/target\nfinal_preds.shape",
"_____no_output_____"
],
[
"# Sanity check\nlen(final_preds[1])",
"_____no_output_____"
],
[
"# Sanity check\nfinal_preds[0][0]",
"_____no_output_____"
],
[
"# PS: I have taken these parts of code from Arunoda's notebook.\n\ndef find_tags(pred, thresh, show_probs):\n classes = ''\n for idx, val in enumerate(pred):\n if val > thresh:\n if show_probs == True:\n classes = f'{classes} {learn.data.classes[idx]} ({val})'\n else:\n classes = f'{classes} {learn.data.classes[idx]}'\n return classes.strip()\n\ndef predict(f_preds, idx, thresh):\n pred_vals = f_preds[idx]\n tags = find_tags(pred_vals, thresh, True)\n print(tags)\n img = learn.data.test_ds[idx][0]\n return img",
"_____no_output_____"
],
[
"predict(final_preds, 0, 0.2)",
"agriculture (0.8172218799591064) haze (0.4136252701282501) partly_cloudy (0.37629154324531555) primary (0.9813070297241211)\n"
],
[
"predict(final_preds, 20, 0.2)",
"cloudy (0.3825205862522125) haze (0.5826690196990967) primary (0.6680986285209656)\n"
],
[
"def get_row(f_preds, idx, thresh):\n pred = f_preds[idx]\n tags = find_tags(pred, thresh, False)\n image_path = learn.data.test_ds.x.items[idx]\n image_name = re.search(r'([^/]+)$', f'{image_path}')[0].replace('.jpg', '')\n \n return image_name, tags",
"_____no_output_____"
],
[
"get_row(final_preds, 0, 0.2)",
"_____no_output_____"
],
[
"get_row(final_preds, 20, 0.2)",
"_____no_output_____"
]
],
[
[
"Create data frame for Kaggle submission file:",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(columns=['image_name', 'tags'])\n\nfor idx in range(len(final_preds)):\n if idx % 1000 == 0:\n print(f'Progress: {idx}')\n\n image_name, tags = get_row(final_preds, idx, 0.2)\n df.loc[idx] = [image_name, tags]",
"Progress: 0\nProgress: 1000\nProgress: 2000\nProgress: 3000\nProgress: 4000\nProgress: 5000\nProgress: 6000\nProgress: 7000\nProgress: 8000\nProgress: 9000\nProgress: 10000\nProgress: 11000\nProgress: 12000\nProgress: 13000\nProgress: 14000\nProgress: 15000\nProgress: 16000\nProgress: 17000\nProgress: 18000\nProgress: 19000\nProgress: 20000\nProgress: 21000\nProgress: 22000\nProgress: 23000\nProgress: 24000\nProgress: 25000\nProgress: 26000\nProgress: 27000\nProgress: 28000\nProgress: 29000\nProgress: 30000\nProgress: 31000\nProgress: 32000\nProgress: 33000\nProgress: 34000\nProgress: 35000\nProgress: 36000\nProgress: 37000\nProgress: 38000\nProgress: 39000\nProgress: 40000\nProgress: 41000\nProgress: 42000\nProgress: 43000\nProgress: 44000\nProgress: 45000\nProgress: 46000\nProgress: 47000\nProgress: 48000\nProgress: 49000\nProgress: 50000\nProgress: 51000\nProgress: 52000\nProgress: 53000\nProgress: 54000\nProgress: 55000\nProgress: 56000\nProgress: 57000\nProgress: 58000\nProgress: 59000\nProgress: 60000\nProgress: 61000\n"
],
[
"df.head()",
"_____no_output_____"
],
[
"subm_path = path/'subm_fastai_1.0.34_tta_stage2_sz_256_rn50_val_0.2.csv'\ndf.to_csv(subm_path, index=False)",
"_____no_output_____"
],
[
"# Sanity check\n! head {subm_path}",
"image_name,tags\r\nfile_19658,agriculture haze partly_cloudy primary\r\ntest_18775,agriculture bare_ground clear habitation primary road\r\nfile_20453,agriculture haze primary\r\ntest_23183,clear primary water\r\ntest_28867,partly_cloudy primary\r\ntest_17746,clear primary\r\ntest_11747,agriculture clear primary water\r\ntest_21382,clear primary\r\ntest_10914,agriculture clear haze primary road water\r\n"
]
],
[
[
"**Upload submission file to Kaggle**",
"_____no_output_____"
],
[
"Kaggle allows late submission to check your score. You can use the following command to do that:",
"_____no_output_____"
]
],
[
[
"! kaggle competitions submit -c planet-understanding-the-amazon-from-space -f {subm_path} -m \"fastai: 1.0.34, train: stage2, sz: 256, arch: resnet50, val split: 0.2, TTA\"",
"100%|██████████████████████████████████████| 2.19M/2.19M [00:00<00:00, 7.36MB/s]\nSuccessfully submitted to Planet: Understanding the Amazon from Space"
]
],
[
[
"---",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e742e43f1db67fb6eccd51f75b8885530d4e4fa9 | 18,525 | ipynb | Jupyter Notebook | notebooks/train_model.ipynb | Apidwalin/python-web-scraping-master | 696d20e173a060f2bccbf1c3020057d616ca06ce | [
"CC-BY-4.0",
"MIT"
] | null | null | null | notebooks/train_model.ipynb | Apidwalin/python-web-scraping-master | 696d20e173a060f2bccbf1c3020057d616ca06ce | [
"CC-BY-4.0",
"MIT"
] | null | null | null | notebooks/train_model.ipynb | Apidwalin/python-web-scraping-master | 696d20e173a060f2bccbf1c3020057d616ca06ce | [
"CC-BY-4.0",
"MIT"
] | null | null | null | 38.75523 | 1,094 | 0.567287 | [
[
[
"# Solving Captcha using Tensorflow",
"_____no_output_____"
]
],
[
[
"# Import all the packages\nimport cv2\nimport pickle\nimport os.path\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport imutils\nfrom imutils import paths\nfrom sklearn.preprocessing import LabelBinarizer\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom helpers import resize_to_fit\n\ntrain_graph = tf.Graph()",
"_____no_output_____"
],
[
"# Store all file names and folder names\nLETTER_IMAGES_FOLDER = \"extracted_letter_images\"\nMODEL_LABELS_FILENAME = \"model_labels.dat\"\nTEST_DATA_FOLDER = 'test_captcha'\nCHECKPOINT = \"./train_model.ckpt\"",
"_____no_output_____"
]
],
[
[
"## Getting preprocessed train images and its labels",
"_____no_output_____"
]
],
[
[
"# Initialize the data and labels\ndata = []\nlabels = []\n\n# loop over the input images\nfor image_file in paths.list_images(LETTER_IMAGES_FOLDER):\n # Load the image and convert it to grayscale\n image = cv2.imread(image_file)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Resize the letter so it fits in a 20x20 pixel box\n image = resize_to_fit(image, 20, 20)\n\n # Add a third channel dimension to the image\n image = np.expand_dims(image, axis=2)\n\n # Grab the name of the letter based on the folder it was in\n label = image_file.split(os.path.sep)[-2]\n\n # Add the letter image and it's label to our training data\n data.append(image)\n labels.append(label)",
"_____no_output_____"
],
[
"# Scale the raw pixel intensities to the range [0, 1] (this improves training)\ndata = np.array(data, dtype=\"float\") / 255.0\nlabels = np.array(np.expand_dims(labels, axis=1))\n\nprint(data.shape)\nprint(labels.shape)",
"_____no_output_____"
],
[
"# Convert the labels (letters) into one-hot encodings\nlb = LabelBinarizer().fit(labels)\nlabels = lb.transform(labels)\n\nprint(labels.shape)",
"_____no_output_____"
],
[
"# Save the mapping from labels to one-hot encodings\n# We'll need this later when we use the model to decode what it's predictions mean\nwith open(MODEL_LABELS_FILENAME, \"wb\") as f:\n pickle.dump(lb, f)",
"_____no_output_____"
],
[
"m = data.shape[0] # Number of training examples\nn_H = data.shape[1] # Images' height\nn_W = data.shape[2] # Images' width\nn_C = data.shape[3] # number of channels\nn_cls = labels.shape[1] # number of classes\n\n# Create placeholders for the train data and label\nwith train_graph.as_default():\n X = tf.placeholder(tf.float32, [None, n_H, n_W, n_C], name = 'input')\n Y = tf.placeholder(tf.float32, [None, n_cls], name = 'output')",
"_____no_output_____"
],
[
"# Initialize the weights for the convolution layers\n# shape = [filter_size, filter_size, num_input_channels, num_filters]\nwith train_graph.as_default():\n W1 = tf.get_variable(\"W1\", [5, 5, 1, 20], initializer = tf.contrib.layers.xavier_initializer(seed=0))\n W2 = tf.get_variable(\"W2\", [5, 5, 20, 50], initializer = tf.contrib.layers.xavier_initializer(seed=0))",
"_____no_output_____"
]
],
[
[
"## CNN Architecture",
"_____no_output_____"
]
],
[
[
"# Create convolutional neural network\nwith train_graph.as_default():\n # Layer1 - Convolutional\n conv_layer1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME', name = 'conv1')\n relu_layer1 = tf.nn.relu(conv_layer1, name = 'relu1')\n max_pool_layer1 = tf.nn.max_pool(relu_layer1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding='SAME', name = 'pool1')\n\n # Layer2 - Convolutional\n conv_layer2 = tf.nn.conv2d(max_pool_layer1, W2, strides=[1, 1, 1, 1], padding='SAME', name = 'conv2')\n relu_layer2 = tf.nn.relu(conv_layer2, name = 'relu2')\n max_pool_layer2 = tf.nn.max_pool(relu_layer2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding='SAME', name = 'pool2')\n\n # Layer3 - Fully_Connected (Don't forget to flatten the previous layer)\n flatten_layer3 = tf.contrib.layers.flatten(max_pool_layer2)\n fc_layer3 = tf.contrib.layers.fully_connected(flatten_layer3, 500, activation_fn=tf.nn.relu, scope = 'fc1')\n\n # Layer4 - Fully_Connected\n fc_layer4 = tf.contrib.layers.fully_connected(fc_layer3, n_cls, activation_fn=None, scope = 'fc2')\n print(fc_layer4)",
"_____no_output_____"
],
[
"# Use cross entropy cost function\nwith train_graph.as_default():\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=fc_layer4, labels=Y)\n cost = tf.reduce_mean(cross_entropy)\n\n # Use adam optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)",
"_____no_output_____"
],
[
"# Funcion: To pick random minibatches to train the model\ndef random_mini_batches(train, labels, batch_size, seed):\n # Always change the seed so that we randomize in different order\n np.random.seed(seed)\n # Make sure we shuffle both the train data and the label in the same order\n p = np.random.permutation(len(train))\n train = train[p]\n labels = labels[p]\n train_batches = []\n label_batches = []\n # Dividing the train data into minibatches\n for batch_i in range(0, len(train)//batch_size):\n start_i = batch_i * batch_size\n train_batch = train[start_i:start_i + batch_size]\n label_batch = labels[start_i:start_i + batch_size]\n train_batches.append(train_batch)\n label_batches.append(label_batch)\n \n return train_batches, label_batches ",
"_____no_output_____"
]
],
[
[
"## Training the model",
"_____no_output_____"
]
],
[
[
"ops.reset_default_graph()\ntf.set_random_seed(1)\n# Initialize all the hyperparameters\nseed = 3\nnum_epochs=10\nminibatch_size=64\ncosts = [] \n\n# Training the model\nwith tf.Session(graph=train_graph) as sess:\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n \n # If we want to continue training a previous session\n # loader = tf.train.import_meta_graph(\"./\" + CHECKPOINT + '.meta')\n # loader.restore(sess, CHECKPOINT)\n \n # Loop over number of epochs\n for epoch in range(num_epochs):\n \n start_time = time.time()\n minibatch_cost = 0\n num_minibatches = int(m / minibatch_size)\n seed = seed + 1\n # Calling the random_mini_batches function to get the batches\n train_batches, label_batches = random_mini_batches(data, labels, minibatch_size, seed)\n \n # Now train the model for each of that batches and calculate the minibatch cost\n for batch_i in range(num_minibatches):\n \n # Choose the minibatches\n minibatch_X = train_batches[batch_i]\n minibatch_Y = label_batches[batch_i]\n \n _ , temp_cost = sess.run([optimizer, cost], feed_dict={X:minibatch_X, Y:minibatch_Y})\n \n minibatch_cost += temp_cost / num_minibatches\n \n # Print the cost every 2 epoch\n if epoch % 2 == 0:\n print(\"Epoch \"+str(epoch)+\" completed : Time usage \"+str(int(time.time()-start_time))+\" seconds\")\n print(\"\\t- Cost after epoch %i: %f\" % (epoch, minibatch_cost))\n # Don't forget to save the model\n saver = tf.train.Saver() \n saver.save(sess, CHECKPOINT)\n if epoch % 1 == 0:\n costs.append(minibatch_cost)\n \n \n # plot the cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.show() \n \n # Calculate the correct predictions\n predict_op = tf.argmax(fc_layer4, 1)\n correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))\n\n # Calculate accuracy for the training data\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n train_accuracy = accuracy.eval({X: data, Y: labels}) \n print(\"Train 
Accuracy:\", train_accuracy)",
"_____no_output_____"
],
[
"# Let's check the model on few tesrt data\ntest_data_files = list(paths.list_images(TEST_DATA_FOLDER))\nprint(test_data_files)",
"_____no_output_____"
]
],
[
[
        "## Preprocessing the test images and making predictions",
"_____no_output_____"
]
],
[
[
"# Load up the model labels (so we can translate model predictions to actual letters)\nwith open(MODEL_LABELS_FILENAME, \"rb\") as f:\n lb = pickle.load(f)\n\n# Ignoring the INFO from the tensorflow\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nloaded_graph = tf.Graph() \n\n# loop over the image paths\nfor image_file in test_data_files:\n \n # Name of the image file is the ground truth for our predictions.\n filename = os.path.basename(image_file)\n captcha_correct_text = os.path.splitext(filename)[0]\n \n # Load the image and convert it to grayscale\n image = cv2.imread(image_file)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Add some extra padding around the image\n image = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)\n\n # threshold the image (convert it to pure black and white)\n thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n\n # find the contours (continuous blobs of pixels) the image\n contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Hack for compatibility with different OpenCV versions\n contours = contours[0] if imutils.is_cv2() else contours[1]\n\n letter_image_regions = []\n\n # Now we can loop through each of the four contours and extract the letter\n # inside of each one\n for contour in contours:\n # Get the rectangle that contains the contour\n (x, y, w, h) = cv2.boundingRect(contour)\n\n # Compare the width and height of the contour to detect letters that\n # are conjoined into one chunk\n if w / h > 1.25:\n # This contour is too wide to be a single letter!\n # Split it in half into two letter regions!\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n # This is a normal letter by itself\n letter_image_regions.append((x, y, w, h))\n\n # If we found more or less than 6 letters in the captcha, our letter extraction\n # didn't work correcly. 
Skip the image.\n if len(letter_image_regions) != 6:\n continue\n\n # Sort the detected letter images based on the x coordinate to make sure\n # we are processing them from left-to-right so we match the right image\n # with the right letter\n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n\n # Create an output image and a list to hold our predicted letters\n output = cv2.merge([image] * 3)\n predictions = []\n\n # loop over the letters\n for n,letter_bounding_box in enumerate(letter_image_regions):\n # Grab the coordinates of the letter in the image\n x, y, w, h = letter_bounding_box\n\n # Extract the letter from the original image with a 2-pixel margin around the edge\n letter_image = image[y - 2:y + h + 2, x - 2:x + w + 2]\n\n # Re-size the letter image to 20x20 pixels to match training data\n letter_image = resize_to_fit(letter_image, 20, 20)\n\n # Turn the single image into a 4d list of images so that the Tensorflow can handle\n letter_image = np.expand_dims(letter_image, axis=2)\n letter_image = np.expand_dims(letter_image, axis=0)\n \n # Load the Tensorflow session\n with tf.Session(graph=loaded_graph) as sess:\n \n # Load the saved model\n loader = tf.train.import_meta_graph(CHECKPOINT + '.meta')\n loader.restore(sess, CHECKPOINT)\n \n # Load the required parameters from the graph\n final_layer = loaded_graph.get_tensor_by_name('fc2/BiasAdd:0')\n input_layer = loaded_graph.get_tensor_by_name('input:0')\n \n # Making the predicitons\n predict = tf.argmax(final_layer, 1)\n output = predict.eval({input_layer: letter_image})\n \n # Append the correct letters to a list\n predictions.append(lb.classes_[output[0]])\n \n # Let's print our results and determine if it's correct or not\n print(\"Original Captcha - \" + captcha_correct_text)\n print(\"Predicted Captcha - \" + ''.join(predictions))\n if captcha_correct_text == ''.join(predictions):\n print(\"---CORRECT---\")\n else:\n print(\"---WRONG---\")\n \n # Plotting the captcha image as 
well\n plt.imshow(image)\n plt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e742e5993a908784fe837311c6adccf6b60a1fbf | 162,509 | ipynb | Jupyter Notebook | scripts/d21-en/mxnet/chapter_appendix-mathematics-for-deep-learning/eigendecomposition.ipynb | lucmertins/CapDeepLearningBook | e5959b552c8716e7fc65a21ae9c13c58509544c1 | [
"MIT"
] | null | null | null | scripts/d21-en/mxnet/chapter_appendix-mathematics-for-deep-learning/eigendecomposition.ipynb | lucmertins/CapDeepLearningBook | e5959b552c8716e7fc65a21ae9c13c58509544c1 | [
"MIT"
] | null | null | null | scripts/d21-en/mxnet/chapter_appendix-mathematics-for-deep-learning/eigendecomposition.ipynb | lucmertins/CapDeepLearningBook | e5959b552c8716e7fc65a21ae9c13c58509544c1 | [
"MIT"
] | null | null | null | 41.183224 | 401 | 0.496976 | [
[
[
"# Eigendecompositions\n:label:`sec_eigendecompositions`\n\nEigenvalues are often one of the most useful notions \nwe will encounter when studying linear algebra, \nhowever, as a beginner, it is easy to overlook their importance.\nBelow, we introduce eigendecomposition and \ntry to convey some sense of just why it is so important. \n\nSuppose that we have a matrix $A$ with the following entries:\n\n$$\n\\mathbf{A} = \\begin{bmatrix}\n2 & 0 \\\\\n0 & -1\n\\end{bmatrix}.\n$$\n\nIf we apply $A$ to any vector $\\mathbf{v} = [x, y]^\\top$, \nwe obtain a vector $\\mathbf{A}\\mathbf{v} = [2x, -y]^\\top$.\nThis has an intuitive interpretation:\nstretch the vector to be twice as wide in the $x$-direction,\nand then flip it in the $y$-direction.\n\nHowever, there are *some* vectors for which something remains unchanged.\nNamely $[1, 0]^\\top$ gets sent to $[2, 0]^\\top$\nand $[0, 1]^\\top$ gets sent to $[0, -1]^\\top$.\nThese vectors are still in the same line,\nand the only modification is that the matrix stretches them\nby a factor of $2$ and $-1$ respectively.\nWe call such vectors *eigenvectors*\nand the factor they are stretched by *eigenvalues*.\n\nIn general, if we can find a number $\\lambda$ \nand a vector $\\mathbf{v}$ such that \n\n$$\n\\mathbf{A}\\mathbf{v} = \\lambda \\mathbf{v}.\n$$\n\nWe say that $\\mathbf{v}$ is an eigenvector for $A$ and $\\lambda$ is an eigenvalue.\n\n## Finding Eigenvalues\nLet us figure out how to find them. 
By subtracting off the $\\lambda \\mathbf{v}$ from both sides,\nand then factoring out the vector,\nwe see the above is equivalent to:\n\n$$(\\mathbf{A} - \\lambda \\mathbf{I})\\mathbf{v} = 0.$$\n:eqlabel:`eq_eigvalue_der`\n\nFor :eqref:`eq_eigvalue_der` to happen, we see that $(\\mathbf{A} - \\lambda \\mathbf{I})$ \nmust compress some direction down to zero, \nhence it is not invertible, and thus the determinant is zero.\nThus, we can find the *eigenvalues* \nby finding for what $\\lambda$ is $\\det(\\mathbf{A}-\\lambda \\mathbf{I}) = 0$.\nOnce we find the eigenvalues, we can solve \n$\\mathbf{A}\\mathbf{v} = \\lambda \\mathbf{v}$ \nto find the associated *eigenvector(s)*.\n\n### An Example\nLet us see this with a more challenging matrix\n\n$$\n\\mathbf{A} = \\begin{bmatrix}\n2 & 1\\\\\n2 & 3 \n\\end{bmatrix}.\n$$\n\nIf we consider $\\det(\\mathbf{A}-\\lambda \\mathbf{I}) = 0$, \nwe see this is equivalent to the polynomial equation\n$0 = (2-\\lambda)(3-\\lambda)-2 = (4-\\lambda)(1-\\lambda)$.\nThus, two eigenvalues are $4$ and $1$.\nTo find the associated vectors, we then need to solve\n\n$$\n\\begin{bmatrix}\n2 & 1\\\\\n2 & 3 \n\\end{bmatrix}\\begin{bmatrix}x \\\\ y\\end{bmatrix} = \\begin{bmatrix}x \\\\ y\\end{bmatrix} \\; \\text{and} \\;\n\\begin{bmatrix}\n2 & 1\\\\\n2 & 3 \n\\end{bmatrix}\\begin{bmatrix}x \\\\ y\\end{bmatrix} = \\begin{bmatrix}4x \\\\ 4y\\end{bmatrix} .\n$$\n\nWe can solve this with the vectors $[1, -1]^\\top$ and $[1, 2]^\\top$ respectively.\n\nWe can check this in code using the built-in `numpy.linalg.eig` routine.\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nfrom IPython import display\nfrom d2l import mxnet as d2l\n\nnp.linalg.eig(np.array([[2, 1], [2, 3]]))\n",
"_____no_output_____"
]
],
[
[
"Note that `numpy` normalizes the eigenvectors to be of length one,\nwhereas we took ours to be of arbitrary length.\nAdditionally, the choice of sign is arbitrary.\nHowever, the vectors computed are parallel \nto the ones we found by hand with the same eigenvalues.\n\n## Decomposing Matrices\nLet us continue the previous example one step further. Let\n\n$$\n\\mathbf{W} = \\begin{bmatrix}\n1 & 1 \\\\\n-1 & 2\n\\end{bmatrix},\n$$\n\nbe the matrix where the columns are the eigenvectors of the matrix $\\mathbf{A}$. Let\n\n$$\n\\boldsymbol{\\Sigma} = \\begin{bmatrix}\n1 & 0 \\\\\n0 & 4\n\\end{bmatrix},\n$$\n\nbe the matrix with the associated eigenvalues on the diagonal.\nThen the definition of eigenvalues and eigenvectors tells us that\n\n$$\n\\mathbf{A}\\mathbf{W} =\\mathbf{W} \\boldsymbol{\\Sigma} .\n$$\n\nThe matrix $W$ is invertible, so we may multiply both sides by $W^{-1}$ on the right,\nwe see that we may write\n\n$$\\mathbf{A} = \\mathbf{W} \\boldsymbol{\\Sigma} \\mathbf{W}^{-1}.$$\n:eqlabel:`eq_eig_decomp`\n\nIn the next section we will see some nice consequences of this,\nbut for now we need only know that such a decomposition \nwill exist as long as we can find a full collection \nof linearly independent eigenvectors (so that $W$ is invertible).\n\n## Operations on Eigendecompositions\nOne nice thing about eigendecompositions :eqref:`eq_eig_decomp` is that \nwe can write many operations we usually encounter cleanly \nin terms of the eigendecomposition. 
As a first example, consider:\n\n$$\n\\mathbf{A}^n = \\overbrace{\\mathbf{A}\\cdots \\mathbf{A}}^{\\text{$n$ times}} = \\overbrace{(\\mathbf{W}\\boldsymbol{\\Sigma} \\mathbf{W}^{-1})\\cdots(\\mathbf{W}\\boldsymbol{\\Sigma} \\mathbf{W}^{-1})}^{\\text{$n$ times}} = \\mathbf{W}\\overbrace{\\boldsymbol{\\Sigma}\\cdots\\boldsymbol{\\Sigma}}^{\\text{$n$ times}}\\mathbf{W}^{-1} = \\mathbf{W}\\boldsymbol{\\Sigma}^n \\mathbf{W}^{-1}.\n$$\n\nThis tells us that for any positive power of a matrix,\nthe eigendecomposition is obtained by just raising the eigenvalues to the same power.\nThe same can be shown for negative powers,\nso if we want to invert a matrix we need only consider\n\n$$\n\\mathbf{A}^{-1} = \\mathbf{W}\\boldsymbol{\\Sigma}^{-1} \\mathbf{W}^{-1},\n$$\n\nor in other words, just invert each eigenvalue.\nThis will work as long as each eigenvalue is non-zero,\nso we see that invertible is the same as having no zero eigenvalues. \n\nIndeed, additional work can show that if $\\lambda_1, \\ldots, \\lambda_n$ \nare the eigenvalues of a matrix, then the determinant of that matrix is\n\n$$\n\\det(\\mathbf{A}) = \\lambda_1 \\cdots \\lambda_n,\n$$\n\nor the product of all the eigenvalues.\nThis makes sense intuitively because whatever stretching $\\mathbf{W}$ does, \n$W^{-1}$ undoes it, so in the end the only stretching that happens is \nby multiplication by the diagonal matrix $\\boldsymbol{\\Sigma}$, \nwhich stretches volumes by the product of the diagonal elements.\n\nFinally, recall that the rank was the maximum number \nof linearly independent columns of your matrix.\nBy examining the eigendecomposition closely,\nwe can see that the rank is the same \nas the number of non-zero eigenvalues of $\\mathbf{A}$.\n\nThe examples could continue, but hopefully the point is clear:\neigendecomposition can simplify many linear-algebraic computations\nand is a fundamental operation underlying many numerical algorithms\nand much of the analysis that we do in linear algebra. 
\n\n## Eigendecompositions of Symmetric Matrices\nIt is not always possible to find enough linearly independent eigenvectors \nfor the above process to work. For instance the matrix\n\n$$\n\\mathbf{A} = \\begin{bmatrix}\n1 & 1 \\\\\n0 & 1\n\\end{bmatrix},\n$$\n\nhas only a single eigenvector, namely $(1, 0)^\\top$. \nTo handle such matrices, we require more advanced techniques \nthan we can cover (such as the Jordan Normal Form, or Singular Value Decomposition).\nWe will often need to restrict our attention to those matrices \nwhere we can guarantee the existence of a full set of eigenvectors.\n\nThe most commonly encountered family are the *symmetric matrices*,\nwhich are those matrices where $\\mathbf{A} = \\mathbf{A}^\\top$. \nIn this case, we may take $W$ to be an *orthogonal matrix*—a matrix whose columns are all length one vectors that are at right angles to one another, where \n$\\mathbf{W}^\\top = \\mathbf{W}^{-1}$—and all the eigenvalues will be real. \nThus, in this special case, we can write :eqref:`eq_eig_decomp` as\n\n$$\n\\mathbf{A} = \\mathbf{W}\\boldsymbol{\\Sigma}\\mathbf{W}^\\top .\n$$\n\n## Gershgorin Circle Theorem\nEigenvalues are often difficult to reason with intuitively.\nIf presented an arbitrary matrix, there is little that can be said\nabout what the eigenvalues are without computing them.\nThere is, however, one theorem that can make it easy to approximate well \nif the largest values are on the diagonal.\n\nLet $\\mathbf{A} = (a_{ij})$ be any square matrix ($n\\times n$).\nWe will define $r_i = \\sum_{j \\neq i} |a_{ij}|$.\nLet $\\mathcal{D}_i$ represent the disc in the complex plane \nwith center $a_{ii}$ radius $r_i$.\nThen, every eigenvalue of $\\mathbf{A}$ is contained in one of the $\\mathcal{D}_i$.\n\nThis can be a bit to unpack, so let us look at an example. 
\nConsider the matrix:\n\n$$\n\\mathbf{A} = \\begin{bmatrix}\n1.0 & 0.1 & 0.1 & 0.1 \\\\\n0.1 & 3.0 & 0.2 & 0.3 \\\\\n0.1 & 0.2 & 5.0 & 0.5 \\\\\n0.1 & 0.3 & 0.5 & 9.0\n\\end{bmatrix}.\n$$\n\nWe have $r_1 = 0.3$, $r_2 = 0.6$, $r_3 = 0.8$ and $r_4 = 0.9$.\nThe matrix is symmetric, so all eigenvalues are real.\nThis means that all of our eigenvalues will be in one of the ranges of \n\n$$[a_{11}-r_1, a_{11}+r_1] = [0.7, 1.3], $$\n\n$$[a_{22}-r_2, a_{22}+r_2] = [2.4, 3.6], $$\n\n$$[a_{33}-r_3, a_{33}+r_3] = [4.2, 5.8], $$\n\n$$[a_{44}-r_4, a_{44}+r_4] = [8.1, 9.9]. $$\n\n\nPerforming the numerical computation shows \nthat the eigenvalues are approximately $0.99$, $2.97$, $4.95$, $9.08$,\nall comfortably inside the ranges provided.\n",
"_____no_output_____"
]
],
[
[
"A = np.array([[1.0, 0.1, 0.1, 0.1], [0.1, 3.0, 0.2, 0.3],\n [0.1, 0.2, 5.0, 0.5], [0.1, 0.3, 0.5, 9.0]])\n\nv, _ = np.linalg.eig(A)\nv",
"_____no_output_____"
]
],
[
[
"In this way, eigenvalues can be approximated, \nand the approximations will be fairly accurate \nin the case that the diagonal is \nsignificantly larger than all the other elements. \n\nIt is a small thing, but with a complex \nand subtle topic like eigendecomposition, \nit is good to get any intuitive grasp we can.\n\n## A Useful Application: The Growth of Iterated Maps\n\nNow that we understand what eigenvectors are in principle,\nlet us see how they can be used to provide a deep understanding \nof a problem central to neural network behavior: proper weight initialization. \n\n### Eigenvectors as Long Term Behavior\n\nThe full mathematical investigation of the initialization \nof deep neural networks is beyond the scope of the text, \nbut we can see a toy version here to understand\nhow eigenvalues can help us see how these models work.\nAs we know, neural networks operate by interspersing layers \nof linear transformations with non-linear operations.\nFor simplicity here, we will assume that there is no non-linearity,\nand that the transformation is a single repeated matrix operation $A$,\nso that the output of our model is\n\n$$\n\\mathbf{v}_{out} = \\mathbf{A}\\cdot \\mathbf{A}\\cdots \\mathbf{A} \\mathbf{v}_{in} = \\mathbf{A}^N \\mathbf{v}_{in}.\n$$\n\nWhen these models are initialized, $A$ is taken to be \na random matrix with Gaussian entries, so let us make one of those. \nTo be concrete, we start with a mean zero, variance one Gaussian distributed $5 \\times 5$ matrix.\n",
"_____no_output_____"
]
],
[
[
"np.random.seed(8675309)\n\nk = 5\nA = np.random.randn(k, k)\nA",
"_____no_output_____"
]
],
[
[
"### Behavior on Random Data\nFor simplicity in our toy model, \nwe will assume that the data vector we feed in $\\mathbf{v}_{in}$ \nis a random five dimensional Gaussian vector.\nLet us think about what we want to have happen.\nFor context, lets think of a generic ML problem,\nwhere we are trying to turn input data, like an image, into a prediction, \nlike the probability the image is a picture of a cat.\nIf repeated application of $\\mathbf{A}$ \nstretches a random vector out to be very long, \nthen small changes in input will be amplified \ninto large changes in output---tiny modifications of the input image\nwould lead to vastly different predictions.\nThis does not seem right!\n\nOn the flip side, if $\\mathbf{A}$ shrinks random vectors to be shorter,\nthen after running through many layers, the vector will essentially shrink to nothing, \nand the output will not depend on the input. This is also clearly not right either!\n\nWe need to walk the narrow line between growth and decay \nto make sure that our output changes depending on our input, but not much!\n\nLet us see what happens when we repeatedly multiply our matrix $\\mathbf{A}$ \nagainst a random input vector, and keep track of the norm.\n",
"_____no_output_____"
]
],
[
[
"# Calculate the sequence of norms after repeatedly applying `A`\nv_in = np.random.randn(k, 1)\n\nnorm_list = [np.linalg.norm(v_in)]\nfor i in range(1, 100):\n v_in = A.dot(v_in)\n norm_list.append(np.linalg.norm(v_in))\n\nd2l.plot(np.arange(0, 100), norm_list, 'Iteration', 'Value')",
"_____no_output_____"
]
],
[
[
"The norm is growing uncontrollably! \nIndeed if we take the list of quotients, we will see a pattern.\n",
"_____no_output_____"
]
],
[
[
"# Compute the scaling factor of the norms\nnorm_ratio_list = []\nfor i in range(1, 100):\n norm_ratio_list.append(norm_list[i] / norm_list[i - 1])\n\nd2l.plot(np.arange(1, 100), norm_ratio_list, 'Iteration', 'Ratio')",
"_____no_output_____"
]
],
[
[
"If we look at the last portion of the above computation, \nwe see that the random vector is stretched by a factor of `1.974459321485[...]`,\nwhere the portion at the end shifts a little, \nbut the stretching factor is stable. \n\n### Relating Back to Eigenvectors\n\nWe have seen that eigenvectors and eigenvalues correspond \nto the amount something is stretched, \nbut that was for specific vectors, and specific stretches.\nLet us take a look at what they are for $\\mathbf{A}$.\nA bit of a caveat here: it turns out that to see them all,\nwe will need to go to complex numbers.\nYou can think of these as stretches and rotations.\nBy taking the norm of the complex number\n(square root of the sums of squares of real and imaginary parts)\nwe can measure that stretching factor. Let us also sort them.\n",
"_____no_output_____"
]
],
[
[
"# Compute the eigenvalues\neigs = np.linalg.eigvals(A).tolist()\nnorm_eigs = [np.absolute(x) for x in eigs]\nnorm_eigs.sort()\nprint(f'norms of eigenvalues: {norm_eigs}')",
"norms of eigenvalues: [0.8786205280381857, 1.2757952665062624, 1.4983381517710659, 1.4983381517710659, 1.974459321485074]\n"
]
],
[
[
"### An Observation\n\nWe see something a bit unexpected happening here: \nthat number we identified before for the \nlong term stretching of our matrix $\\mathbf{A}$ \napplied to a random vector is *exactly* \n(accurate to thirteen decimal places!) \nthe largest eigenvalue of $\\mathbf{A}$.\nThis is clearly not a coincidence!\n\nBut, if we now think about what is happening geometrically,\nthis starts to make sense. Consider a random vector. \nThis random vector points a little in every direction, \nso in particular, it points at least a little bit \nin the same direction as the eigenvector of $\\mathbf{A}$\nassociated with the largest eigenvalue.\nThis is so important that it is called \nthe *principle eigenvalue* and *principle eigenvector*.\nAfter applying $\\mathbf{A}$, our random vector \ngets stretched in every possible direction,\nas is associated with every possible eigenvector,\nbut it is stretched most of all in the direction \nassociated with this principle eigenvector.\nWhat this means is that after apply in $A$, \nour random vector is longer, and points in a direction \ncloser to being aligned with the principle eigenvector.\nAfter applying the matrix many times, \nthe alignment with the principle eigenvector becomes closer and closer until, \nfor all practical purposes, our random vector has been transformed \ninto the principle eigenvector!\nIndeed this algorithm is the basis \nfor what is known as the *power iteration*\nfor finding the largest eigenvalue and eigenvector of a matrix. For details see, for example, :cite:`Van-Loan.Golub.1983`.\n\n### Fixing the Normalization\n\nNow, from above discussions, we concluded \nthat we do not want a random vector to be stretched or squished at all,\nwe would like random vectors to stay about the same size throughout the entire process.\nTo do so, we now rescale our matrix by this principle eigenvalue \nso that the largest eigenvalue is instead now just one.\nLet us see what happens in this case.\n",
"_____no_output_____"
]
],
[
[
"# Rescale the matrix `A`\nA /= norm_eigs[-1]\n\n# Do the same experiment again\nv_in = np.random.randn(k, 1)\n\nnorm_list = [np.linalg.norm(v_in)]\nfor i in range(1, 100):\n v_in = A.dot(v_in)\n norm_list.append(np.linalg.norm(v_in))\n\nd2l.plot(np.arange(0, 100), norm_list, 'Iteration', 'Value')",
"_____no_output_____"
]
],
[
[
"We can also plot the ratio between consecutive norms as before and see that indeed it stabilizes.\n",
"_____no_output_____"
]
],
[
[
"# Also plot the ratio\nnorm_ratio_list = []\nfor i in range(1, 100):\n norm_ratio_list.append(norm_list[i] / norm_list[i - 1])\n\nd2l.plot(np.arange(1, 100), norm_ratio_list, 'Iteration', 'Ratio')",
"_____no_output_____"
]
],
[
[
"## Conclusions\n\nWe now see exactly what we hoped for!\nAfter normalizing the matrices by the principle eigenvalue,\nwe see that the random data does not explode as before,\nbut rather eventually equilibrates to a specific value.\nIt would be nice to be able to do these things from first principles,\nand it turns out that if we look deeply at the mathematics of it,\nwe can see that the largest eigenvalue \nof a large random matrix with independent mean zero,\nvariance one Gaussian entries is on average about $\\sqrt{n}$,\nor in our case $\\sqrt{5} \\approx 2.2$,\ndue to a fascinating fact known as the *circular law* :cite:`Ginibre.1965`.\nThe relationship between the eigenvalues (and a related object called singular values) of random matrices has been shown to have deep connections to proper initialization of neural networks as was discussed in :cite:`Pennington.Schoenholz.Ganguli.2017` and subsequent works.\n\n## Summary\n* Eigenvectors are vectors which are stretched by a matrix without changing direction.\n* Eigenvalues are the amount that the eigenvectors are stretched by the application of the matrix.\n* The eigendecomposition of a matrix can allow for many operations to be reduced to operations on the eigenvalues.\n* The Gershgorin Circle Theorem can provide approximate values for the eigenvalues of a matrix.\n* The behavior of iterated matrix powers depends primarily on the size of the largest eigenvalue. This understanding has many applications in the theory of neural network initialization.\n\n## Exercises\n1. What are the eigenvalues and eigenvectors of\n$$\n\\mathbf{A} = \\begin{bmatrix}\n2 & 1 \\\\\n1 & 2\n\\end{bmatrix}?\n$$\n1. What are the eigenvalues and eigenvectors of the following matrix, and what is strange about this example compared to the previous one?\n$$\n\\mathbf{A} = \\begin{bmatrix}\n2 & 1 \\\\\n0 & 2\n\\end{bmatrix}.\n$$\n1. 
Without computing the eigenvalues, is it possible that the smallest eigenvalue of the following matrix is less that $0.5$? *Note*: this problem can be done in your head.\n$$\n\\mathbf{A} = \\begin{bmatrix}\n3.0 & 0.1 & 0.3 & 1.0 \\\\\n0.1 & 1.0 & 0.1 & 0.2 \\\\\n0.3 & 0.1 & 5.0 & 0.0 \\\\\n1.0 & 0.2 & 0.0 & 1.8\n\\end{bmatrix}.\n$$\n",
"_____no_output_____"
],
[
"[Discussions](https://discuss.d2l.ai/t/411)\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e742ee6b8ae1a8754faabe7eeba1e4ef562cbee8 | 53,506 | ipynb | Jupyter Notebook | python/studentperformancemodel.ipynb | camara94/reseau-neurone-tensorflow-2 | e77624721672ae00b8ebc5fafd25b496a8a66c92 | [
"Apache-2.0"
] | null | null | null | python/studentperformancemodel.ipynb | camara94/reseau-neurone-tensorflow-2 | e77624721672ae00b8ebc5fafd25b496a8a66c92 | [
"Apache-2.0"
] | null | null | null | python/studentperformancemodel.ipynb | camara94/reseau-neurone-tensorflow-2 | e77624721672ae00b8ebc5fafd25b496a8a66c92 | [
"Apache-2.0"
] | null | null | null | 47.476486 | 14,688 | 0.604493 | [
[
[
        "# Réseau de neurones\nNous allons illustrer notre exemple de réseau de neurones sur une dataset qui consiste à prédire si un étudiant va terminer ou pas \nses études en nous basant sur certains critères",
"_____no_output_____"
],
[
"## Importation des packages",
"_____no_output_____"
]
],
[
[
"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense",
"_____no_output_____"
]
],
[
[
"## Création de la dataset",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('./../data/StudentsPerformance.csv')\ndf.head(2)",
"_____no_output_____"
]
],
[
[
"## Data preprocessing",
"_____no_output_____"
]
],
[
[
"df.describe()",
"_____no_output_____"
],
[
"import numpy as np\nfor col in df.columns:\n messing_val = 0\n for v in df[col]:\n if v == np.nan:\n messing_val += 1\n print(f'la colonne **{col}** à {messing_val} donnée(s) manquante(s)')",
"la colonne **gender** à 0 donnée(s) manquante(s)\nla colonne **race/ethnicity** à 0 donnée(s) manquante(s)\nla colonne **parental level of education** à 0 donnée(s) manquante(s)\nla colonne **lunch** à 0 donnée(s) manquante(s)\nla colonne **test preparation course** à 0 donnée(s) manquante(s)\nla colonne **math score** à 0 donnée(s) manquante(s)\nla colonne **reading score** à 0 donnée(s) manquante(s)\nla colonne **writing score** à 0 donnée(s) manquante(s)\n"
],
[
"df.head(3)",
"_____no_output_____"
],
[
"## Le rôle de cette methode est de transformer les variables catégorielles en variable numériques, les standardisées et \n## les retourner\ndef data_preprocessing(df=df):\n labelEncoder = LabelEncoder()\n standardScaler = StandardScaler()\n target = []\n result = df.copy()\n data = df.copy()\n resulat_value = []\n for i in range(0, 5):\n try:\n # Transformation des variable catégorielle en variable nuémerique\n data.iloc[:, i] = labelEncoder.fit_transform(data.iloc[:, i])\n # suppression de la colonne **test preparation course**\n data.drop(['test preparation course'], axis=1, inplace=True)\n except KeyError:\n pass\n [resulat_value.append(df.iloc[:, 4][i]) for i in range(1, 3) ]\n result.iloc[:, 4] = labelEncoder.fit_transform(result.iloc[:, 4])\n target = result.iloc[:, 4]\n # Standardisation\n data = standardScaler.fit_transform(data)\n return data, target, resulat_value",
"_____no_output_____"
],
[
"X, y, r = data_preprocessing()",
"_____no_output_____"
],
[
"r",
"_____no_output_____"
],
[
"print(f'Nous avons {X.shape[1]} variable(s) indépendante(s)')",
"Nous avons 7 variable(s) indépendante(s)\n"
],
[
"x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=.25, shuffle=True )",
"_____no_output_____"
],
[
"print(f'La taille de train set est: {x_train.shape[0]}\\n La taille de test set est: {x_test.shape[0]}')\nprint(f'La taille de test set est: {x_test.shape}')\nprint(f'La taille de test set est: {y_test.shape}')",
"La taille de train set est: 750\n La taille de test set est: 250\nLa taille de test set est: (250, 7)\nLa taille de test set est: (250,)\n"
]
],
[
[
        "## Création de notre réseau de neurones",
"_____no_output_____"
]
],
[
[
"## Model simple avec Sequential\nmodel = Sequential()\n## création de la couche cachée\ncouche_cachee = Dense(30, input_dim=7, activation='relu')\ncouche_cachee2 = Dense(20, activation='relu')\ncouche_cachee3 = Dense(5, activation='relu')\ncouche_sortie = Dense(1, activation='sigmoid')\nmodel.add( couche_cachee )\nmodel.add( couche_cachee2 )\nmodel.add( couche_cachee3 )\n\nmodel.add( couche_sortie )\nmodel.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] )",
"_____no_output_____"
],
[
"model.summary()\nmodel.build()",
"Model: \"sequential_43\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_274 (Dense) (None, 30) 240 \n_________________________________________________________________\ndense_275 (Dense) (None, 20) 620 \n_________________________________________________________________\ndense_276 (Dense) (None, 5) 105 \n_________________________________________________________________\ndense_277 (Dense) (None, 1) 6 \n=================================================================\nTotal params: 971\nTrainable params: 971\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"history = model.fit(x_train, y_train, epochs=65)\nhistory",
"Epoch 1/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.6780 - accuracy: 0.6507\nEpoch 2/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.6577 - accuracy: 0.6507\nEpoch 3/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.6309 - accuracy: 0.6507\nEpoch 4/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.6156 - accuracy: 0.6507\nEpoch 5/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.6070 - accuracy: 0.6507\nEpoch 6/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.6013 - accuracy: 0.6507\nEpoch 7/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5978 - accuracy: 0.6507\nEpoch 8/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5926 - accuracy: 0.6507\nEpoch 9/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5900 - accuracy: 0.6507\nEpoch 10/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5867 - accuracy: 0.6507\nEpoch 11/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5830 - accuracy: 0.6507\nEpoch 12/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5793 - accuracy: 0.6507\nEpoch 13/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5768 - accuracy: 0.6507\nEpoch 14/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5734 - accuracy: 0.6507\nEpoch 15/65\n24/24 [==============================] - ETA: 0s - loss: 0.5137 - accuracy: 0.71 - 0s 1ms/step - loss: 0.5711 - accuracy: 0.6507\nEpoch 16/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5674 - accuracy: 0.6987\nEpoch 17/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5657 - accuracy: 0.6933\nEpoch 18/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5609 - accuracy: 0.7093\nEpoch 19/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5590 - accuracy: 0.7107\nEpoch 20/65\n24/24 
[==============================] - 0s 1ms/step - loss: 0.5582 - accuracy: 0.7040\nEpoch 21/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5525 - accuracy: 0.7080\nEpoch 22/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5495 - accuracy: 0.7147\nEpoch 23/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5464 - accuracy: 0.7240\nEpoch 24/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5435 - accuracy: 0.7293\nEpoch 25/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5420 - accuracy: 0.7333\nEpoch 26/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5387 - accuracy: 0.7347\nEpoch 27/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5374 - accuracy: 0.7413\nEpoch 28/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5357 - accuracy: 0.7373\nEpoch 29/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5318 - accuracy: 0.7467\nEpoch 30/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5297 - accuracy: 0.7387\nEpoch 31/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5282 - accuracy: 0.7387\nEpoch 32/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5254 - accuracy: 0.7453\nEpoch 33/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5239 - accuracy: 0.7453\nEpoch 34/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5222 - accuracy: 0.7467\nEpoch 35/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5193 - accuracy: 0.7493\nEpoch 36/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5193 - accuracy: 0.7453\nEpoch 37/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5160 - accuracy: 0.7627\nEpoch 38/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5149 - accuracy: 0.7547\nEpoch 39/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5146 - accuracy: 
0.7573\nEpoch 40/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5136 - accuracy: 0.7653\nEpoch 41/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5118 - accuracy: 0.7440\nEpoch 42/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5087 - accuracy: 0.7587\nEpoch 43/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5080 - accuracy: 0.7573\nEpoch 44/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5062 - accuracy: 0.7573\nEpoch 45/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5053 - accuracy: 0.7680\nEpoch 46/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5041 - accuracy: 0.7573\nEpoch 47/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.5025 - accuracy: 0.7573\nEpoch 48/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.5003 - accuracy: 0.7560\nEpoch 49/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4983 - accuracy: 0.7760\nEpoch 50/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4973 - accuracy: 0.7533\nEpoch 51/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4976 - accuracy: 0.7693\nEpoch 52/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4943 - accuracy: 0.7707\nEpoch 53/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.4942 - accuracy: 0.7613\nEpoch 54/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4921 - accuracy: 0.7720\nEpoch 55/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4935 - accuracy: 0.7493\nEpoch 56/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.4896 - accuracy: 0.7733\nEpoch 57/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.4888 - accuracy: 0.7613\nEpoch 58/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4871 - accuracy: 0.7640\nEpoch 59/65\n24/24 [==============================] - 0s 2ms/step - 
loss: 0.4886 - accuracy: 0.7720\nEpoch 60/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4876 - accuracy: 0.7667\nEpoch 61/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4847 - accuracy: 0.7627\nEpoch 62/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4832 - accuracy: 0.7680\nEpoch 63/65\n24/24 [==============================] - 0s 2ms/step - loss: 0.4851 - accuracy: 0.7693\nEpoch 64/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.4839 - accuracy: 0.7720\nEpoch 65/65\n24/24 [==============================] - 0s 1ms/step - loss: 0.4795 - accuracy: 0.7680\n"
],
[
"plt.plot(history.history['loss'])",
"_____no_output_____"
],
[
"lost, acc = model.evaluate(x_test, y_test)",
"8/8 [==============================] - 0s 1ms/step - loss: 0.5507 - accuracy: 0.7280\n"
],
[
"lost",
"_____no_output_____"
],
[
"acc",
"_____no_output_____"
],
[
"model.predict(np.array([x_test[3]]))",
"_____no_output_____"
],
[
"for i,v in enumerate(y_test): \n print('modèle: {} -> dataset: {}'.format(r[int(np.round(model.predict(np.array([x_test[i]]))[0][0]))], r[v]))",
"modèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: 
none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: 
none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: 
none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: completed -> dataset: completed\nmodèle: none -> 
dataset: completed\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: completed\nmodèle: none -> dataset: none\nmodèle: none -> dataset: completed\nmodèle: none -> dataset: completed\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: completed -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\nmodèle: none -> dataset: none\n"
],
[
"df.iloc[:, 4][101]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e742efad84e3a07f611f53d3b41a04cc2db5594c | 119,299 | ipynb | Jupyter Notebook | Addressing Large Hadron Collider Challenges by Machine Learning/Week3/index.ipynb | Mohitkr95/Advanced-ML | 8f24aa986b6f53cb2d5072ce20a9a7be8c0f8560 | [
"MIT"
] | 252 | 2019-02-06T04:15:18.000Z | 2022-03-23T17:38:29.000Z | Addressing Large Hadron Collider Challenges by Machine Learning/Week3/index.ipynb | aaronsmoss3/Advanced-Machine-Learning-Specialization | 99694b44003d264d586d7c36aac76a5559d7236c | [
"MIT"
] | 1 | 2020-11-19T16:21:07.000Z | 2020-11-19T16:21:07.000Z | Addressing Large Hadron Collider Challenges by Machine Learning/Week3/index.ipynb | aaronsmoss3/Advanced-Machine-Learning-Specialization | 99694b44003d264d586d7c36aac76a5559d7236c | [
"MIT"
] | 308 | 2019-03-16T18:18:02.000Z | 2022-01-29T10:04:08.000Z | 76.180715 | 22,432 | 0.751725 | [
[
[
"%pylab inline",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"import pandas\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.metrics import roc_curve, roc_auc_score\n\nfrom hep_ml import metrics",
"_____no_output_____"
]
],
[
[
"# Load dataset and split into training / test\n\n`training.csv` is a mixture of simulated signal, real background.\nIt has the following columns.\n\n`test.csv` has the following columns:\n\n",
"_____no_output_____"
]
],
[
[
"train_ada = pandas.read_csv('reference/training.csv', sep=',')\ntest_ada = pandas.read_csv('reference/test.csv', sep=',', index_col='id')",
"_____no_output_____"
],
[
"print (\"Training full sample columns:\", \", \".join(train_ada.columns), \"\\nShape:\", train_ada.shape)",
"Training full sample columns: id, LifeTime, dira, FlightDistance, FlightDistanceError, IP, IPSig, VertexChi2, pt, DOCAone, DOCAtwo, DOCAthree, IP_p0p2, IP_p1p2, isolationa, isolationb, isolationc, isolationd, isolatione, isolationf, iso, CDF1, CDF2, CDF3, ISO_SumBDT, p0_IsoBDT, p1_IsoBDT, p2_IsoBDT, p0_track_Chi2Dof, p1_track_Chi2Dof, p2_track_Chi2Dof, p0_IP, p1_IP, p2_IP, p0_IPSig, p1_IPSig, p2_IPSig, p0_pt, p1_pt, p2_pt, p0_p, p1_p, p2_p, p0_eta, p1_eta, p2_eta, SPDhits, production, signal, mass, min_ANNmuon \nShape: (67553, 51)\n"
],
[
"print (\"Test full sample columns:\", \", \".join(test_ada.columns), \"\\nShape:\", test_ada.shape)\ntest_ada.head()",
"Test full sample columns: LifeTime, dira, FlightDistance, FlightDistanceError, IP, IPSig, VertexChi2, pt, DOCAone, DOCAtwo, DOCAthree, IP_p0p2, IP_p1p2, isolationa, isolationb, isolationc, isolationd, isolatione, isolationf, iso, CDF1, CDF2, CDF3, ISO_SumBDT, p0_IsoBDT, p1_IsoBDT, p2_IsoBDT, p0_track_Chi2Dof, p1_track_Chi2Dof, p2_track_Chi2Dof, p0_IP, p1_IP, p2_IP, p0_IPSig, p1_IPSig, p2_IPSig, p0_pt, p1_pt, p2_pt, p0_p, p1_p, p2_p, p0_eta, p1_eta, p2_eta, SPDhits \nShape: (855819, 46)\n"
]
],
[
[
"# Train simple model using part of the training sample",
"_____no_output_____"
]
],
[
[
"train, test = train_test_split(train_ada, test_size=0.3, random_state=13)",
"_____no_output_____"
]
],
[
[
"Let's chose features to train a model",
"_____no_output_____"
]
],
[
[
"variables = list(set(train_ada.columns) - {'id', 'signal', 'mass', 'production', 'min_ANNmuon'})\nprint (variables)",
"['IP', 'p1_IPSig', 'p0_p', 'FlightDistance', 'LifeTime', 'pt', 'ISO_SumBDT', 'iso', 'DOCAone', 'p1_IsoBDT', 'p2_IP', 'isolationb', 'isolationa', 'CDF1', 'p0_IP', 'p1_pt', 'CDF2', 'p2_pt', 'p2_track_Chi2Dof', 'CDF3', 'FlightDistanceError', 'DOCAtwo', 'p2_p', 'IP_p1p2', 'p1_IP', 'p0_pt', 'DOCAthree', 'p0_eta', 'IP_p0p2', 'VertexChi2', 'isolatione', 'p1_eta', 'SPDhits', 'p2_IPSig', 'p2_eta', 'p2_IsoBDT', 'p1_p', 'p0_track_Chi2Dof', 'p0_IsoBDT', 'isolationf', 'p1_track_Chi2Dof', 'dira', 'p0_IPSig', 'isolationd', 'IPSig', 'isolationc']\n"
],
[
"%%time\nclf = AdaBoostClassifier(n_estimators=120, learning_rate=0.009, random_state=13,\n base_estimator=DecisionTreeClassifier(max_depth=19, min_samples_leaf=40, max_features=10,\n random_state=13))\nclf.fit(train[variables], train['signal'])",
"Wall time: 49.8 s\n"
]
],
[
[
"# Check model quality on a half of the training sample\n",
"_____no_output_____"
]
],
[
[
"def plot_metrics(y_true, y_pred):\n fpr, tpr, thresholds = roc_curve(y_true, y_pred)\n roc_auc = roc_auc_score(y_true, y_pred)\n\n plt.plot(fpr, tpr, label='ROC AUC=%f' % roc_auc)\n plt.xlabel(\"FPR\")\n plt.ylabel(\"TPR\")\n plt.legend()\n plt.title(\"ROC Curve\")",
"_____no_output_____"
],
[
"y_pred = clf.predict_proba(test[variables])[:, 1]\n\nplot_metrics(test['signal'], y_pred)\ntest.shape, y_pred.shape",
"_____no_output_____"
]
],
[
[
"ROC AUC is just a part of the solution, you also have to make sure that\n\n- the classifier output is not correlated with the mass\n- classifier performs similarily on MC and real data of the normalization channel\n",
"_____no_output_____"
],
[
"### Mass correlation check",
"_____no_output_____"
]
],
[
[
"df_corr_check = pandas.read_csv(\"reference/check_correlation.csv\")",
"_____no_output_____"
],
[
"df_corr_check.shape",
"_____no_output_____"
],
[
"y_pred = clf.predict(df_corr_check[variables])",
"_____no_output_____"
],
[
" def efficiencies(features, thresholds=None, mask=None, bins=30, labels_dict=None, ignored_sideband=0.0,\n errors=False, grid_columns=2):\n \"\"\"\n Efficiencies for spectators\n :param features: using features (if None then use classifier's spectators)\n :type features: None or list[str]\n :param bins: bins for histogram\n :type bins: int or array-like\n :param mask: mask for data, which will be used\n :type mask: None or numbers.Number or array-like or str or function(pandas.DataFrame)\n :param list[float] thresholds: thresholds on prediction\n :param bool errors: if True then use errorbar, else interpolate function\n :param labels_dict: label -- name for class label\n if None then {0: 'bck', '1': 'signal'}\n :type labels_dict: None or OrderedDict(int: str)\n :param int grid_columns: count of columns in grid\n :param float ignored_sideband: (0, 1) percent of plotting data\n :rtype: plotting.GridPlot\n \"\"\"\n mask, data, class_labels, weight = self._apply_mask(\n mask, self._get_features(features), self.target, self.weight)\n labels_dict = self._check_labels(labels_dict, class_labels)\n\n plots = []\n for feature in data.columns:\n for name, prediction in self.prediction.items():\n prediction = prediction[mask]\n eff = OrderedDict()\n for label, label_name in labels_dict.items():\n label_mask = class_labels == label\n eff[label_name] = utils.get_efficiencies(prediction[label_mask, label],\n data[feature][label_mask].values,\n bins_number=bins,\n sample_weight=weight[label_mask],\n thresholds=thresholds, errors=errors,\n ignored_sideband=ignored_sideband)\n\n for label_name, eff_data in eff.items():\n if errors:\n plot_fig = plotting.ErrorPlot(eff_data)\n else:\n plot_fig = plotting.FunctionsPlot(eff_data)\n plot_fig.xlabel = feature\n plot_fig.ylabel = 'Efficiency for {}'.format(name)\n plot_fig.title = '{} flatness'.format(label_name)\n plot_fig.ylim = (0, 1)\n plots.append(plot_fig)\n\n return plotting.GridPlot(grid_columns, *plots)",
"_____no_output_____"
],
[
"def check_arrays(*arrays):\n \"\"\"\n Left for consistency, version of `sklearn.validation.check_arrays`\n :param list[iterable] arrays: arrays with same length of first dimension.\n \"\"\"\n assert len(arrays) > 0, 'The number of array must be greater than zero'\n checked_arrays = []\n shapes = []\n for arr in arrays:\n if arr is not None:\n checked_arrays.append(numpy.array(arr))\n shapes.append(checked_arrays[-1].shape[0])\n else:\n checked_arrays.append(None)\n assert numpy.sum(numpy.array(shapes) == shapes[0]) == len(shapes), 'Different shapes of the arrays {}'.format(\n shapes)\n return checked_arrays",
"_____no_output_____"
],
[
"def get_efficiencies(prediction, spectator, sample_weight=None, bins_number=20,\n thresholds=None, errors=False, ignored_sideband=0.0):\n \"\"\"\n Construct efficiency function dependent on spectator for each threshold\n Different score functions available: Efficiency, Precision, Recall, F1Score,\n and other things from sklearn.metrics\n :param prediction: list of probabilities\n :param spectator: list of spectator's values\n :param bins_number: int, count of bins for plot\n :param thresholds: list of prediction's threshold\n (default=prediction's cuts for which efficiency will be [0.2, 0.4, 0.5, 0.6, 0.8])\n :return:\n if errors=False\n OrderedDict threshold -> (x_values, y_values)\n if errors=True\n OrderedDict threshold -> (x_values, y_values, y_err, x_err)\n All the parts: x_values, y_values, y_err, x_err are numpy.arrays of the same length.\n \"\"\"\n prediction, spectator, sample_weight = \\\n check_arrays(prediction, spectator, sample_weight)\n\n spectator_min, spectator_max = weighted_quantile(spectator, [ignored_sideband, (1. 
- ignored_sideband)])\n mask = (spectator >= spectator_min) & (spectator <= spectator_max)\n spectator = spectator[mask]\n prediction = prediction[mask]\n bins_number = min(bins_number, len(prediction))\n sample_weight = sample_weight if sample_weight is None else numpy.array(sample_weight)[mask]\n\n if thresholds is None:\n thresholds = [weighted_quantile(prediction, quantiles=1 - eff, sample_weight=sample_weight)\n for eff in [0.2, 0.4, 0.5, 0.6, 0.8]]\n\n binner = Binner(spectator, bins_number=bins_number)\n if sample_weight is None:\n sample_weight = numpy.ones(len(prediction))\n bins_data = binner.split_into_bins(spectator, prediction, sample_weight)\n\n bin_edges = numpy.array([spectator_min] + list(binner.limits) + [spectator_max])\n xerr = numpy.diff(bin_edges) / 2.\n result = OrderedDict()\n for threshold in thresholds:\n x_values = []\n y_values = []\n N_in_bin = []\n for num, (masses, probabilities, weights) in enumerate(bins_data):\n y_values.append(numpy.average(probabilities > threshold, weights=weights))\n N_in_bin.append(numpy.sum(weights))\n if errors:\n x_values.append((bin_edges[num + 1] + bin_edges[num]) / 2.)\n else:\n x_values.append(numpy.mean(masses))\n\n x_values, y_values, N_in_bin = check_arrays(x_values, y_values, N_in_bin)\n if errors:\n result[threshold] = (x_values, y_values, numpy.sqrt(y_values * (1 - y_values) / N_in_bin), xerr)\n else:\n result[threshold] = (x_values, y_values)\n return result",
"_____no_output_____"
],
[
"def weighted_quantile(array, quantiles, sample_weight=None, array_sorted=False, old_style=False):\n \"\"\"Computing quantiles of array. Unlike the numpy.percentile, this function supports weights,\n but it is inefficient and performs complete sorting.\n :param array: distribution, array of shape [n_samples]\n :param quantiles: floats from range [0, 1] with quantiles of shape [n_quantiles]\n :param sample_weight: optional weights of samples, array of shape [n_samples]\n :param array_sorted: if True, the sorting step will be skipped\n :param old_style: if True, will correct output to be consistent with numpy.percentile.\n :return: array of shape [n_quantiles]\n Example:\n >>> weighted_quantile([1, 2, 3, 4, 5], [0.5])\n Out: array([ 3.])\n >>> weighted_quantile([1, 2, 3, 4, 5], [0.5], sample_weight=[3, 1, 1, 1, 1])\n Out: array([ 2.])\n \"\"\"\n array = numpy.array(array)\n quantiles = numpy.array(quantiles)\n sample_weight = check_sample_weight(array, sample_weight)\n assert numpy.all(quantiles >= 0) and numpy.all(quantiles <= 1), 'Percentiles should be in [0, 1]'\n\n if not array_sorted:\n array, sample_weight = reorder_by_first(array, sample_weight)\n\n weighted_quantiles = numpy.cumsum(sample_weight) - 0.5 * sample_weight\n if old_style:\n # To be convenient with numpy.percentile\n weighted_quantiles -= weighted_quantiles[0]\n weighted_quantiles /= weighted_quantiles[-1]\n else:\n weighted_quantiles /= numpy.sum(sample_weight)\n return numpy.interp(quantiles, weighted_quantiles, array)\n",
"_____no_output_____"
],
[
"def check_sample_weight(y_true, sample_weight):\n \"\"\"Checks the weights, if None, returns array.\n :param y_true: labels (or any array of length [n_samples])\n :param sample_weight: None or array of length [n_samples]\n :return: numpy.array of shape [n_samples]\n \"\"\"\n if sample_weight is None:\n return numpy.ones(len(y_true), dtype=numpy.float)\n else:\n sample_weight = numpy.array(sample_weight, dtype=numpy.float)\n assert len(y_true) == len(sample_weight), \\\n \"The length of weights is different: not {0}, but {1}\".format(len(y_true), len(sample_weight))\n return sample_weight\n\n",
"_____no_output_____"
],
[
"\ndef reorder_by_first(*arrays):\n \"\"\"\n Applies the same permutation to all passed arrays,\n permutation sorts the first passed array\n \"\"\"\n arrays = check_arrays(*arrays)\n order = numpy.argsort(arrays[0])\n return [arr[order] for arr in arrays]\n\nclass Binner(object):\n def __init__(self, values, bins_number):\n \"\"\"\n Binner is a class that helps to split the values into several bins.\n Initially an array of values is given, which is then splitted into 'bins_number' equal parts,\n and thus we are computing limits (boundaries of bins).\n \"\"\"\n percentiles = [i * 100.0 / bins_number for i in range(1, bins_number)]\n self.limits = numpy.percentile(values, percentiles)\n\n def get_bins(self, values):\n \"\"\"Given the values of feature, compute the index of bin\n :param values: array of shape [n_samples]\n :return: array of shape [n_samples]\n \"\"\"\n return numpy.searchsorted(self.limits, values)\n\n def set_limits(self, limits):\n \"\"\"Change the thresholds inside bins.\"\"\"\n self.limits = limits\n\n @property\n def bins_number(self):\n \"\"\":return: number of bins\"\"\"\n return len(self.limits) + 1\n\n def split_into_bins(self, *arrays):\n \"\"\"\n :param arrays: data to be splitted, the first array corresponds\n :return: sequence of length [n_bins] with values corresponding to each bin.\n \"\"\"\n values = arrays[0]\n for array in arrays:\n assert len(array) == len(values), \"passed arrays have different length\"\n bins = self.get_bins(values)\n result = []\n for bin in range(len(self.limits) + 1):\n indices = bins == bin\n result.append([numpy.array(array)[indices] for array in arrays])\n return result\nfrom collections import OrderedDict\n",
"_____no_output_____"
],
[
"eff = get_efficiencies(y_pred, df_corr_check.mass, thresholds=[0.5]) #, thresholds=[0.2, 0.4, 0.5, 0.6, 0.8])",
"_____no_output_____"
],
[
"eff.keys()",
"_____no_output_____"
],
[
"for label_name, eff_data in eff.items():\n pyplot.plot(eff_data[0], eff_data[1], label=\"global eff %.1f\" % label_name)\npyplot.xlabel('mass')\npyplot.ylabel('Efficiency')\npyplot.legend();",
"_____no_output_____"
],
[
"from utils import check_correlation",
"_____no_output_____"
],
[
"corr_metric = check_correlation(y_pred, df_corr_check['mass'])\nprint (corr_metric)",
"0.00019410562429501838\n"
]
],
[
[
"## MC vs Real difference",
"_____no_output_____"
]
],
[
[
"df_agreement = pandas.read_csv('reference/check_agreement.csv')",
"_____no_output_____"
],
[
"\nfrom sklearn.utils.validation import column_or_1d\ndef get_ks_metric(df_agree, df_test):\n sig_ind = df_agree[df_agree['signal'] == 1].index\n bck_ind = df_agree[df_agree['signal'] == 0].index\n\n mc_prob = numpy.array(df_test.loc[sig_ind]['prediction'])\n mc_weight = numpy.array(df_agree.loc[sig_ind]['weight'])\n data_prob = numpy.array(df_test.loc[bck_ind]['prediction'])\n data_weight = numpy.array(df_agree.loc[bck_ind]['weight'])\n val, agreement_metric = check_agreement_ks_sample_weighted(data_prob, mc_prob, data_weight, mc_weight)\n return agreement_metric['ks']",
"_____no_output_____"
],
[
"def check_agreement_ks_sample_weighted (data_prediction, mc_prediction, weights_data, weights_mc):\n data_prediction, weights_data = map(column_or_1d, [data_prediction, weights_data])\n mc_prediction, weights_mc = map(column_or_1d, [mc_prediction, weights_mc])\n\n assert numpy.all(data_prediction >= 0.) and numpy.all(data_prediction <= 1.), 'error in prediction'\n assert numpy.all(mc_prediction >= 0.) and numpy.all(mc_prediction <= 1.), 'error in prediction'\n\n weights_data = weights_data / numpy.sum(weights_data)\n weights_mc = weights_mc / numpy.sum(weights_mc)\n\n data_neg = data_prediction[weights_data < 0]\n weights_neg = -weights_data[weights_data < 0]\n mc_prediction = numpy.concatenate((mc_prediction, data_neg))\n weights_mc = numpy.concatenate((weights_mc, weights_neg))\n data_prediction = data_prediction[weights_data >= 0]\n weights_data = weights_data[weights_data >= 0]\n\n assert numpy.all(weights_data >= 0) and numpy.all(weights_mc >= 0)\n assert numpy.allclose(weights_data.sum(), weights_mc.sum())\n\n weights_data /= numpy.sum(weights_data)\n weights_mc /= numpy.sum(weights_mc)\n\n fpr, tpr, _ = roc_curve_splitted(data_prediction, mc_prediction, weights_data, weights_mc)\n\n Dnm = numpy.max(numpy.abs(fpr - tpr))\n Dnm_part = numpy.max(numpy.abs(fpr - tpr)[fpr + tpr < 1])\n\n result = {'ks': Dnm, 'ks_part': Dnm_part}\n return Dnm_part < 0.03, result",
"_____no_output_____"
],
[
"df_agreement.columns",
"_____no_output_____"
],
[
"df_agreement[variables].head()",
"_____no_output_____"
],
[
"def compute_ks(data_prediction, mc_prediction, weights_data, weights_mc):\n \"\"\"\n Compute Kolmogorov-Smirnov (ks) distance between real data predictions cdf and Monte Carlo one.\n :param data_prediction: array-like, real data predictions\n :param mc_prediction: array-like, Monte Carlo data predictions\n :param weights_data: array-like, real data weights\n :param weights_mc: array-like, Monte Carlo weights\n :return: ks value\n \"\"\"\n assert len(data_prediction) == len(weights_data), 'Data length and weight one must be the same'\n assert len(mc_prediction) == len(weights_mc), 'Data length and weight one must be the same'\n\n data_prediction, mc_prediction = numpy.array(data_prediction), numpy.array(mc_prediction)\n weights_data, weights_mc = numpy.array(weights_data), numpy.array(weights_mc)\n\n assert numpy.all(data_prediction >= 0.) and numpy.all(data_prediction <= 1.), 'Data predictions are out of range [0, 1]'\n assert numpy.all(mc_prediction >= 0.) and numpy.all(mc_prediction <= 1.), 'MC predictions are out of range [0, 1]'\n\n weights_data /= numpy.sum(weights_data)\n weights_mc /= numpy.sum(weights_mc)\n\n fpr, tpr = __roc_curve_splitted(data_prediction, mc_prediction, weights_data, weights_mc)\n\n Dnm = numpy.max(numpy.abs(fpr - tpr))\n return Dnm",
"_____no_output_____"
],
[
"from sklearn.metrics import roc_curve\ndef __roc_curve_splitted(data_zero, data_one, sample_weights_zero, sample_weights_one):\n \"\"\"\n Compute roc curve\n :param data_zero: 0-labeled data\n :param data_one: 1-labeled data\n :param sample_weights_zero: weights for 0-labeled data\n :param sample_weights_one: weights for 1-labeled data\n :return: roc curve\n \"\"\"\n labels = [0] * len(data_zero) + [1] * len(data_one)\n weights = numpy.concatenate([sample_weights_zero, sample_weights_one])\n data_all = numpy.concatenate([data_zero, data_one])\n fpr, tpr, _ = roc_curve(labels, data_all, sample_weight=weights)\n return fpr, tpr",
"_____no_output_____"
],
[
"agreement_probs = clf.predict_proba(df_agreement[variables])[:, 1]\n\nks = compute_ks(\n agreement_probs[df_agreement['signal'].values == 0],\n agreement_probs[df_agreement['signal'].values == 1],\n df_agreement[df_agreement['signal'] == 0]['weight'].values,\n df_agreement[df_agreement['signal'] == 1]['weight'].values)\nprint ('KS metric:', ks, \"is OK:\", ks < 0.09)",
"KS metric: 0.1731465885998545 is OK: False\n"
],
[
"def plot_ks(X_agreement, y_pred):\n sig_ind = X_agreement[X_agreement['signal'] == 1].index\n bck_ind = X_agreement[X_agreement['signal'] == 0].index\n\n mc_prob = y_pred[sig_ind]\n mc_weight = numpy.array(X_agreement.loc[sig_ind]['weight'])\n data_prob = y_pred[bck_ind]\n data_weight = numpy.array(X_agreement.loc[bck_ind]['weight'])\n inds = data_weight < 0\n mc_weight = numpy.array(list(mc_weight) + list(-data_weight[inds]))\n mc_prob = numpy.array(list(mc_prob) + list(data_prob[inds]))\n data_prob = data_prob[data_weight >= 0]\n data_weight = data_weight[data_weight >= 0]\n hist(data_prob, weights=data_weight, color='r', histtype='step', density=True, bins=60, label='data')\n hist(mc_prob, weights=mc_weight, color='b', histtype='step', density=True, bins=60, label='mc')\n xlabel(\"prediction\")\n legend(loc=2)\n show()",
"_____no_output_____"
],
[
"plot_ks(df_agreement, agreement_probs)",
"_____no_output_____"
]
],
[
[
"### Let's see if adding some noise can improve the agreement",
"_____no_output_____"
]
],
[
[
"def add_noise(array, level=0.15, random_seed=34):\n numpy.random.seed(random_seed)\n return level * numpy.random.random(size=array.size) + (1 - level) * array",
"_____no_output_____"
],
[
"agreement_probs_noise = add_noise(clf.predict_proba(df_agreement[variables])[:, 1])\n",
"_____no_output_____"
],
[
"ks_noise = compute_ks(\n agreement_probs_noise[df_agreement['signal'].values == 0],\n agreement_probs_noise[df_agreement['signal'].values == 1],\n df_agreement[df_agreement['signal'] == 0]['weight'].values,\n df_agreement[df_agreement['signal'] == 1]['weight'].values)\nprint ('KS metric:', ks_noise, \"is OK:\", ks_noise < 0.09)",
"KS metric: 0.08910102981105428 is OK: True\n"
],
[
"plot_ks(df_agreement, agreement_probs_noise)",
"_____no_output_____"
]
],
[
[
"### Check ROC with noise",
"_____no_output_____"
]
],
[
[
"test.shape",
"_____no_output_____"
],
[
"y_pred = add_noise(clf.predict_proba(test[variables])[:, 1])\n\nplot_metrics(test['signal'], y_pred)\ntest.shape, y_pred.shape",
"_____no_output_____"
]
],
[
[
"# Train the model using the whole training sample",
"_____no_output_____"
]
],
[
[
"%time clf.fit(train_ada[variables], train_ada['signal'])",
"Wall time: 1min 16s\n"
]
],
[
[
"Compute prediction and add noise",
"_____no_output_____"
]
],
[
[
"y_pred = add_noise(clf.predict_proba(test_ada[variables])[:, 1])",
"_____no_output_____"
]
],
[
[
"# Prepare submission file",
"_____no_output_____"
]
],
[
[
"def save_submission(y_pred, index, filename='result'):\n sep = ','\n filename = '{}.csv.gz'.format(filename)\n pandas.DataFrame({'id': index, 'prediction': y_pred}).to_csv(\n filename, sep=sep, index=False, compression='gzip')\n print (\"Saved file: \", filename, \"\\nShape:\", (y_pred.shape[0], 2))\n return filename",
"_____no_output_____"
],
[
"save_submission(y_pred, test_ada.index, \"sample_submission\")",
"Saved file: sample_submission.csv.gz \nShape: (855819, 2)\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e742efc15493df218edaa36abedcf7459e15f6dd | 131,451 | ipynb | Jupyter Notebook | examples/ex4-crystallization.ipynb | rajeshrinet/pystokes | 3ff1b9e3f631935206d58de426dd34e8a0169377 | [
"MIT"
] | 28 | 2015-05-03T12:33:11.000Z | 2022-01-06T06:34:54.000Z | binder/ex4-crystallization.ipynb | rajeshrinet/pystokes | 3ff1b9e3f631935206d58de426dd34e8a0169377 | [
"MIT"
] | 9 | 2020-06-09T21:22:37.000Z | 2020-10-29T21:22:38.000Z | examples/ex4-crystallization.ipynb | rajeshrinet/pystokes | 3ff1b9e3f631935206d58de426dd34e8a0169377 | [
"MIT"
] | 15 | 2016-10-26T02:35:18.000Z | 2022-01-21T21:00:25.000Z | 859.156863 | 66,704 | 0.953922 | [
[
[
"## ex4 : flow - induced phase separation of active colloids at a wall\n \n%matplotlib inline\nimport pystokes \nimport numpy as np, matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# particle radius, self-propulsion speed, number and fluid viscosity\nb, vs, Np, eta = 1.0, 1.0, 128, 0.1\n\n#initialise\nr = pystokes.utils.initialCondition(Np) # initial random distribution of positions\np = np.zeros(3*Np); p[2*Np:3*Np] = -1 # initial orientation of the colloids",
"_____no_output_____"
],
[
"def rhs(rp):\n \"\"\"\n right hand side of the rigid body motion equation\n rp: is the array of position and orientations of the colloids\n returns the \\dot{rp} so that rp can be updated using an integrator\n orientations are not evolved in this example\n \"\"\"\n # assign fresh values at each time step\n r = rp[0:3*Np]; p = rp[3*Np:6*Np]\n F, v, o = np.zeros(3*Np), np.zeros(3*Np), np.zeros(3*Np)\n \n force.lennardJonesWall(F, r, lje=0.01, ljr=5, wlje=1.2, wljr=3.4)\n rbm.mobilityTT(v, r, F) \n \n V1s = vs*p; V3t=0.6*V1s;\n rbm.propulsionT3t(v, r, V3t); v = v + V1s\n return np.concatenate( (v,o) )",
"_____no_output_____"
]
],
[
[
"## Crystallization at a plane no-slip wall",
"_____no_output_____"
]
],
[
[
"rbm = pystokes.wallBounded.Rbm(radius=b, particles=Np, viscosity=eta)\nforce = pystokes.forceFields.Forces(particles=Np)\n\n# simulate the resulting system\nTf, Npts = 150, 200\npystokes.utils.simulate(np.concatenate((r,p)), Tf,Npts,rhs,integrator='odeint', filename='crystallization')\n\n# plot the data at specific time instants\npystokes.utils.plotConfigs(t=[1, 40, 100, 200], ms=60, tau=(Tf/Npts)/(b/vs), filename='crystallization')",
"_____no_output_____"
]
],
[
[
"## Crystallization at a plane no-shear interface",
"_____no_output_____"
]
],
[
[
"rbm = pystokes.interface.Rbm(radius=b, particles=Np, viscosity=eta)\n\nforce = pystokes.forceFields.Forces(particles=Np)\n\n# simulate the resulting system\nTf, Npts = 150, 200\npystokes.utils.simulate(np.concatenate((r,p)), Tf,Npts,rhs,integrator='odeint', filename='crystallization')\n\n# plot the data at specific time instants\npystokes.utils.plotConfigs(t=[1, 40, 100, 200], ms=60, tau=(Tf/Npts)/(b/vs), filename='crystallization')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e742f15a4a0600308b0e0d30e1cca65e28565371 | 3,260 | ipynb | Jupyter Notebook | labs/load_item_by_id.ipynb | nitz21/arcpy | 36074b5d448c9cfdba166332e99100afb3390824 | [
"Apache-2.0"
] | 1 | 2019-03-14T22:59:30.000Z | 2019-03-14T22:59:30.000Z | labs/load_item_by_id.ipynb | josemartinsgeo/arcgis-python-api | 4c10bb1ce900060959829f7ac6c58d4d67037d56 | [
"Apache-2.0"
] | null | null | null | labs/load_item_by_id.ipynb | josemartinsgeo/arcgis-python-api | 4c10bb1ce900060959829f7ac6c58d4d67037d56 | [
"Apache-2.0"
] | 1 | 2020-06-06T21:21:18.000Z | 2020-06-06T21:21:18.000Z | 29.107143 | 215 | 0.511043 | [
[
[
"from arcgis.gis import GIS",
"_____no_output_____"
],
[
"trailheads_id = \"883cedb8c9fe4524b64d47666ed234a7\"",
"_____no_output_____"
],
[
"anon_gis = GIS()\nprint(\"Connected to {}\".format(anon_gis.properties.portalHostname))",
"Connected to www.arcgis.com\n"
],
[
"trailheads_itm = anon_gis.content.get(trailheads_id)\ntrailheads_itm",
"_____no_output_____"
],
[
"m1 = anon_gis.map(\"Los Angeles\", 8)\nm1.add_layer(trailheads_itm)\nm1",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e742fc5fdc716d971758ddfede4fe9029d576783 | 621,422 | ipynb | Jupyter Notebook | fcis/InstanceSegmentation_Sentinel2/fcis_profile_nebraska.ipynb | ecohydro/CropMask_RCNN | 4657ed1d103acb37dc974aa6af2f0d3a3398e987 | [
"MIT"
] | 13 | 2019-03-01T23:41:27.000Z | 2021-07-12T06:28:31.000Z | fcis/InstanceSegmentation_Sentinel2/fcis_profile_nebraska.ipynb | ecohydro/CropMask_RCNN | 4657ed1d103acb37dc974aa6af2f0d3a3398e987 | [
"MIT"
] | 32 | 2019-02-21T21:14:18.000Z | 2020-12-31T19:48:41.000Z | fcis/InstanceSegmentation_Sentinel2/fcis_profile_nebraska.ipynb | ecohydro/CropMask_RCNN | 4657ed1d103acb37dc974aa6af2f0d3a3398e987 | [
"MIT"
] | 3 | 2018-11-19T23:02:01.000Z | 2021-12-01T15:52:06.000Z | 40.024604 | 2,443 | 0.538837 | [
[
[
"# --------------------------------------------------------\n# adaptd from the demo (RA)\n# Fully Convolutional Instance-aware Semantic Segmentation\n# Copyright (c) 2017 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Haochen Zhang, Yi Li, Haozhi Qi\n# --------------------------------------------------------\nimport pandas as pd\nimport os\nimport sys\nos.chdir(\"../FCIS/fcis\") # added because this script expects to be run where demo.py lcoated in fcis repo\nimport _init_paths\nimport utils.image as image\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.pylab as pl\nimport matplotlib.gridspec as gridspec\nimport random\nimport cv2\nimport numpy as np\nfrom dataset.pycocotools.coco import COCO\nfrom dataset.pycocotools import mask as maskUtils\ncompare_py_path = \"../../code/utils/\"\nsys.path.append(os.path.abspath(compare_py_path))\nimport compare\nfrom config.config import config, update_config\nupdate_config('../../code/model/resnet_v1_101_coco_fcis_end2end_ohem-nebraska-128-moresamples.yaml') # pointing to nebraska model config\nvalidation_imagesp = os.path.join(config['dataset']['dataset_path'], \"images\", \"val-nebraska\")\ntest_imagesp = os.path.join(config['dataset']['dataset_path'], \"images\", \"test-nebraska\")\noutput_fig_dir_validation = os.path.join(config['dataset']['root_path'], \"output/figs/validation\")\noutput_fig_dir_test= os.path.join(config['dataset']['root_path'], \"output/figs/test\")\n\n# deterimes val or test run\nanns_path = \"/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/annotations/instances_test-nebraska.json\"\nimage_dir = test_imagesp\noutput_fig_dir = output_fig_dir_test\n\ncoco = COCO(anns_path) # need to load image paths from annotations file so that order of iamges matches order of annotations\nimage_names = [os.path.join(image_dir, subdict['file_name']) for key, subdict in coco.imgs.items()]",
"config/config.py:173: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.\n exp_config = edict(yaml.load(f))\n"
]
],
[
[
"# Evaluating Validation Set (can't evaluate test set on the fly, but the validation set wasn't used to change any hyperparameters)",
"_____no_output_____"
]
],
[
[
"from dataset.pycocotools.coco import COCO\nfrom dataset.pycocotools.cocoeval import COCOeval\n\ncocoGt=COCO(\"/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/annotations/instances_val-nebraska.json\")\ncocoDt=cocoGt.loadRes(\"/home/data/output/resnet_v1_101_coco_fcis_end2end_ohem-nebraska-128-moresamples/val-nebraska/results/detections_val-nebraska_results.json\")\n\ncocoEval = COCOeval(cocoGt,cocoDt)\ncocoEval.params.areaRng = [[0 ** 2, 1e5 ** 2], [0,21.805885 ** 2], [21.805885 ** 2, 24.016960 ** 2], [24.016960 ** 2, 1e5 ** 2]]\ncocoEval.params.maxDets = [10, 25, 50]\ncocoEval.evaluate()\ncocoEval.accumulate()\ncocoEval.summarize()",
"loading annotations into memory...\nDone (t=2.20s)\ncreating index...\nindex created!\nLoading and preparing results... \nDONE (t=2.73s)\ncreating index...\nindex created!\nRunning per image evaluation... \nDONE (t=32.98s).\nAccumulating evaluation results... \nDONE (t=1.76s).\n Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.561\n Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.782\n Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.671\n Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.429\n Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.764\n Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.693\n Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.450\n Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.620\n Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.632\n Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.512\n Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.817\n Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.771\n"
]
],
[
[
"# Making figures for All Validation Images",
"_____no_output_____"
]
],
[
[
"plt.ioff()\nall_maps = []\nall_mars = []\nall_ims, all_dets, all_masks, all_configs, all_classes = compare.predict_on_image_names(image_names, config, model_path_id=\"/home/data/output/resnet_v1_101_coco_fcis_end2end_ohem-nebraska-128-moresamples/train-nebraska/e2e\", epoch=1)\nfor index in range(len(all_ims)):\n coco_anns = compare.load_coco_annotation(index+1, anns_path, image_dir)\n if coco_anns['boxes'].shape[0] == 0 and all_masks[index][0].shape[0] != 0:\n all_maps.append(0) # no true positives or false positves so we set precision to 0\n all_mars.append(0) # no true positives, so recall is 0\n pred_boxes, pred_class_ids, pred_boxes, maskspred = compare.munge_p(coco_anns, index, all_ims, all_dets, all_masks)\n plt.style.use(\"seaborn\")\n fig, ax = plt.subplots(1,2, figsize=(12,12))\n height, width = all_ims[index-1].shape[:2]\n ax[1].set_ylim(height + 10, -10)\n ax[1].set_xlim(-10, width + 10)\n ax[1].axis('off')\n ax[1].imshow(all_ims[index])\n ax[1].set_title(\"2005 Groundtruth Labels (No Labels)\", fontsize=18)\n compare.display_instances(all_ims[index], maskspred, pred_boxes, ax=ax[0], ec= \"green\", title=\"FCIS Model Predictions\")\n plt.tight_layout()\n \n elif coco_anns['boxes'].shape[0] == 0 and all_masks[index][0].shape[0] == 0:\n all_maps.append(1) # no false positves so we set precision to 1\n all_mars.append(1) # no false negatives, so recall is 1\n \n elif all_masks[index][0].shape[0] != 0:\n gt_boxes, gt_class_ids, masksgt, pred_boxes, pred_class_ids, pred_scores, maskspred, positive_ids = compare.munge_predictions_gt(index, all_ims, all_dets, all_masks, coco_anns)\n AP_text, AR_text, aps, ars = compare.calc_stats_and_strs(gt_boxes, gt_class_ids, masksgt, pred_boxes, pred_class_ids, \n pred_scores, maskspred)\n statsdf, map_s, mar_s = compare.stats_df(aps, ars)\n all_maps.append(map_s)\n all_mars.append(mar_s)\n plt.style.use(\"seaborn\")\n gs = gridspec.GridSpec(2, 2)\n mpl.rcParams['font.family'] = 'STIXGeneral'\n plt.rcParams['font.size'] = 
20\n plt.rcParams['axes.linewidth'] = 2\n plt.style.use(\"seaborn\")\n fig = plt.figure(figsize=(10,10))\n ax0 = pl.subplot(gs[0,0])\n ax1 = pl.subplot(gs[0,1])\n ax2 = pl.subplot(gs[1, :])\n compare.display_instances(all_ims[index], maskspred, pred_boxes, ax=ax0, title=\"FCIS Model Predictions\", ec= \"purple\")\n compare.display_instances(all_ims[index], masksgt, gt_boxes, ax=ax1, ec= \"green\", title=\"2005 Groundtruth Labels\", detection_mask=False)\n scores = pd.DataFrame(pred_scores, columns = [\"Detection Likelihood Scores\"])\n boxprops = dict(linestyle='-', linewidth=4, color='k')\n medianprops = dict(linestyle='-', linewidth=4, color='k')\n scores.boxplot(ax =ax2, vert=False, fontsize=14, showmeans=True,\n boxprops=boxprops,\n medianprops=medianprops)\n y_axis = ax2.axes.get_yaxis()\n y_axis.set_visible(False)\n ax2.set_title(\"Detection Likelihood Scores\", fontsize=20)\n plt.tight_layout()\n else:\n all_maps.append(0) # no true positives or false positves so we set precision to 0\n all_mars.append(0) # no true positives, so recall is 0\n gt_class_ids, masksgt = compare.munge_g(coco_anns, index, all_ims, all_dets, all_masks)\n plt.style.use(\"seaborn\")\n fig, ax = plt.subplots(1,2, figsize=(12,12))\n height, width = all_ims[index-1].shape[:2]\n ax[0].set_ylim(height + 10, -10)\n ax[0].set_xlim(-10, width + 10)\n ax[0].axis('off')\n ax[0].imshow(all_ims[index])\n ax[0].set_title(\"FCIS Model Predictions (None Predicted)\", fontsize=18)\n compare.display_instances(all_ims[index], masksgt, coco_anns['boxes'], ax=ax[1], ec= \"green\", title=\"2005 Groundtruth Labels\", detection_mask=False)\n plt.tight_layout()\n plt.savefig(os.path.join(output_fig_dir, str(index)+\"test.png\"))\n # \"mAP 0.5-0.95: \"+str(map_s), \"mAR 0.5-0.95: \"+str(map_r)",
"('use mxnet at', '/home/mxnet/python/mxnet/__init__.pyc')\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000001.jpg 0.0665s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000002.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000003.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000004.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000005.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000006.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000007.jpg 0.0602s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000008.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000009.jpg 0.0570s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000010.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000011.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000012.jpg 0.0547s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000013.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000014.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000015.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000016.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000017.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000018.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000019.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000020.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000021.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000022.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000023.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000024.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000025.jpg 0.0545s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000026.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000027.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000028.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000029.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000030.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000031.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000032.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000033.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000034.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000035.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000036.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000037.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000038.jpg 0.0540s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000039.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000040.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000041.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000042.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000043.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000044.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000045.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000046.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000047.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000048.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000049.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000050.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000051.jpg 0.0549s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000052.jpg 0.0568s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000053.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000054.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000055.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000056.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000057.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000058.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000059.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000060.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000061.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000062.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000063.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000064.jpg 0.0547s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000065.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000066.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000067.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000068.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000069.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000070.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000071.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000072.jpg 0.0569s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000073.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000074.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000075.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000076.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000077.jpg 0.0549s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000078.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000079.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000080.jpg 0.0537s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000081.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000082.jpg 0.0538s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000083.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000084.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000085.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000086.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000087.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000088.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000089.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000090.jpg 0.0542s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000091.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000092.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000093.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000094.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000095.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000096.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000097.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000098.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000099.jpg 0.0607s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000100.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000101.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000102.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000103.jpg 0.0543s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000104.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000105.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000106.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000107.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000108.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000109.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000110.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000111.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000112.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000113.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000114.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000115.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000116.jpg 0.0545s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000117.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000118.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000119.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000120.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000121.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000122.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000123.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000124.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000125.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000126.jpg 0.0595s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000127.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000128.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000129.jpg 0.0545s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000130.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000131.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000132.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000133.jpg 0.0565s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000134.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000135.jpg 0.0585s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000136.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000137.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000138.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000139.jpg 0.0536s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000140.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000141.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000142.jpg 0.0562s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000143.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000144.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000145.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000146.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000147.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000148.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000149.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000150.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000151.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000152.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000153.jpg 0.0563s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000154.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000155.jpg 0.0547s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000156.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000157.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000158.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000159.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000160.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000161.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000162.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000163.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000164.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000165.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000166.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000167.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000168.jpg 0.0557s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000169.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000170.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000171.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000172.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000173.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000174.jpg 0.0614s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000175.jpg 0.0570s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000176.jpg 0.0562s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000177.jpg 0.0571s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000178.jpg 0.0568s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000179.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000180.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000181.jpg 0.0567s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000182.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000183.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000184.jpg 0.0571s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000185.jpg 0.0563s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000186.jpg 0.0567s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000187.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000188.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000189.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000190.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000191.jpg 0.0567s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000192.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000193.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000194.jpg 0.0561s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000195.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000196.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000197.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000198.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000199.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000200.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000201.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000202.jpg 0.0572s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000203.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000204.jpg 0.0572s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000205.jpg 0.0565s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000206.jpg 0.0576s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000207.jpg 0.0566s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000208.jpg 0.0562s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000209.jpg 0.0578s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000210.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000211.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000212.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000213.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000214.jpg 0.0563s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000215.jpg 0.0523s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000216.jpg 0.0519s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000217.jpg 0.0522s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000218.jpg 0.0517s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000219.jpg 0.0513s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000220.jpg 0.0528s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000221.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000222.jpg 0.0615s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000223.jpg 0.0570s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000224.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000225.jpg 0.0565s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000226.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000227.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000228.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000229.jpg 0.0570s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000230.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000231.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000232.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000233.jpg 0.0547s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000234.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000235.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000236.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000237.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000238.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000239.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000240.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000241.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000242.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000243.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000244.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000245.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000246.jpg 0.0551s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000247.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000248.jpg 0.0576s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000249.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000250.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000251.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000252.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000253.jpg 0.0575s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000254.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000255.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000256.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000257.jpg 0.0533s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000258.jpg 0.0528s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000259.jpg 0.0531s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000260.jpg 0.0530s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000261.jpg 0.0536s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000262.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000263.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000264.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000265.jpg 0.0581s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000266.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000267.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000268.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000269.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000270.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000271.jpg 0.0584s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000272.jpg 0.0542s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000273.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000274.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000275.jpg 0.0594s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000276.jpg 0.0534s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000277.jpg 0.0529s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000278.jpg 0.0538s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000279.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000280.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000281.jpg 0.0573s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000282.jpg 0.0605s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000283.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000284.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000285.jpg 0.0556s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000286.jpg 0.0571s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000287.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000288.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000289.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000290.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000291.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000292.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000293.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000294.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000295.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000296.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000297.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000298.jpg 0.0554s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000299.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000300.jpg 0.0563s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000301.jpg 0.0591s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000302.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000303.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000304.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000305.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000306.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000307.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000308.jpg 0.0577s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000309.jpg 0.0566s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000310.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000311.jpg 0.0550s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000312.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000313.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000314.jpg 0.0563s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000315.jpg 0.0581s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000316.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000317.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000318.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000319.jpg 0.0569s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000320.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000321.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000322.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000323.jpg 0.0566s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000324.jpg 0.0560s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000325.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000326.jpg 0.0565s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000327.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000328.jpg 0.0565s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000329.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000330.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000331.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000332.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000333.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000334.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000335.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000336.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000337.jpg 0.0556s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000338.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000339.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000340.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000341.jpg 0.0582s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000342.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000343.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000344.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000345.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000346.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000347.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000348.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000349.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000350.jpg 0.0561s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000351.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000352.jpg 0.0569s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000353.jpg 0.0562s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000354.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000355.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000356.jpg 0.0522s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000357.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000358.jpg 0.0532s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000359.jpg 0.0535s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000360.jpg 0.0524s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000361.jpg 0.0529s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000362.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000363.jpg 0.0548s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000364.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000365.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000366.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000367.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000368.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000369.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000370.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000371.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000372.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000373.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000374.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000375.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000376.jpg 0.0536s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000377.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000378.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000379.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000380.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000381.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000382.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000383.jpg 0.0576s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000384.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000385.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000386.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000387.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000388.jpg 0.0571s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000389.jpg 0.0562s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000390.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000391.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000392.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000393.jpg 0.0566s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000394.jpg 0.0562s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000395.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000396.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000397.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000398.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000399.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000400.jpg 0.0568s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000401.jpg 0.0582s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000402.jpg 0.0553s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000403.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000404.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000405.jpg 0.0632s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000406.jpg 0.0565s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000407.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000408.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000409.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000410.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000411.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000412.jpg 0.0574s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000413.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000414.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000415.jpg 0.0557s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000416.jpg 0.0576s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000417.jpg 0.0568s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000418.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000419.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000420.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000421.jpg 0.0573s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000422.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000423.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000424.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000425.jpg 0.0588s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000426.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000427.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000428.jpg 0.0552s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000429.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000430.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000431.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000432.jpg 0.0570s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000433.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000434.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000435.jpg 0.0578s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000436.jpg 0.0568s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000437.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000438.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000439.jpg 0.0583s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000440.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000441.jpg 0.0588s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000442.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000443.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000444.jpg 0.0594s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000445.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000446.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000447.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000448.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000449.jpg 0.0537s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000450.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000451.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000452.jpg 0.0534s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000453.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000454.jpg 0.0555s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000455.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000456.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000457.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000458.jpg 0.0571s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000459.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000460.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000461.jpg 0.0586s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000462.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000463.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000464.jpg 0.0604s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000465.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000466.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000467.jpg 0.0555s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000468.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000469.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000470.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000471.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000472.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000473.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000474.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000475.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000476.jpg 0.0604s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000477.jpg 0.0620s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000478.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000479.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000480.jpg 0.0550s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000481.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000482.jpg 0.0572s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000483.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000484.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000485.jpg 0.0611s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000486.jpg 0.0567s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000487.jpg 0.0582s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000488.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000489.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000490.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000491.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000492.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000493.jpg 0.0552s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000494.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000495.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000496.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000497.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000498.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000499.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000500.jpg 0.0579s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000501.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000502.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000503.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000504.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000505.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000506.jpg 0.0539s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000507.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000508.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000509.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000510.jpg 0.0538s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000511.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000512.jpg 0.0536s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000513.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000514.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000515.jpg 0.0578s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000516.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000517.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000518.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000519.jpg 0.0547s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000520.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000521.jpg 0.0574s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000522.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000523.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000524.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000525.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000526.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000527.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000528.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000529.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000530.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000531.jpg 0.0567s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000532.jpg 0.0553s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000533.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000534.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000535.jpg 0.0524s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000536.jpg 0.0535s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000537.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000538.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000539.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000540.jpg 0.0534s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000541.jpg 0.0527s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000542.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000543.jpg 0.0578s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000544.jpg 0.0562s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000545.jpg 0.0558s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000546.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000547.jpg 0.0573s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000548.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000549.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000550.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000551.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000552.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000553.jpg 0.0627s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000554.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000555.jpg 0.0580s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000556.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000557.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000558.jpg 0.0542s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000559.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000560.jpg 0.0519s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000561.jpg 0.0524s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000562.jpg 0.0519s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000563.jpg 0.0519s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000564.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000565.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000566.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000567.jpg 0.0579s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000568.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000569.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000570.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000571.jpg 0.0552s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000572.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000573.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000574.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000575.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000576.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000577.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000578.jpg 0.0567s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000579.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000580.jpg 0.0528s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000581.jpg 0.0535s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000582.jpg 0.0538s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000583.jpg 0.0524s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000584.jpg 0.0522s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000585.jpg 0.0605s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000586.jpg 0.0523s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000587.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000588.jpg 0.0536s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000589.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000590.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000591.jpg 0.0537s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000592.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000593.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000594.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000595.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000596.jpg 0.0524s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000597.jpg 0.0530s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000598.jpg 0.0519s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000599.jpg 0.0521s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000600.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000601.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000602.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000603.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000604.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000605.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000606.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000607.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000608.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000609.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000610.jpg 0.0565s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000611.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000612.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000613.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000614.jpg 0.0526s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000615.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000616.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000617.jpg 0.0643s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000618.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000619.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000620.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000621.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000622.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000623.jpg 0.0550s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000624.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000625.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000626.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000627.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000628.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000629.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000630.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000631.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000632.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000633.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000634.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000635.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000636.jpg 0.0554s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000637.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000638.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000639.jpg 0.0534s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000640.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000641.jpg 0.0523s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000642.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000643.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000644.jpg 0.0537s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000645.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000646.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000647.jpg 0.0569s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000648.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000649.jpg 0.0547s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000650.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000651.jpg 0.0522s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000652.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000653.jpg 0.0524s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000654.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000655.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000656.jpg 0.0626s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000657.jpg 0.0535s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000658.jpg 0.0523s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000659.jpg 0.0568s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000660.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000661.jpg 0.0532s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000662.jpg 0.0555s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000663.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000664.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000665.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000666.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000667.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000668.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000669.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000670.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000671.jpg 0.0528s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000672.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000673.jpg 0.0574s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000674.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000675.jpg 0.0524s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000676.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000677.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000678.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000679.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000680.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000681.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000682.jpg 0.0537s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000683.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000684.jpg 0.0517s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000685.jpg 0.0523s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000686.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000687.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000688.jpg 0.0589s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000689.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000690.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000691.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000692.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000693.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000694.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000695.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000696.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000697.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000698.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000699.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000700.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000701.jpg 0.0549s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000702.jpg 0.0534s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000703.jpg 0.0536s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000704.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000705.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000706.jpg 0.0568s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000707.jpg 0.0686s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000708.jpg 0.0518s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000709.jpg 0.0524s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000710.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000711.jpg 0.0538s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000712.jpg 0.0562s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000713.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000714.jpg 0.0541s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000715.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000716.jpg 0.0520s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000717.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000718.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000719.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000720.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000721.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000722.jpg 0.0576s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000723.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000724.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000725.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000726.jpg 0.0538s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000727.jpg 0.0559s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000728.jpg 0.0538s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000729.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000730.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000731.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000732.jpg 0.0532s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000733.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000734.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000735.jpg 0.0523s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000736.jpg 0.0534s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000737.jpg 0.0580s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000738.jpg 0.0625s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000739.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000740.jpg 0.0546s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000741.jpg 0.0532s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000742.jpg 0.0528s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000743.jpg 0.0534s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000744.jpg 0.0535s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000745.jpg 0.0527s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000746.jpg 0.0532s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000747.jpg 0.0534s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000748.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000749.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000750.jpg 0.0532s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000751.jpg 0.0533s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000752.jpg 0.0533s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000753.jpg 0.0540s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000754.jpg 0.0519s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000755.jpg 0.0528s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000756.jpg 0.0551s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000757.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000758.jpg 0.0577s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000759.jpg 0.0542s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000760.jpg 0.0543s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000761.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000762.jpg 0.0527s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000763.jpg 0.0522s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000764.jpg 0.0528s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000765.jpg 0.0587s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000766.jpg 0.0549s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000767.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000768.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000769.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000770.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000771.jpg 0.0514s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000772.jpg 0.0516s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000773.jpg 0.0521s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000774.jpg 0.0533s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000775.jpg 0.0527s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000776.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000777.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000778.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000779.jpg 0.0547s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000780.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000781.jpg 0.0545s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000782.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000783.jpg 0.0540s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000784.jpg 0.0533s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000785.jpg 0.0518s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000786.jpg 0.0539s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000787.jpg 0.0536s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000788.jpg 0.0546s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000789.jpg 0.0562s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000790.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000791.jpg 0.0537s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000792.jpg 0.0538s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000793.jpg 0.0533s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000794.jpg 0.0519s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000795.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000796.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000797.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000798.jpg 0.0566s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000799.jpg 0.0564s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000800.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000801.jpg 0.0560s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000802.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000803.jpg 0.0541s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000804.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000805.jpg 0.0587s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000806.jpg 0.0547s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000807.jpg 0.0572s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000808.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000809.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000810.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000811.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000812.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000813.jpg 0.0548s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000814.jpg 0.0544s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000815.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000816.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000817.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000818.jpg 0.0570s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000819.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000820.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000821.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000822.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000823.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000824.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000825.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000826.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000827.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000828.jpg 0.0554s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000829.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000830.jpg 0.0565s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000831.jpg 0.0566s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000832.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000833.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000834.jpg 0.0563s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000835.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000836.jpg 0.0563s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000837.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000838.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000839.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000840.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000841.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000842.jpg 0.0549s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000843.jpg 0.0553s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000844.jpg 0.0554s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000845.jpg 0.0581s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000846.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000847.jpg 0.0563s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000848.jpg 0.0550s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000849.jpg 0.0573s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000850.jpg 0.0561s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000851.jpg 0.0562s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000852.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000853.jpg 0.0555s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000854.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000855.jpg 0.0559s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000856.jpg 0.0552s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000857.jpg 0.0556s\n(128, 128)\ntesting 
/home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000858.jpg 0.0556s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000859.jpg 0.0569s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000860.jpg 0.0557s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000861.jpg 0.0558s\n(128, 128)\ntesting /home/data/preprocessed/test-ard-june-sept-rgb-jpeg-split-geo-128/images/test-nebraska/COCO_test-nebraska_000000000862.jpg 0.0567s\nloading annotations into memory...\nDone (t=0.47s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.011\nAP at IOU 0.55: 0.011\nAP at IOU 0.60: 0.011\nAP at IOU 0.65: 0.011\nAP at IOU 0.70: 0.011\nAP at IOU 0.75: 0.011\nAP at IOU 0.80: 0.011\nAP at IOU 0.85: 0.011\nAP at IOU 0.90: 0.011\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.010\nAR at IOU 0.50: 0.444\nAR at IOU 0.55: 0.444\nAR at IOU 0.60: 0.444\nAR at IOU 0.65: 0.389\nAR at IOU 0.70: 0.333\nAR at IOU 0.75: 0.333\nAR at IOU 0.80: 0.222\nAR at IOU 0.85: 0.222\nAR at IOU 0.90: 0.056\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.289\nloading annotations into memory...\nDone (t=0.51s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.014\nAP at IOU 0.55: 0.014\nAP at IOU 0.60: 0.014\nAP at IOU 0.65: 0.014\nAP at IOU 0.70: 0.014\nAP at IOU 0.75: 0.014\nAP at IOU 0.80: 0.014\nAP at IOU 0.85: 0.014\nAP at IOU 0.90: 0.014\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.013\nAR at IOU 0.50: 0.444\nAR at IOU 0.55: 0.444\nAR at IOU 0.60: 0.444\nAR at IOU 0.65: 0.444\nAR at IOU 0.70: 0.389\nAR at IOU 0.75: 0.278\nAR at IOU 0.80: 0.222\nAR at IOU 0.85: 0.111\nAR at IOU 0.90: 0.056\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.283\nloading annotations into 
memory...\nDone (t=0.36s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.400\nAR at IOU 0.55: 0.400\nAR at IOU 0.60: 0.400\nAR at IOU 0.65: 0.400\nAR at IOU 0.70: 0.200\nAR at IOU 0.75: 0.000\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.180\nloading annotations into memory...\nDone (t=0.36s)\ncreating index...\nindex created!\nloading annotations into memory...\nDone (t=0.54s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.000\nAR at IOU 0.55: 0.000\nAR at IOU 0.60: 0.000\nAR at IOU 0.65: 0.000\nAR at IOU 0.70: 0.000\nAR at IOU 0.75: 0.000\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.000\nloading annotations into memory...\nDone (t=0.37s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.200\nAR at IOU 0.55: 0.200\nAR at IOU 0.60: 0.200\nAR at IOU 0.65: 0.200\nAR at IOU 0.70: 0.200\nAR at IOU 0.75: 0.200\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.120\nloading annotations into memory...\nDone (t=0.37s)\ncreating index...\nindex created!\nAP at IOU 
0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.200\nAR at IOU 0.55: 0.200\nAR at IOU 0.60: 0.200\nAR at IOU 0.65: 0.200\nAR at IOU 0.70: 0.200\nAR at IOU 0.75: 0.000\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.100\nloading annotations into memory...\nDone (t=0.76s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.200\nAR at IOU 0.55: 0.200\nAR at IOU 0.60: 0.200\nAR at IOU 0.65: 0.200\nAR at IOU 0.70: 0.200\nAR at IOU 0.75: 0.200\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.120\nloading annotations into memory...\nDone (t=0.35s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.600\nAR at IOU 0.55: 0.600\nAR at IOU 0.60: 0.600\nAR at IOU 0.65: 0.600\nAR at IOU 0.70: 0.600\nAR at IOU 0.75: 0.600\nAR at IOU 0.80: 0.600\nAR at IOU 0.85: 0.200\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.440\nloading annotations into memory...\nDone (t=0.36s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 
0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.400\nAR at IOU 0.55: 0.400\nAR at IOU 0.60: 0.400\nAR at IOU 0.65: 0.400\nAR at IOU 0.70: 0.200\nAR at IOU 0.75: 0.200\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.200\nloading annotations into memory...\nDone (t=0.50s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.002\nAP at IOU 0.55: 0.002\nAP at IOU 0.60: 0.002\nAP at IOU 0.65: 0.002\nAP at IOU 0.70: 0.002\nAP at IOU 0.75: 0.002\nAP at IOU 0.80: 0.002\nAP at IOU 0.85: 0.002\nAP at IOU 0.90: 0.002\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.002\nAR at IOU 0.50: 0.640\nAR at IOU 0.55: 0.640\nAR at IOU 0.60: 0.640\nAR at IOU 0.65: 0.640\nAR at IOU 0.70: 0.640\nAR at IOU 0.75: 0.560\nAR at IOU 0.80: 0.320\nAR at IOU 0.85: 0.200\nAR at IOU 0.90: 0.040\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.432\nloading annotations into memory...\nDone (t=0.51s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.004\nAP at IOU 0.55: 0.004\nAP at IOU 0.60: 0.004\nAP at IOU 0.65: 0.004\nAP at IOU 0.70: 0.004\nAP at IOU 0.75: 0.004\nAP at IOU 0.80: 0.004\nAP at IOU 0.85: 0.004\nAP at IOU 0.90: 0.004\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.004\nAR at IOU 0.50: 0.520\nAR at IOU 0.55: 0.520\nAR at IOU 0.60: 0.520\nAR at IOU 0.65: 0.520\nAR at IOU 0.70: 0.480\nAR at IOU 0.75: 0.360\nAR at IOU 0.80: 0.160\nAR at IOU 0.85: 0.120\nAR at IOU 0.90: 0.040\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.324\nloading annotations into memory...\nDone (t=0.36s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.760\nAR at IOU 0.55: 0.720\nAR at IOU 0.60: 
0.680\nAR at IOU 0.65: 0.640\nAR at IOU 0.70: 0.600\nAR at IOU 0.75: 0.560\nAR at IOU 0.80: 0.400\nAR at IOU 0.85: 0.120\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.448\nloading annotations into memory...\nDone (t=0.37s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.818\nAR at IOU 0.55: 0.818\nAR at IOU 0.60: 0.818\nAR at IOU 0.65: 0.727\nAR at IOU 0.70: 0.727\nAR at IOU 0.75: 0.545\nAR at IOU 0.80: 0.364\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.482\nloading annotations into memory...\nDone (t=0.53s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.545\nAR at IOU 0.55: 0.545\nAR at IOU 0.60: 0.545\nAR at IOU 0.65: 0.545\nAR at IOU 0.70: 0.455\nAR at IOU 0.75: 0.273\nAR at IOU 0.80: 0.273\nAR at IOU 0.85: 0.091\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.327\nloading annotations into memory...\nDone (t=0.36s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.909\nAR at IOU 0.55: 0.909\nAR at IOU 0.60: 0.909\nAR at IOU 0.65: 0.909\nAR at IOU 0.70: 0.636\nAR at IOU 0.75: 0.455\nAR at IOU 0.80: 0.364\nAR at IOU 0.85: 0.273\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 
0.000\nAR across IOUs 0.50-0.95: 0.536\nloading annotations into memory...\nDone (t=0.37s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.750\nAR at IOU 0.55: 0.750\nAR at IOU 0.60: 0.750\nAR at IOU 0.65: 0.750\nAR at IOU 0.70: 0.750\nAR at IOU 0.75: 0.500\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.425\nloading annotations into memory...\nDone (t=0.52s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.250\nAR at IOU 0.55: 0.250\nAR at IOU 0.60: 0.250\nAR at IOU 0.65: 0.250\nAR at IOU 0.70: 0.250\nAR at IOU 0.75: 0.250\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.150\nloading annotations into memory...\nDone (t=0.36s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.000\nAR at IOU 0.55: 0.000\nAR at IOU 0.60: 0.000\nAR at IOU 0.65: 0.000\nAR at IOU 0.70: 0.000\nAR at IOU 0.75: 0.000\nAR at IOU 0.80: 0.000\nAR at IOU 0.85: 0.000\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.000\nloading annotations into memory...\nDone (t=0.36s)\ncreating index...\nindex created!\nloading annotations into 
memory...\nDone (t=0.54s)\ncreating index...\nindex created!\nAP at IOU 0.50: 0.000\nAP at IOU 0.55: 0.000\nAP at IOU 0.60: 0.000\nAP at IOU 0.65: 0.000\nAP at IOU 0.70: 0.000\nAP at IOU 0.75: 0.000\nAP at IOU 0.80: 0.000\nAP at IOU 0.85: 0.000\nAP at IOU 0.90: 0.000\nAP at IOU 0.95: 0.000\nAP across IOUs 0.50-0.95: 0.000\nAR at IOU 0.50: 0.720\nAR at IOU 0.55: 0.680\nAR at IOU 0.60: 0.680\nAR at IOU 0.65: 0.680\nAR at IOU 0.70: 0.640\nAR at IOU 0.75: 0.480\nAR at IOU 0.80: 0.400\nAR at IOU 0.85: 0.200\nAR at IOU 0.90: 0.000\nAR at IOU 0.95: 0.000\nAR across IOUs 0.50-0.95: 0.448\n"
],
[
"np.mean(all_maps)",
"_____no_output_____"
],
[
"np.mean(all_mars)",
"_____no_output_____"
],
[
"import json\n\nwith open(\"/home/data/output/resnet_v1_101_coco_fcis_end2end_ohem-nebraska/val-nebraska/results/detections_val-nebraska_results.json\", \"r\") as f:\n data = json.load(f)",
"_____no_output_____"
],
[
"data[-1]",
"_____no_output_____"
],
[
"import pickle\nwith open('/home/data/output/resnet_v1_101_coco_fcis_end2end_ohem-nebraska/val-nebraska/COCO_val-nebraska_masks.pkl', 'rb') as f:\n data = pickle.load(f)",
"_____no_output_____"
],
[
"data[1][0].shape",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7432db90e2611171e8fdaccd28b5d026b017b83 | 38,499 | ipynb | Jupyter Notebook | homeworks/HW8/HW8-final.ipynb | xuwd11/cs207_Weidong_Xu | 00442657239c7a4040501bf7fa0f6697c731fe94 | [
"MIT"
] | null | null | null | homeworks/HW8/HW8-final.ipynb | xuwd11/cs207_Weidong_Xu | 00442657239c7a4040501bf7fa0f6697c731fe94 | [
"MIT"
] | null | null | null | homeworks/HW8/HW8-final.ipynb | xuwd11/cs207_Weidong_Xu | 00442657239c7a4040501bf7fa0f6697c731fe94 | [
"MIT"
] | null | null | null | 36.700667 | 499 | 0.448479 | [
[
[
"# Homework 8\n## Due Date: Tuesday, October 31st at 11:59 PM",
"_____no_output_____"
],
[
"# Problem 1: BST Traversal\nThis problem builds on Problem 1 of Homework 7 in which you wrote a binary search tree.\n\n### Part 1\n\nAs discussed in lecture, three different types to do a depth-first traversal are: preorder, inorder, and postorder. Here is a reference: [Tree Traversal](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search).\n\nWrite an iterator class called `DFSTraversal` with the following specifications:\n\n* `__init__(self, tree, traversalType)`: Constructor takes a `BinaryTree` object and one of the enums from `DFSTraversalTypes`\n\n```python\nfrom enum import Enum\n\nclass DFSTraversalTypes(Enum):\n PREORDER = 1\n INORDER = 2\n POSTORDER = 3\n```\n\n* `changeTraversalType(self, traversalType)`: Change the traversal type\n* `__iter__(self)`: This is the initialization of an iterator\n* `__next__(self)`: This is called in the iterator for getting the next value\n\nHere's how you might use your `DFSTraversal` class:\n\n```python\ninput_array = [3, 9, 2, 11]\nbt = BinaryTree()\nfor val in input_array:\n bt.insert(val)\ntraversal = DFSTraversal(bt, DFSTraversalTypes.INORDER)\nfor val in traversal:\n print(val)\n2\n3\n9\n11\n```\n\n### Part 2\nPut your `BinaryTree` class (from homework 7) and your `DFSTraversal` class (from Part 1 of this homework) in a file titled `TreeTraversal.py`.",
"_____no_output_____"
]
],
[
[
"# Part 1\n\nfrom enum import Enum\n\nclass DFSTraversalTypes(Enum):\n PREORDER = 1\n INORDER = 2\n POSTORDER = 3\n \nclass Node:\n def __init__(self, value, depth=0, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n self.parent = None\n self.depth = depth\n \n def set_left(self, left):\n self.left = left\n if left is not None:\n left.parent = self\n left.set_depth(self.depth + 1)\n \n def set_right(self, right):\n self.right = right\n if right is not None:\n right.parent = self\n right.set_depth(self.depth + 1)\n \n def set_parent(self, parent):\n self.parent = parent\n \n def set_depth(self, depth):\n self.depth = depth\n \n def __eq__(self, other):\n return self.value == other.value\n \n def __lt__(self, other):\n return self.value < other.value\n \n def __le__(self, other):\n return self.value <= other.value\n \n def __ne__(self, other):\n return self.value != other.value\n \n def __gt__(self, other):\n return self.value > other.value\n \n def __ge__(self, other):\n return self.value >= other.value\n\nclass BinaryTree:\n def __init__(self):\n self.root = None\n \n def insert(self, val):\n new_node = Node(val)\n if self.root is None:\n self.root = new_node\n return\n curr = self.root\n while True:\n if curr < new_node:\n if curr.right is None:\n curr.set_right(new_node)\n return\n else:\n curr = curr.right\n continue\n elif new_node < curr:\n if curr.left is None:\n curr.set_left(new_node)\n return\n else:\n curr = curr.left\n continue\n elif new_node == curr:\n new_node.set_left(curr.left)\n curr.set_left(new_node)\n # update depth for the subtree\n self.getInOrder(new_node)\n return\n \n def remove(self, val):\n curr = self.root\n from_ = None\n while True:\n if val < curr.value:\n curr = curr.left\n from_ = 1\n if curr is None:\n return\n continue\n elif curr.value < val:\n curr = curr.right\n from_ = 2\n if curr is None:\n return\n continue\n else:\n if curr.left is None and curr.right is None:\n if from_ is None:\n 
self.root = None\n elif 1 == from_:\n curr.parent.set_left(None)\n else:\n curr.parent.set_right(None)\n elif curr.left is not None and curr.right is None:\n if from_ is None:\n self.root = curr.left\n self.root.set_parent(None)\n self.root.set_depth(0)\n # update depth for the subtree\n self.getInOrder(self.root)\n elif 1 == from_:\n curr.parent.set_left(curr.left)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n else:\n curr.parent.set_right(curr.left)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n elif curr.left is None and curr.right is not None:\n if from_ is None:\n self.root = curr.right\n self.root.set_parent(None)\n self.root.set_depth(0)\n # update depth for the subtree\n self.getInOrder(self.root)\n elif 1 == from_:\n curr.parent.set_left(curr.right)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n else:\n curr.parent.set_right(curr.right)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n else:\n # find the node (`r_node`) with the maximum value in the left subtree\n r_node = curr.left\n if r_node.right is None:\n is_left = True\n else:\n is_left = False\n while r_node.right is not None:\n r_node = r_node.right\n if is_left:\n r_node.set_right(curr.right)\n else:\n r_node.parent.set_right(r_node.left)\n r_node.set_left(curr.left)\n r_node.set_right(curr.right)\n if from_ is None:\n self.root = r_node\n self.root.set_parent(None)\n self.root.set_depth(0)\n # update depth for the subtree\n self.getInOrder(self.root)\n elif 1 == from_:\n curr.parent.set_left(r_node)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n else:\n curr.parent.set_right(r_node)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n break\n self.remove(val)\n \n def getValues(self, depth):\n inorder = self.getInOrder(self.root)\n values = []\n for node in inorder:\n if node.depth > depth:\n continue\n elif node.depth == depth:\n values.append(node.value)\n else:\n if node.left is None:\n 
values += [None] * 2**(depth-node.depth-1)\n if node.right is None:\n values += [None] * 2**(depth-node.depth-1)\n return values\n \n def getInOrder(self, node):\n # in-order traversal\n # update depth\n inorder = []\n def _inorder(node, inorder):\n if node is None:\n return\n elif node.parent is not None:\n node.set_depth(node.parent.depth + 1)\n _inorder(node.left, inorder)\n inorder.append(node)\n _inorder(node.right, inorder)\n _inorder(node, inorder)\n return inorder\n \n def maxDepth(self):\n inorder = self.getInOrder(self.root)\n return max([i.depth for i in inorder])\n \n def __len__(self):\n return self.maxDepth() + 1\n \nclass DFSTraversal:\n def __init__(self, tree, traversalType):\n self.tree = tree\n self.traversalType = traversalType\n self.changeTraversalType(traversalType)\n \n def changeTraversalType(self, traversalType):\n if not traversalType in DFSTraversalTypes:\n raise TypeError('Invalid traversalType')\n self.traversalType = traversalType\n if traversalType == DFSTraversalTypes.PREORDER:\n self.traversal = self.getPreOrder(self.tree.root)\n elif traversalType == DFSTraversalTypes.INORDER:\n self.traversal = self.getInOrder(self.tree.root)\n elif traversalType == DFSTraversalTypes.POSTORDER:\n self.traversal = self.getPostOrder(self.tree.root)\n \n def getPreOrder(self, node):\n traversal = []\n if node is None:\n return traversal\n s = []\n s.append(node)\n while len(s) > 0:\n node = s.pop()\n traversal.append(node.value)\n if node.right is not None:\n s.append(node.right)\n if node.left is not None:\n s.append(node.left)\n return traversal\n \n def getInOrder(self, node):\n traversal = []\n s = []\n while len(s) > 0 or node is not None:\n if node is not None:\n s.append(node)\n node = node.left\n else:\n node = s.pop()\n traversal.append(node.value)\n node = node.right\n return traversal\n \n def getPostOrder(self, node):\n traversal = []\n s = []\n last_node_visited = None\n while len(s) > 0 or node is not None:\n if node is not None:\n 
s.append(node)\n node = node.left\n else:\n peek_node = s[-1]\n if peek_node.right is not None and id(last_node_visited) != id(peek_node.right):\n node = peek_node.right\n else:\n traversal.append(peek_node.value)\n last_node_visited = s.pop()\n return traversal\n \n def __getitem__(self, i):\n return self.traversal[i]\n \n def __iter__(self):\n self.index = 0\n return self\n \n def __next__(self):\n try:\n elem = self.traversal[self.index]\n except IndexError:\n raise StopIteration()\n self.index += 1\n return elem",
"_____no_output_____"
],
[
"# example\n\ninput_array = [3, 9, 2, 11]\nbt = BinaryTree()\nfor val in input_array:\n bt.insert(val)\ntraversal = DFSTraversal(bt, DFSTraversalTypes.PREORDER)\nprint('pre-oder:')\nfor val in traversal:\n print(val)\nprint()\n\ntraversal.changeTraversalType(DFSTraversalTypes.INORDER)\nprint('in-order:')\nfor val in traversal:\n print(val)\nprint()\n\ntraversal.changeTraversalType(DFSTraversalTypes.POSTORDER)\nprint('post-order:')\nfor val in traversal:\n print(val)\nprint()\n\nprint('test iterator:')\nt_it = iter(traversal)\nwhile True:\n try:\n print(next(t_it))\n except StopIteration:\n del t_it\n break",
"pre-oder:\n3\n2\n9\n11\n\nin-order:\n2\n3\n9\n11\n\npost-order:\n2\n11\n9\n3\n\ntest iterator:\n2\n11\n9\n3\n"
],
[
"# Part 2",
"_____no_output_____"
],
[
"%%file TreeTraversal.py\n\nfrom enum import Enum\n\nclass DFSTraversalTypes(Enum):\n PREORDER = 1\n INORDER = 2\n POSTORDER = 3\n \nclass Node:\n def __init__(self, value, depth=0, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n self.parent = None\n self.depth = depth\n \n def set_left(self, left):\n self.left = left\n if left is not None:\n left.parent = self\n left.set_depth(self.depth + 1)\n \n def set_right(self, right):\n self.right = right\n if right is not None:\n right.parent = self\n right.set_depth(self.depth + 1)\n \n def set_parent(self, parent):\n self.parent = parent\n \n def set_depth(self, depth):\n self.depth = depth\n \n def __eq__(self, other):\n return self.value == other.value\n \n def __lt__(self, other):\n return self.value < other.value\n \n def __le__(self, other):\n return self.value <= other.value\n \n def __ne__(self, other):\n return self.value != other.value\n \n def __gt__(self, other):\n return self.value > other.value\n \n def __ge__(self, other):\n return self.value >= other.value\n\nclass BinaryTree:\n def __init__(self):\n self.root = None\n \n def insert(self, val):\n new_node = Node(val)\n if self.root is None:\n self.root = new_node\n return\n curr = self.root\n while True:\n if curr < new_node:\n if curr.right is None:\n curr.set_right(new_node)\n return\n else:\n curr = curr.right\n continue\n elif new_node < curr:\n if curr.left is None:\n curr.set_left(new_node)\n return\n else:\n curr = curr.left\n continue\n elif new_node == curr:\n new_node.set_left(curr.left)\n curr.set_left(new_node)\n # update depth for the subtree\n self.getInOrder(new_node)\n return\n \n def remove(self, val):\n curr = self.root\n from_ = None\n while True:\n if val < curr.value:\n curr = curr.left\n from_ = 1\n if curr is None:\n return\n continue\n elif curr.value < val:\n curr = curr.right\n from_ = 2\n if curr is None:\n return\n continue\n else:\n if curr.left is None and curr.right is None:\n if from_ 
is None:\n self.root = None\n elif 1 == from_:\n curr.parent.set_left(None)\n else:\n curr.parent.set_right(None)\n elif curr.left is not None and curr.right is None:\n if from_ is None:\n self.root = curr.left\n self.root.set_parent(None)\n self.root.set_depth(0)\n # update depth for the subtree\n self.getInOrder(self.root)\n elif 1 == from_:\n curr.parent.set_left(curr.left)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n else:\n curr.parent.set_right(curr.left)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n elif curr.left is None and curr.right is not None:\n if from_ is None:\n self.root = curr.right\n self.root.set_parent(None)\n self.root.set_depth(0)\n # update depth for the subtree\n self.getInOrder(self.root)\n elif 1 == from_:\n curr.parent.set_left(curr.right)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n else:\n curr.parent.set_right(curr.right)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n else:\n # find the node (`r_node`) with the maximum value in the left subtree\n r_node = curr.left\n if r_node.right is None:\n is_left = True\n else:\n is_left = False\n while r_node.right is not None:\n r_node = r_node.right\n if is_left:\n r_node.set_right(curr.right)\n else:\n r_node.parent.set_right(r_node.left)\n r_node.set_left(curr.left)\n r_node.set_right(curr.right)\n if from_ is None:\n self.root = r_node\n self.root.set_parent(None)\n self.root.set_depth(0)\n # update depth for the subtree\n self.getInOrder(self.root)\n elif 1 == from_:\n curr.parent.set_left(r_node)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n else:\n curr.parent.set_right(r_node)\n # update depth for the subtree\n self.getInOrder(curr.parent)\n break\n self.remove(val)\n \n def getValues(self, depth):\n inorder = self.getInOrder(self.root)\n values = []\n for node in inorder:\n if node.depth > depth:\n continue\n elif node.depth == depth:\n values.append(node.value)\n else:\n if node.left 
is None:\n values += [None] * 2**(depth-node.depth-1)\n if node.right is None:\n values += [None] * 2**(depth-node.depth-1)\n return values\n \n def getInOrder(self, node):\n # in-order traversal\n # update depth\n inorder = []\n def _inorder(node, inorder):\n if node is None:\n return\n elif node.parent is not None:\n node.set_depth(node.parent.depth + 1)\n _inorder(node.left, inorder)\n inorder.append(node)\n _inorder(node.right, inorder)\n _inorder(node, inorder)\n return inorder\n \n def maxDepth(self):\n inorder = self.getInOrder(self.root)\n return max([i.depth for i in inorder])\n \n def __len__(self):\n return self.maxDepth() + 1\n \nclass DFSTraversal:\n def __init__(self, tree, traversalType):\n self.tree = tree\n self.traversalType = traversalType\n self.changeTraversalType(traversalType)\n \n def changeTraversalType(self, traversalType):\n if not traversalType in DFSTraversalTypes:\n raise TypeError('Invalid traversalType')\n self.traversalType = traversalType\n if traversalType == DFSTraversalTypes.PREORDER:\n self.traversal = self.getPreOrder(self.tree.root)\n elif traversalType == DFSTraversalTypes.INORDER:\n self.traversal = self.getInOrder(self.tree.root)\n elif traversalType == DFSTraversalTypes.POSTORDER:\n self.traversal = self.getPostOrder(self.tree.root)\n \n def getPreOrder(self, node):\n traversal = []\n if node is None:\n return traversal\n s = []\n s.append(node)\n while len(s) > 0:\n node = s.pop()\n traversal.append(node.value)\n if node.right is not None:\n s.append(node.right)\n if node.left is not None:\n s.append(node.left)\n return traversal\n \n def getInOrder(self, node):\n traversal = []\n s = []\n while len(s) > 0 or node is not None:\n if node is not None:\n s.append(node)\n node = node.left\n else:\n node = s.pop()\n traversal.append(node.value)\n node = node.right\n return traversal\n \n def getPostOrder(self, node):\n traversal = []\n s = []\n last_node_visited = None\n while len(s) > 0 or node is not None:\n if node is not 
None:\n s.append(node)\n node = node.left\n else:\n peek_node = s[-1]\n if peek_node.right is not None and id(last_node_visited) != id(peek_node.right):\n node = peek_node.right\n else:\n traversal.append(peek_node.value)\n last_node_visited = s.pop()\n return traversal\n \n def __getitem__(self, i):\n return self.traversal[i]\n \n def __iter__(self):\n self.index = 0\n return self\n \n def __next__(self):\n try:\n elem = self.traversal[self.index]\n except IndexError:\n raise StopIteration()\n self.index += 1\n return elem",
"Writing TreeTraversal.py\n"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Problem 2: Markov Chains\n\n[Markov Chains](https://en.wikipedia.org/wiki/Markov_chain) are widely used to model and predict discrete events. Underlying Markov chains are Markov processes which make the assumption that the outcome of a future event only depends on the event immediately preceeding it. In this exercise, we will be assuming that weather has Markov properties (e.g. today's weather is dependent only on yesterday's weather). We will use the Markov assumption to create a basic model for predicting weather.",
"_____no_output_____"
],
[
"To begin, let's categorize weather into 7 types: ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing'].\n\nIn the `weather.csv` file accompanying this homework, each row corresponds to one type of weather (in the order given above) and each column is the probability of one type of weather occurring the following day (also in the order given above).\n\nThe $ij$th element is the probability that the $j$th weather type occurs after the $i$th weather type. So for example, (1,2) is the probability a cloudy day occurs after a sunny day.\n\nTake a look at the data. Make sure you see how if the previous day was sunny, the following day will have a 0.4 probability of being sunny as well. If the previous day was raining (index $i = 3$), then the following day (index $j$) has a 0.05 probability of being windy ($j = 5$).",
"_____no_output_____"
],
[
"### Part 1: Parse the `.csv` file into a `Numpy` array",
"_____no_output_____"
]
],
[
[
"#Load CSV file -- hint: you can use np.genfromtxt()\n\nimport numpy as np\n\nweather_array = np.genfromtxt('weather.csv', delimiter=',')\n\nprint(weather_array)",
"[[ 0.4 0.3 0.1 0.05 0.1 0.05]\n [ 0.3 0.4 0.1 0.1 0.08 0.02]\n [ 0.2 0.3 0.35 0.05 0.05 0.05]\n [ 0.1 0.2 0.25 0.3 0.1 0.05]\n [ 0.15 0.2 0.1 0.15 0.3 0.1 ]\n [ 0.1 0.2 0.35 0.1 0.05 0.2 ]]\n"
]
],
[
[
"### Part 2: Create a class called `Markov` that has the following methods:\n\n* `load_data(array)`: loads the Numpy 2D array and stores it as a class variable.\n* `get_prob(previous_day, following_day)`: returns the probability of `following_day` weather given `previous_day` weather. \n\n**Note:** `previous_day` and `following_day` should be passed in string form (e.g. \"sunny\"), as opposed to an index (e.g. 0). \n\n\n",
"_____no_output_____"
]
],
[
[
"class Markov:\n def __init__(self):\n # implement here\n self.weather_types = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing']\n \n def load_data(self, array):\n # implement here\n self.weather_array = array\n return self\n \n def get_prob(self, previous_day, following_day):\n # implement here -- returns a probability\n try:\n i = self.weather_types.index(previous_day)\n j = self.weather_types.index(previous_day)\n except ValueError:\n raise ValueError('Invalid weather.')\n try:\n return self.weather_array[i, j]\n except AttributeError:\n raise AttributeError('Data have not been loaded.')",
"_____no_output_____"
],
[
"# example\nm = Markov().load_data(weather_array)\nprint(m.get_prob('sunny', 'sunny'))",
"0.4\n"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Problem 3: Iterators",
"_____no_output_____"
],
[
"Iterators are a convenient way to walk along your Markov chain.\n\n#### Part 1: Using your `Markov` class from Problem 3, write `Markov` as an iterator by implementing the `__iter__()` and `__next__()` methods.\n\nRemember: \n* `__iter__()` should return the iterator object and should be implicitly called when the loop begins\n* The `__next()__` method should return the next value and is implicitly called at each step in the loop.\n\nEach 'next' step should be stochastic (i.e. randomly selected based on the relative probabilities of the following day weather types) and should return the next day's weather as a string (e.g. \"sunny\") rather than an index (e.g. 0).",
"_____no_output_____"
]
],
[
[
"class Markov:\n def __init__(self):\n # implement here\n self.weather_types = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing']\n self.weather = None\n \n def load_data(self, array):\n # implement here\n self.weather_array = array\n return self\n \n def get_prob(self, previous_day, following_day):\n # implement here -- returns a probability\n try:\n i = self.weather_types.index(previous_day)\n j = self.weather_types.index(previous_day)\n except ValueError:\n raise ValueError('Invalid weather.')\n try:\n return self.weather_array[i, j]\n except AttributeError:\n raise AttributeError('Data have not been loaded.')\n \n def set_weather(self, weather):\n if not weather in self.weather_types:\n raise ValueError('Invalid weather.')\n else:\n self.weather = weather\n return self\n \n def __iter__(self):\n if self.weather is None:\n self.weather = np.random.choice(self.weather_types)\n return self\n \n def __next__(self):\n self.weather = np.random.choice(self.weather_types, \\\n p=self.weather_array[self.weather_types.index(self.weather)])\n return self.weather",
"_____no_output_____"
],
[
"# example\n\nm = Markov().load_data(weather_array).set_weather('cloudy')\nm_it = iter(m)\nfor i in range(5):\n print(next(m_it))",
"cloudy\nsunny\nwindy\nsnowy\nrainy\n"
]
],
[
[
"#### Part 2: We want to predict what weather will be like in a week for 5 different cities.\n\nNow that we have our `Markov` iterator, we can try to predict what the weather will be like in seven days from now.\n\nGiven each city's current weather in the dictionary `city_weather` (see below), simulate what the weather will be like in 7 days from now. Rather than just producing one prediction per city, simulate 100 such predictions per city and store the most commonly occuring prediction.\n\nIn your submission, print a dictionary `city_weather_predictions` that has each city as a key and the most commonly predicted weather as the corresponding value.\n\n**Note**: Don't worry if your values don't seem to make intuitive sense. We made up the weather probabilities.",
"_____no_output_____"
]
],
[
[
"city_weather = {\n 'New York': 'rainy',\n 'Chicago': 'snowy',\n 'Seattle': 'rainy',\n 'Boston': 'hailing',\n 'Miami': 'windy',\n 'Los Angeles': 'cloudy',\n 'San Fransisco': 'windy'\n}",
"_____no_output_____"
],
[
"from collections import Counter\n\ndef predict(weather, days=7):\n m_it = iter(Markov().load_data(weather_array).set_weather(weather))\n return [next(m_it) for _ in range(days)][-1]\n \ncity_weather_predictions = {city:Counter([predict(weather) for _ in range(100)]).most_common(1)[0][0] \\\n for city, weather in city_weather.items()}\n\nprint(city_weather_predictions)",
"{'New York': 'cloudy', 'Chicago': 'cloudy', 'Seattle': 'sunny', 'Boston': 'sunny', 'Miami': 'cloudy', 'Los Angeles': 'cloudy', 'San Fransisco': 'cloudy'}\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74334a4e978bf54a446f82834d80ba7024cd954 | 2,558 | ipynb | Jupyter Notebook | aoc_2020/day04.ipynb | ckuenzi/AoC | 3dd243ec02b7f2dff42aa7c67e2dcddefb0fef64 | [
"Unlicense"
] | 2 | 2020-12-11T18:07:11.000Z | 2020-12-11T18:08:04.000Z | aoc_2020/day04.ipynb | ckuenzi/AoC | 3dd243ec02b7f2dff42aa7c67e2dcddefb0fef64 | [
"Unlicense"
] | null | null | null | aoc_2020/day04.ipynb | ckuenzi/AoC | 3dd243ec02b7f2dff42aa7c67e2dcddefb0fef64 | [
"Unlicense"
] | null | null | null | 28.422222 | 179 | 0.453088 | [
[
[
"input = open('inputs/day04.txt', 'r').read()",
"_____no_output_____"
],
[
"import re\n\npassports = input.split('\\n\\n')\nresult = 0\nvalid_passes = []\nfor pas in passports:\n pas = \" \".join(pas.split())\n values = {'byr': None, 'iyr': None, 'eyr': None, 'hgt': None, 'hcl': None, 'ecl': None, 'pid': None}\n for kv in pas.split(' '):\n (k,v) = tuple(kv.split(':'))\n values[k] = v\n if not None in values.values():\n valid_passes.append(values)\nprint(\"Part 1: \" + str(len(valid_passes)))\n\n\n#Part 2\ncount = 0\nfor pas in valid_passes:\n if not (int(pas['byr']) >= 1920 and int(pas['byr']) <=2002):\n continue\n if not (int(pas['iyr']) >= 2010 and int(pas['iyr']) <=2020):\n continue\n if not (int(pas['eyr']) >= 2020 and int(pas['eyr']) <=2030):\n continue\n reg = re.split('(\\d+)(.+)', pas['hgt'])\n height = int(reg[1])\n unit = reg[2]\n if not((unit == 'cm' and height >= 150 and height <= 193) or (unit == 'in' and height >= 59 and height <= 76)):\n continue\n if re.match(\"#[0-9a-f]{6}\", pas['hcl']) == None:\n continue\n if not(pas['ecl'] == 'amb' or pas['ecl'] == 'blu' or pas['ecl'] == 'brn' or pas['ecl'] == 'gry' or pas['ecl'] == 'grn' or pas['ecl'] == 'hzl' or pas['ecl'] == 'oth'):\n continue\n if re.match(\"[0-9]{9}\", pas['pid']) == None or len(pas['pid']) != 9:\n continue\n count = count + 1\nprint(\"Part 2: \" + str(count))",
"Part 1: 256\nPart 2: 198\n"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e74336f1a59ed1f826425a1371c3e9136cb34375 | 4,201 | ipynb | Jupyter Notebook | ipynb/Germany-Sachsen-Anhalt-LK-Saalekreis.ipynb | RobertRosca/oscovida.github.io | d609949076e3f881e38ec674ecbf0887e9a2ec25 | [
"CC-BY-4.0"
] | null | null | null | ipynb/Germany-Sachsen-Anhalt-LK-Saalekreis.ipynb | RobertRosca/oscovida.github.io | d609949076e3f881e38ec674ecbf0887e9a2ec25 | [
"CC-BY-4.0"
] | null | null | null | ipynb/Germany-Sachsen-Anhalt-LK-Saalekreis.ipynb | RobertRosca/oscovida.github.io | d609949076e3f881e38ec674ecbf0887e9a2ec25 | [
"CC-BY-4.0"
] | null | null | null | 29.377622 | 190 | 0.518924 | [
[
[
"# Germany: LK Saalekreis (Sachsen-Anhalt)\n\n* Homepage of project: https://oscovida.github.io\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Sachsen-Anhalt-LK-Saalekreis.ipynb)",
"_____no_output_____"
]
],
[
[
"import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")",
"_____no_output_____"
],
[
"%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *",
"_____no_output_____"
],
[
"overview(country=\"Germany\", subregion=\"LK Saalekreis\");",
"_____no_output_____"
],
[
"# load the data\ncases, deaths, region_label = germany_get_region(landkreis=\"LK Saalekreis\")\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 500 rows\npd.set_option(\"max_rows\", 500)\n\n# display the table\ntable",
"_____no_output_____"
]
],
[
[
"# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Sachsen-Anhalt-LK-Saalekreis.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook",
"_____no_output_____"
],
[
"# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------",
"_____no_output_____"
]
],
[
[
"print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")",
"_____no_output_____"
],
[
"# to force a fresh download of data, run \"clear_cache()\"",
"_____no_output_____"
],
[
"print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7435f2a26190a4f888e811313d2e021cfc390eb | 848,453 | ipynb | Jupyter Notebook | Deep-Learning-Notebooks/notebooks/fatchordWaveRNN.ipynb | deepraj1729/Resources-and-Guides | 0ca030d2f6a5d3533101b1ad7a0f329cc6538c52 | [
"MIT"
] | 2 | 2020-09-06T15:51:13.000Z | 2020-10-04T23:29:19.000Z | Deep-Learning-Notebooks/notebooks/fatchordWaveRNN.ipynb | deepraj1729/Resources-and-Guides | 0ca030d2f6a5d3533101b1ad7a0f329cc6538c52 | [
"MIT"
] | null | null | null | Deep-Learning-Notebooks/notebooks/fatchordWaveRNN.ipynb | deepraj1729/Resources-and-Guides | 0ca030d2f6a5d3533101b1ad7a0f329cc6538c52 | [
"MIT"
] | null | null | null | 5,080.556886 | 843,515 | 0.972191 | [
[
[
"# Text-to-Speech with Tacotron+WaveRNN\n\nThis is an English female voice TTS demo using an open source project [fatchord/WaveRNN](https://github.com/fatchord/WaveRNN).\n\nFor other deep-learning Colab notebooks, visit [tugstugi/dl-colab-notebooks](https://github.com/tugstugi/dl-colab-notebooks).\n\n## Install fatchord/WaveRNN",
"_____no_output_____"
]
],
[
[
"import os\nimport time\nfrom os.path import exists, join, basename, splitext\n\ngit_repo_url = 'https://github.com/fatchord/WaveRNN.git'\nproject_name = splitext(basename(git_repo_url))[0]\nif not exists(project_name):\n !git clone -q {git_repo_url}\n !cd {project_name} && pip install -q -r requirements.txt\n\nimport sys \nsys.path.append(project_name)\n \nfrom IPython.display import Audio, display",
"_____no_output_____"
]
],
[
[
"## Sentence to synthesize",
"_____no_output_____"
]
],
[
[
"SENTENCE = 'Supporters say they expect the law to be blocked in court but hope that the appeals process will bring it before the Supreme Court.'",
"_____no_output_____"
]
],
[
[
"## Synthetize",
"_____no_output_____"
]
],
[
[
"!rm -rf {project_name}/quick_start/*.wav\n!cd {project_name} && python quick_start.py --input_text \"{SENTENCE}\"\n\nwavs = !ls {project_name}/quick_start/*.wav\ndisplay(Audio(wavs[0], rate=22050))",
"\nInitialising WaveRNN Model...\n\nTrainable Parameters: 4.234M\n\nLoading Weights: \"quick_start/voc_weights/latest_weights.pyt\"\n\n\nInitialising Tacotron Model...\n\nTrainable Parameters: 11.088M\n\nLoading Weights: \"quick_start/tts_weights/latest_weights.pyt\"\n\n+---------+---------------+-----------------+----------------+-----------------+\n| WaveRNN | Tacotron(r=2) | Generation Mode | Target Samples | Overlap Samples |\n+---------+---------------+-----------------+----------------+-----------------+\n| 797k | 180k | Batched | 11000 | 550 |\n+---------+---------------+-----------------+----------------+-----------------+\n \n\n| Generating 1/1\n| ████████████████ 168000/169400 | Batch Size: 14 | Gen Rate: 13.4kHz | \n\nDone.\n\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7435f9e34b7f346321ea91635f49f6fcf7fee9c | 11,428 | ipynb | Jupyter Notebook | chan/03_Unfinished_Centre_Expand_Handler_Alpha_31.ipynb | fs714/concurrency-example | fbff041804b9c46fb7f21ebbae22acff745c7b0c | [
"Apache-2.0"
] | null | null | null | chan/03_Unfinished_Centre_Expand_Handler_Alpha_31.ipynb | fs714/concurrency-example | fbff041804b9c46fb7f21ebbae22acff745c7b0c | [
"Apache-2.0"
] | null | null | null | chan/03_Unfinished_Centre_Expand_Handler_Alpha_31.ipynb | fs714/concurrency-example | fbff041804b9c46fb7f21ebbae22acff745c7b0c | [
"Apache-2.0"
] | 1 | 2020-03-10T15:47:05.000Z | 2020-03-10T15:47:05.000Z | 37.592105 | 104 | 0.433059 | [
[
[
"# Unfinished Down Centre Handler\n%matplotlib inline\n\nimport matplotlib.pyplot as plt\nfrom IPython.core.debugger import Pdb; pdb = Pdb()\n\n\ndef plot_trend(y):\n x = list(range(0, len(y)))\n gg = [min(y[1], y[3])] * len(y)\n dd = [max(y[2], y[4])] * len(y)\n\n plt.figure(figsize=(len(y),4))\n plt.grid()\n plt.plot(x, y)\n plt.plot(x, gg, '--')\n plt.plot(x, dd, '--')\n sx, sy = unfinished_dc_expand_spliter(y)\n plt.plot(sx, sy)\n cs, cgg, cdd = get_last_centre_interval(sx, sy, y)\n plt.plot(cs, cgg, '--')\n plt.plot(cs, cdd, '--')\n plt.show()\n \ndef get_last_centre_interval(sx, sy, ts):\n if len(sx) == 3 or len(sx) == 5 or len(sx) == 6:\n return [], [], []\n elif len(sx) == 4:\n s0 = sx[2]\n s1 = sy[2]\n e0 = sx[3]\n e1 = sy[3]\n if e0 - s0 == 3:\n return [s0 + 1, e0 - 1], [ts[s0+1]] * 2, [ts[e0-1]] * 2\n elif e0 - s0 >= 5:\n gg = min(ts[s0+1], ts[s0+3])\n dd = max(ts[s0+2], ts[s0+4])\n num = e0 - s0 - 1\n return list(range(s0 + 1, e0)), [gg] * num, [dd] * num\n else:\n raise RuntimeError('Not Handled')\n else:\n raise RuntimeError('Not Handled')\n\ndef get_dc_high_order(ts):\n ths_start_offset = 3\n ths_end_offset = ts[-1][0] - 6\n \n ths = ts[ths_start_offset : ths_end_offset + 1][::2]\n \n ths_ordered = sorted(ths, key=lambda x: x[1], reverse=True)\n return ths_ordered\n\ndef get_dc_low_order(ts, th):\n tl_start_offset = th[0] + 3\n tl_end_offset = ts[-1][0] - 3\n \n tls = ts[tl_start_offset : tl_end_offset + 1][::2]\n \n tls_ordered = sorted(tls, key=lambda x: x[1])\n return tls_ordered\n\ndef finished_dc_expand_spliter(t_list):\n ts = list(enumerate(t_list))\n th_ordered = get_dc_high_order(ts)\n tl_ordered = get_dc_low_order(ts, th_ordered[0])\n \n th = th_ordered[0]\n tl = tl_ordered[0]\n \n found = False\n for tl in tl_ordered:\n if ts[-1][0] - tl[0] <= 5:\n sx = [ts[0][0], th[0], tl[0], ts[-1][0]]\n sy = [ts[0][1], th[1], tl[1], ts[-1][1]]\n found = True\n break\n else:\n i = tl[0]\n dd = max(ts[i+2][1], ts[i+4][1])\n found_centre = True\n for zn in 
ts[(i+5):-2][::2]:\n if zn[1] < dd:\n found_centre = False\n break\n if found_centre:\n sx = [ts[0][0], th[0], tl[0], ts[-1][0]]\n sy = [ts[0][1], th[1], tl[1], ts[-1][1]]\n found = True\n break\n \n if found:\n if sx[1] - sx[0] >= 11:\n t_slice = t_list[sx[0] : sx[1] + 1]\n sxx, syy = finished_dc_expand_spliter(t_slice)\n sxx = [x + sx[0] for x in sxx]\n sx = sxx + sx[2:]\n sy = [t_list[x] for x in sx]\n elif sx[3] - sx[2] >= 11:\n t_slice = t_list[sx[2] : sx[3] + 1]\n sxx, syy = finished_dc_expand_spliter(t_slice)\n sxx = [x + sx[2] for x in sxx]\n sx = sx[:2] + sxx\n sy = [t_list[x] for x in sx]\n return sx, sy\n else: \n raise RuntimeError('Not Handled')\n \ndef unfinished_dc_expand_spliter(t_list):\n ts = list(enumerate(t_list))\n \n found_down = False\n for i in range(ts[-2][0], ts[4][0] - 1, -2):\n if ts[i][1] < ts[i-2][1] and ts[i-1][1] < ts[i-3][1]:\n found_down = True\n continue\n else:\n break\n\n if found_down:\n th = ts[i-1]\n if th[0] >= 11:\n t_slice = t_list[:th[0]+1]\n sx, sy = finished_dc_expand_spliter(t_slice)\n sx.append(ts[-2][0])\n sy.append(ts[-2][1])\n return sx, sy\n else:\n return [ts[0][0], th[0], ts[-2][0]], [ts[0][1], th[1], ts[-2][1]]\n else:\n sx, sy = finished_dc_expand_spliter(t_list)\n if sy[-1] > sy[-2]:\n return sx, sy\n else:\n th_start_offset = sx[-4] + 3\n th_end_offset = sx[-1] - 4\n\n ths = ts[th_start_offset : th_end_offset + 1][::2]\n\n ths_max = max(ths, key=lambda x: x[1])\n sx = sx[0: -3] + [ths_max[0], ts[-2][0]]\n sy = [t_list[x] for x in sx]\n return sx, sy\n \n\nys = []\n\nys.append(([0, 100, 60, 120, 70, 110, 25, 155, 35, 145, 45, 150], []))\nys.append(([0, 100, 60, 120, 70, 110, 25, 155, 35, 145, 45, 135], []))\nys.append(([0, 100, 60, 120, 70, 110, 40, 130, 55, 140, 65, 75], []))\nys.append(([0, 100, 60, 120, 70, 110, 40, 130, 80, 140, 65, 75], []))\nys.append(([0, 100, 60, 120, 70, 110, 25, 155, 35, 145, 45, 115, 55, 105], []))\n\nys.append(([0, 100, 60, 120, 70, 110, 35, 75, 25, 145, 65, 85], 
[]))\nys.append(([0, 100, 50, 120, 60, 90, 30, 110, 80, 130, 45, 70, 35, 105], []))\nys.append(([0, 100, 50, 130, 60, 90, 30, 110, 80, 120, 45, 70, 35, 105], []))\nys.append(([0, 100, 50, 120, 60, 90, 30, 70, 40, 110, 80, 130, 45, 70, 35, 105], []))\nys.append(([0, 100, 50, 130, 60, 90, 30, 70, 40, 110, 80, 120, 45, 70, 35, 105], []))\n\nys.append(([0, 100, 60, 120, 70, 110, 25, 155, 55, 145, 45, 150], []))\nys.append(([0, 100, 60, 120, 70, 110, 40, 130, 80, 140, 65, 75], []))\nys.append(([0, 100, 60, 120, 70, 110, 40, 140, 80, 130, 65, 75], []))\n\nys.append(([0, 110, 70, 100, 60, 80, 61, 94, 77, 98, 70, 136, 68, 90, 66, 82, 73, 110], []))\nys.append(([0, 110, 70, 100, 60, 100, 78, 90, 53, 109, 56, 141, 99, 106, 89, 99, 93, 141], []))\nys.append(([0, 100, 70, 110, 60, 100, 53, 90, 78, 109, 56, 109, 99, 106, 89, 99, 93, 141], []))\n\nys.append(([0, 100, 60, 110, 70, 120, 80, 130, 90, 140, 50, 75], []))\n\nfor y in ys:\n plot_trend(y[0])\n",
"_____no_output_____"
],
[
"# Random Centre Generator\n%matplotlib inline\n\nimport random\nimport matplotlib.pyplot as plt\n\ny_max = 150\ny_min = 50\nnum_max = 18\n\ndef generate_next(y_list, direction):\n if direction == 1:\n y_list.append(random.randint(max(y_list[2], y_list[4], y_list[-1]) + 1, y_max))\n elif direction == -1:\n y_list.append(random.randint(y_min, min(y_list[1], y_list[3], y_list[-1]) - 1))\n\n# y_base = [0, 100, 60, 110, 70]\ny_base = [0, 110, 70, 100, 60]\n# y_base = [0, 100, 60, 90, 70]\n# y_base = [0, 90, 70, 100, 60]\n\ndirection = 1\nfor i in range(5, num_max):\n generate_next(y_base, direction)\n direction = 0 - direction\n\ny_base[-1] = 110\nprint(y_base)\nfor i in range(11, len(y_base), 2):\n y = y_base[:(i + 1)]\n plot_trend(y)\n ",
"_____no_output_____"
],
[
"%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\n\nys = []\n\n# Group 1\n# ys.append([0, 100, 60, 110, 70, 99, 66, 121, 91, 141, 57, 111, 69, 111])\n# ys.append([0, 100, 60, 110, 70, 105, 58, 102, 74, 137, 87, 142, 55, 128])\n# ys.append([0, 100, 60, 110, 70, 115, 75, 120, 80, 125, 85, 130, 90, 135])\n# ys.append([0, 100, 60, 110, 70, 120, 80, 130, 90, 140, 50, 75])\n# ys.append([0, 100, 60, 110, 70, 114, 52, 75, 54, 77, 65, 100, 66, 87, 70, 116])\n# ys.append([0, 100, 60, 110, 70, 72, 61, 143, 77, 91, 82, 100, 83, 124, 89, 99, 89, 105])\n\n# Group 2\n# ys.append([0, 110, 70, 100, 60, 142, 51, 93, 78, 109, 60, 116, 50, 106])\n# ys.append([0, 110, 70, 100, 60, 88, 70, 128, 82, 125, 72, 80, 63, 119])\n# ys.append([0, 110, 70, 100, 60, 74, 66, 86, 57, 143, 50, 95, 70, 91])\n# ys.append([0, 110, 70, 100, 60, 77, 73, 122, 96, 116, 82, 124, 69, 129])\n# ys.append([0, 110, 70, 100, 60, 147, 53, 120, 77, 103, 56, 76, 74, 92])\n# ys.append([0, 110, 70, 100, 60, 95, 55, 90, 50, 85, 45, 80, 40, 75])\n# ys.append([0, 110, 70, 100, 60, 100, 78, 90, 53, 109, 56, 141, 99, 106, 89, 99, 93, 141])\n\n# Group 3\n# ys.append([0, 100, 60, 90, 70, 107, 55, 123, 79, 112, 64, 85, 74, 110])\n# ys.append([0, 100, 60, 90, 70, 77, 55, 107, 76, 141, 87, 91, 60, 83])\n# ys.append([0, 100, 60, 90, 70, 114, 67, 93, 58, 134, 53, 138, 64, 107])\n# ys.append([0, 100, 60, 90, 70, 77, 66, 84, 79, 108, 87, 107, 72, 89])\n# ys.append([0, 100, 60, 90, 70, 88, 72, 86, 74, 84, 76, 82, 74, 80])\n\n# Group 4\n# ys.append([0, 90, 70, 100, 60, 131, 57, 144, 85, 109, 82, 124, 87, 101])\n# ys.append([0, 90, 70, 100, 60, 150, 56, 112, 63, 95, 84, 118, 58, 110])\n# ys.append([0, 90, 70, 100, 60, 145, 64, 112, 69, 86, 71, 119, 54, 95])\n# ys.append([0, 90, 70, 100, 60, 105, 55, 110, 50, 115, 45, 120, 40, 125])\n\nfor y_base in ys:\n for i in range(11, len(y_base), 2):\n y = y_base[:(i + 1)]\n plot_trend(y)\n ",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7436b9939bf9b54ee935a0a0beae2eadbf67092 | 7,615 | ipynb | Jupyter Notebook | Polarisation and Light.ipynb | refmitchell/pathfinder | 34fbf71985a662693fafd6c13b9724bc45cddca7 | [
"MIT"
] | 1 | 2020-03-03T13:55:17.000Z | 2020-03-03T13:55:17.000Z | Polarisation and Light.ipynb | refmitchell/pathfinder | 34fbf71985a662693fafd6c13b9724bc45cddca7 | [
"MIT"
] | null | null | null | Polarisation and Light.ipynb | refmitchell/pathfinder | 34fbf71985a662693fafd6c13b9724bc45cddca7 | [
"MIT"
] | null | null | null | 61.41129 | 702 | 0.70742 | [
[
[
"# Pathfinder Application (Polarisation and Light)\n\nAuthor: R. Mitchell (email: [email protected])\n\n### Feedback\nQuestions, comments, suggestions, or requests for functionality are welcome and can be sent to the email address above. This tool will continue development on an 'as-required' basis (i.e. I will add features when I need them, or when somebody asks me for them)!\n\n### What is this?\nThis is a streamlined version of the Pathfinder application (similar to the Wind and Light notebook) which provides a single light cue and a single polarisation cue. This version does **not** require using any use of configuration files and can be configured entirely within the notebook. If you want to create more general/complex scenarios, please see the 'General' notebook.\n\nThe software mimics the tests from the sky-compass literature whereby the beetle is placed in the centre of an arena, presented with cues, and allowed to roll to the edge of an arena; the cues may then be changed for the second roll and the absolute change in bearing recorded.\n\n### Polarisation as a cue\nSkylight polarisation is often treated as being mirrored or ambiguous about 180$^\\circ$; the synthetic polarisation filters used in behavioural experiments are ambiguous, however the real skylight pattern may in fact give information about the full 360$^\\circ$. Recent work by *Gkanais et al.* has shown that the bimodal results often observed are likely a cause of methodology rather than ambiguity in the natural polarisation pattern. Given that most dung beetle orientation results discussing polarisation as a cue use polarising filters the cue included in the Pathfinder application is designed to mimic the filter rather than the natural skylight pattern (it is treated as ambiguous). \n\n#### Display\nThe polarisation cue is displayed as a bi-directional magenta arrow at the top of the world. The line indicates the axis of the polarsation filter (i.e. 
the axis of ambiguity).\n\n\n\n#### Interpretation\nIt is useful to understand how the polarisation cue is integrated in different strategies. The rest of the cues are implemented simply as vectors which can be combined but due to its bidirectional ambiguity a polarisation filter is slightly more complex.\n\n**avg:** When takeing the average of all available cues the other cues are used to disambiguate the polarisation cue. We compare the average cue with either of the two possible directions and take the one which gives the strongest response.\n\n**wta:** Winner Take All relies on finding a single strongest cue. If the polarisation cue as the greatest (raw) strength then we pick one of the two possible directions uniform randomly. \n\n**proj_wta:** If the projected polarisation cue is strongest of the projected cues then again we pick one of the two possible directions uniform randomly to use as the reference.\n\n### Usage\nStart by running the code cell below, this will initialise the software and generate a series of graphical controls. You can use the controls to configure the cues. Once you've configured the software, click 'Run Interact' to generate a plot. If you want to change the scenario, simply modify the configuration using the graphical controls and click 'Run Interact' again. A more detailed control reference can be found below but they should be largely self explanatory.",
"_____no_output_____"
]
],
[
[
"# Run this cell!\n%matplotlib notebook\nfrom pathfinder.runnable.pol_and_light import generate_controls\nfrom IPython.display import display\ncontrols = generate_controls()\ndisplay(controls)",
"_____no_output_____"
]
],
[
[
"### Control reference\n#### Switches/checkboxes\nThree checkboxes are provided:\n* Show individual cues: when enabled this will show the directional reference given by each individual cue (this can be a nice way of visualising the relative weight of each cue).\n* Enable/disable the legend: the legend positioning is not consistent as the plots are resized so they can occassionally obscure the plot. Checking this box will turn the legend on. I find the legends useful for reference but annoying once you know what the plot is showing.\n* Show sensory vectors: sensory vectors are the true geometric representation of the cues which indicate how the simulated beetle perceives the cues. These can be visualised by enabling this setting. For more information please see the General notebook (section \"How does the beetle get its bearing?\").\n\n#### Generic settings\n* Combination strategy: this defines the method used to combine the cues; currently implemented are:\n * avg: simply take the average; and,\n * wta: winner take all, the strongest cue wins (no decision can be made if there are multiple strongest).\n * proj_wta: projected winner take all. The cue with the strongest sensory vector after projection into the ground plane. This accounts for a perceived strength difference of lights at different elevations.\n \n There are any number of strategies that could be implemented, these are simply the ones that I added during development. Again, see \"How does the beetle get its bearing?\" in the General notebook for more information.\n \n* Confidence threshold: this represents the minimum magnitude required from the combined cue vector. If the magnitude of the combined cue is less than this threshold we can assume that the beetle would not actually have enough information to orient itself and discard the change in bearing. 
The default value was chosen based on the contrast experiments from \"Stellar performance: Mechanisms underlying milky way orientation in dung beetles\" (*Foster et al. 2017*), see the Example notebook. \n\n#### Cue setings\nFinally we provide individual cue configuration for each roll of the beetle. Each roll has a single light cue and some wind. For the light we can alter the strength, elevation, and azimuth. For the wind we can alter its strength and direction.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7437087d85fc60b7da674542eeefb954880db65 | 122,695 | ipynb | Jupyter Notebook | DataPreparation/DataPreparation.ipynb | ranjith283/Data-analysis-by-python | 94c89074ad56f749f37a719a46fb1cc21366d5f1 | [
"MIT"
] | 126 | 2018-04-05T23:45:00.000Z | 2022-03-30T21:37:02.000Z | DataPreparation/DataPreparation.ipynb | ranjith283/Data-analysis-by-python | 94c89074ad56f749f37a719a46fb1cc21366d5f1 | [
"MIT"
] | null | null | null | DataPreparation/DataPreparation.ipynb | ranjith283/Data-analysis-by-python | 94c89074ad56f749f37a719a46fb1cc21366d5f1 | [
"MIT"
] | 72 | 2018-04-05T23:44:45.000Z | 2022-03-31T00:25:01.000Z | 141.843931 | 66,552 | 0.877542 | [
[
[
"# Data Wrangling, Cleaning of Data, Exploration of Data to make it consistent for Analysis",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"# importing required libraries\nimport os\nimport subprocess\nimport stat\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nsns.set(style=\"white\")",
"_____no_output_____"
],
[
"# getting absolute path till the raw data file\nabs_path = os.getcwd()[:-15]\nraw_data_path = abs_path + \"/RawData/autos.csv\"\n\n# reading csv into raw dataframe\ndf = pd.read_csv(raw_data_path,encoding=\"latin-1\")",
"_____no_output_____"
],
[
"# checking the column vehicleType for null values \ndf[\"vehicleType\"].isnull().values.sum()",
"_____no_output_____"
],
[
"# changing the vehicleType from NaN to Others\ndf[\"vehicleType\"].fillna(\"Other\", inplace=True)",
"_____no_output_____"
],
[
"# checking if there are any null values in the column brand\ndf[\"brand\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# checking if there are any null values in the column seller\ndf[\"seller\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# checking if there are any null values in the offerType \ndf[\"offerType\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# checking the different types of values in the column offerType\ndf[\"offerType\"].unique()",
"_____no_output_____"
],
[
"# changing values of offerType Gesuch to Request and Angebot to Offer\ndf[\"offerType\"] = df[\"offerType\"].map({'Gesuch':\"Request\",'Angebot':'Offer'})",
"_____no_output_____"
],
[
"# checking if there are any null values in the yearOfRegistration\ndf[\"yearOfRegistration\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# Distribution of vehicles based on year of registration\nfig, ax = plt.subplots(figsize=(8,6))\nax = sns.distplot(df[\"yearOfRegistration\"], color=\"g\", ax=ax)\nax.set_title('Distribution of vehicles based on Year of Registration', fontsize= 15)\nplt.ylabel(\"Density (KDE)\", fontsize= 15)\nplt.xlabel(\"Year Of Registration\", fontsize= 15)\nbbox_props = dict(boxstyle=\"larrow,pad=0.3\", fc=\"white\", ec=\"b\", lw=2)\nax.text(2500,0.015,\"Maximum No Of Vehicles\",ha=\"left\", va=\"center\", rotation=0,size=12,bbox=bbox_props)\nax.annotate('', xy=(25, 0.0009), xycoords='data',\n xytext=(1500, 0.0009), textcoords='data',\n arrowprops=dict(facecolor='red', shrink=0.05, ec=\"r\"),\n horizontalalignment='left', verticalalignment='left',\n )\nax.annotate('', xy=(10000, 0.0009), xycoords='data',\n xytext=(2050, 0.0009), textcoords='data',\n arrowprops=dict(facecolor='red', shrink=0.05, ec=\"r\"),\n horizontalalignment='right', verticalalignment='right',\n )\nax.text(5000,0.002,\"Invalid Registration Years\",ha=\"center\", va=\"center\")\nplt.show()\n",
"/Users/aj/anaconda/lib/python3.5/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j\n"
],
[
"fig.savefig(abs_path + \"/DataPreparation/Plots/vehicle-distribution.png\")",
"_____no_output_____"
],
[
"# dropping rows that are inconsistent with the dataset based on year of registration\ndf = df[(df[\"yearOfRegistration\"] >= 1890) & (df[\"yearOfRegistration\"] <= 2016)]",
"_____no_output_____"
],
[
"# Boxplot to see the distribution after outliers has been removed\nfig, ax = plt.subplots(figsize=(8,6))\nsns.boxplot(x=\"vehicleType\", y=\"price\", data=df)\nax.text(4,1.2*(10**9),\"Many Outliers as Boxplot has been suppressed\",fontsize=18,color=\"r\",ha=\"center\", va=\"center\")\nplt.show()",
"_____no_output_____"
],
[
"fig.savefig(abs_path + \"/DataPreparation/Plots/price-vehicleType-boxplot.png\")",
"_____no_output_____"
],
[
"# Mean of the prices of all the vehicle types\n_median = df.groupby(\"vehicleType\")[\"price\"].median()\n\n# 75th percentile of the prices of all the vehicles types\n_quantile75 = df.groupby(\"vehicleType\")[\"price\"].quantile(0.75)\n\n# 25th percentile of the prices of all the vehicles types\n_quantile25 = df.groupby(\"vehicleType\")[\"price\"].quantile(0.25)\n\n# Calculating the value of the prices of each vehicle type above which all the values are outliers\niqr = (_quantile75 - _quantile25)*1.5 + _median\niqr",
"_____no_output_____"
],
[
"# Removing the outliers as per the logic above\ndf = df[((df[\"vehicleType\"] == \"andere\") & (df[\"price\"] <= 8429)) |\n ((df[\"vehicleType\"] == \"Other\") & (df[\"price\"] <= 3708)) |\n ((df[\"vehicleType\"] == \"suv\") & (df[\"price\"] <= 28800)) |\n ((df[\"vehicleType\"] == \"kombi\") & (df[\"price\"] <= 13076)) |\n ((df[\"vehicleType\"] == \"bus\") & (df[\"price\"] <= 13948)) |\n ((df[\"vehicleType\"] == \"cabrio\") & (df[\"price\"] <= 21400)) |\n ((df[\"vehicleType\"] == \"limousine\") & (df[\"price\"] <= 12801)) |\n ((df[\"vehicleType\"] == \"coupe\") & (df[\"price\"] <= 24300)) |\n ((df[\"vehicleType\"] == \"kleinwagen\") & (df[\"price\"] <= 5775))]",
"_____no_output_____"
],
[
"# checking if the gearbox column has null values\ndf[\"gearbox\"].isnull().value_counts()\n\n# setting the NaN gearbox types to Unspecified\ndf[\"gearbox\"].fillna(\"Unspecified\", inplace=True)",
"_____no_output_____"
],
[
"# checking if the fuelType has null values\ndf[\"fuelType\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# setting the NaN fuelType types to other\ndf[\"fuelType\"].fillna(\"other\",inplace=True)",
"_____no_output_____"
],
[
"# checking how many unique types of fuelTypes are present\ndf[\"fuelType\"].unique()",
"_____no_output_____"
],
[
"# Changing german names to english readable format\ndf[\"fuelType\"] = df[\"fuelType\"].map({'benzin':'Gasoline','diesel':'Diesel','other':'Other','lpg':'Lpg','hybrid':'Hybrid','cng':'Cng','elektro':'Electric'})",
"_____no_output_____"
],
[
"# deleting the column noOfPictures since all of them are Zero\ndel df[\"nrOfPictures\"]",
"_____no_output_____"
],
[
"# splitting dateCreated by year\ndf[\"yearOfCreation\"] = df['dateCreated'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S').date().strftime('%Y'))\n\n# splitting dateCrawled by year\ndf[\"yearCrawled\"] = df['dateCrawled'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S').date().strftime('%Y'))\n\n# splitting dateCreated by month\ndf[\"monthOfCreation\"] = df['dateCreated'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S').date().strftime('%B'))\n\n# splitting dateCrawled by month\ndf[\"monthCrawled\"] = df['dateCrawled'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S').date().strftime('%B'))\n\n# no of days seen online\nfrom_date = df['dateCreated'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S').date())\nto_date = df['lastSeen'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S').date())\nno_days = abs((to_date-from_date))\nno_days = (no_days / np.timedelta64(1, 'D')).astype(int)\ndf[\"NoOfDaysOnline\"] = no_days\n\n# no of hrs seen online\nhrs_from = df['dateCreated'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S').time().strftime('%H'))\nhrs_to = df['lastSeen'].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S').time().strftime('%H'))\ntotal_hrs = (hrs_to.astype(int) - hrs_from.astype(int))\ndf[\"NoOfHrsOnline\"] = total_hrs",
"_____no_output_____"
],
[
"# checking for null values in powerPS column\ndf[\"powerPS\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# checking for unique values in notRepairedDamage\ndf[\"notRepairedDamage\"].unique()",
"_____no_output_____"
],
[
"# setting nan in notRepairedDamage to other\ndf[\"notRepairedDamage\"].fillna(\"other\",inplace=True)",
"_____no_output_____"
],
[
"# Changing german names to english readable format\ndf[\"notRepairedDamage\"] = df[\"notRepairedDamage\"].map({'other':'Other','ja':'Yes','nein':'No'})",
"_____no_output_____"
],
[
"# checking for unique values in model column\ndf[\"model\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# setting nan in model column to Other\ndf[\"model\"].fillna(\"Other\",inplace=True)",
"_____no_output_____"
],
[
"# checking for null values in abtest column\ndf[\"abtest\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# mappig the values on monthOfRegistration column from 1-12 \ndf[\"monthOfRegistration\"].replace([0,12],[1,11],inplace=True)",
"_____no_output_____"
],
[
"# calculating no of years the vehicle is old\ndf[\"yearsOld\"] = 2016 - df[\"yearOfRegistration\"]\n\n# calculating no of months the vehicle is old\ndf[\"monthsOld\"] = 12 - df[\"monthOfRegistration\"]\n",
"_____no_output_____"
],
[
"# mappig the values on monthOfRegistration column from 1-12 to Jan to Dec\nmonths = [\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\"]\ndf[\"monthOfRegistration\"].replace([1,2,3,4,5,6,7,8,9,10,11],months,inplace=True)",
"_____no_output_____"
],
[
"# checking if postal code values are null\ndf[\"postalCode\"].isnull().value_counts()",
"_____no_output_____"
],
[
"# creating folder structure as per brand of vehicle\nfileName = abs_path + \"/ShellScripts/CreateFolder.sh\"\nfile = open(fileName, \"w+\")\nbaseString = \"mkdir -p \" + abs_path + \"/CleanData/DataForAnalysis/\"\nfor b in list(t[\"brand\"].unique()):\n newString = baseString + b + \"\\n\"\n file.write(newString)\nfile.write(\"mkdir -p \" + abs_path + \"/CleanData/CleanedDataSet\")\nfile.close()\nst = os.stat(fileName)\nos.chmod(fileName, st.st_mode | stat.S_IEXEC)\nsubprocess.call(fileName,shell=True)",
"_____no_output_____"
],
[
"# saving the data file as csv to current directory location\npath_to_file = abs_path + \"/CleanData/CleanedDataSet/cleaned_autos.csv\"\ndf.to_csv(path_to_file,index=False)",
"_____no_output_____"
],
[
"# splitting the the records based on brand and vehicleType\nfileName = abs_path + \"/ShellScripts/CreateFiles.sh\"\nfile = open(fileName, \"w+\")\nbaseString_before = '''awk 'BEGIN{FS=OFS=\",\"} FNR == 1 {print} '''\nbaseString_after = \" {print}' \" + abs_path + \"/CleanData/CleanedDataSet/cleaned_autos.csv\" + \" >> \" + abs_path + \"/CleanData/DataForAnalysis/\"\nfor b in list(df[\"brand\"].unique()):\n for typ in list(df[df[\"brand\"] == b][\"vehicleType\"].unique()):\n newString = baseString_before + '$15 == ' + '\"' + b + '\"' + ' && $7 == ' + '\"' + typ + '\"' + baseString_after + b + \"/\" + b + \"_\" + typ + \".csv\" + \"\\n\"\n file.write(newString)\nfile.close()\nst = os.stat(fileName)\nos.chmod(fileName, st.st_mode | stat.S_IEXEC)\nsubprocess.call(fileName,shell=True)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e743729b4ab4d087ec8cfddae894d2f05e967181 | 866,034 | ipynb | Jupyter Notebook | examples/tsfresh_integration.ipynb | predict-idlab/tsflex | 73aae174ec292da7c1117f94a08b5ee14a57a17e | [
"MIT"
] | 138 | 2021-06-29T15:51:16.000Z | 2022-03-29T07:05:47.000Z | examples/tsfresh_integration.ipynb | predict-idlab/tsflex | 73aae174ec292da7c1117f94a08b5ee14a57a17e | [
"MIT"
] | 60 | 2021-06-24T16:28:50.000Z | 2022-03-28T14:52:24.000Z | examples/tsfresh_integration.ipynb | predict-idlab/tsflex | 73aae174ec292da7c1117f94a08b5ee14a57a17e | [
"MIT"
] | 11 | 2021-11-13T09:54:34.000Z | 2022-03-02T15:16:39.000Z | 86.044113 | 21,558 | 0.466546 | [
[
[
"import tsflex\nprint(tsflex.__version__)",
"0.2.3\n"
]
],
[
[
"## Get the data",
"_____no_output_____"
]
],
[
[
"from tsflex.utils.data import load_empatica_data\n\ndf_tmp, df_acc, df_gsr, df_ibi = load_empatica_data([\"tmp\", \"acc\", \"gsr\", \"ibi\"])",
"_____no_output_____"
],
[
"from pandas.tseries.frequencies import to_offset\n\ndata = [df_tmp, df_acc, df_gsr, df_ibi]\nfor df in data:\n print(\"Time-series:\", df.columns.values)\n print(df.shape)\n try:\n print(\"Sampling rate:\", 1 / pd.to_timedelta(to_offset(pd.infer_freq(df.index))).total_seconds(), \"Hz\")\n except:\n print(\"Irregular sampling rate\")\n print()",
"Time-series: ['TMP']\n(30200, 1)\nIrregular sampling rate\n\nTime-series: ['ACC_x' 'ACC_y' 'ACC_z']\n(241620, 3)\nIrregular sampling rate\n\nTime-series: ['EDA']\n(30204, 1)\nIrregular sampling rate\n\nTime-series: ['IBI']\n(1230, 1)\nIrregular sampling rate\n\n"
]
],
[
[
"## Look at the data",
"_____no_output_____"
]
],
[
[
"import plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfig = make_subplots(\n rows=len(data), cols=1, shared_xaxes=True, \n subplot_titles=[df.columns.values[0].split('_')[0] for df in data],\n vertical_spacing=0.1,\n)\n\nfor plot_idx, df in enumerate(data, 1):\n # Select first minute of data\n sub_df = df.first('1min')\n for col in df.columns:\n fig.add_trace(\n go.Scattergl(x=sub_df.index, y=sub_df[col].values, name=col, mode='markers'),\n row=plot_idx, col=1\n )\n\nfig.update_layout(height=len(data)*200)\n\nfig.show(renderer='iframe')",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\nfig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16,4))\n\nfor plot_idx, df in enumerate(data):\n df.plot(kind='box', ax=axes[plot_idx])\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"These visualizations indicate that some preprocessing might be necessary for the signals (some sort of clipping)",
"_____no_output_____"
],
[
"# tsflex processing",
"_____no_output_____"
],
[
"This is roughly identical to the processing of notebook containing the example code of the paper.",
"_____no_output_____"
]
],
[
[
"import pandas as pd; import numpy as np; from scipy.signal import savgol_filter\nfrom tsflex.processing import SeriesProcessor, SeriesPipeline\n\n# Create the processing functions\ndef clip_data(sig: pd.Series, min_val=None, max_val=None) -> np.ndarray:\n return np.clip(sig, a_min=min_val, a_max=max_val)\n\ndef smv(*sigs) -> pd.Series:\n sig_prefixes = set(sig.name.split('_')[0] for sig in sigs)\n result = np.sqrt(np.sum([np.square(sig) for sig in sigs], axis=0))\n return pd.Series(result, index=sigs[0].index, name='|'.join(sig_prefixes)+'_'+'SMV')\n\n# Create the series processors (with their keyword arguments)\ntmp_clippper = SeriesProcessor(clip_data, series_names=\"TMP\", max_val=35)\nacc_savgol = SeriesProcessor(\n savgol_filter, [\"ACC_x\", \"ACC_y\", \"ACC_z\"], window_length=33, polyorder=2\n)\nacc_smv = SeriesProcessor(smv, (\"ACC_x\", \"ACC_y\", \"ACC_z\"))\n\n# Create the series pipeline & process the data\nseries_pipe = SeriesPipeline([tmp_clippper, acc_savgol, acc_smv])\nseries_pipe",
"_____no_output_____"
],
[
"out_data = series_pipe.process(data, drop_keys=[\"ACC_x\", \"ACC_y\", \"ACC_z\"])",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\nfig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16,4))\n\nfor plot_idx, df in enumerate(out_data):\n df.plot(kind='box', ax=axes[plot_idx])\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"# tsflex feature extraction with [tsfresh](https://github.com/blue-yonder/tsfresh) integration",
"_____no_output_____"
]
],
[
[
"# !pip install tsfresh",
"_____no_output_____"
]
],
[
[
"> Useful links; \n> [List of all tsfresh features](https://tsfresh.readthedocs.io/en/latest/text/list_of_features.html) \n> [More detailed documentation of the tsfresh features](https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#module-tsfresh.feature_extraction.feature_calculators) \n> [More detailed documentation of the tsfresh feature extraction settings](https://tsfresh.readthedocs.io/en/latest/text/feature_extraction_settings.html)\n\n[tsfresh feature extraction settings](https://github.com/blue-yonder/tsfresh/blob/main/tsfresh/feature_extraction/settings.py) is how tsfresh represents a collection of features (with their parameters). \n**=> requires wrapping this settings object in a `tsfresh_settings_wrapper` for interoperability with tsflex**.\n\n[tsfresh feature-funtions](https://github.com/blue-yonder/tsfresh/blob/main/tsfresh/feature_extraction/feature_calculators.py) are either of type `simple` or `combiner`.\n* `simple`: feature calculators which calculate a single number \n **=> integrates natively with tsflex**\n* `combiner`: feature calculates which calculate a bunch of features for a list of parameters. These features are returned as a list of (key, value) pairs for each input parameter. \n **=> requires wrapping the function to only extract the values of the returned tuples** \n \nOf course, feature functions that require other keyword arguments, should be wrapped in a `FuncWrapper`",
"_____no_output_____"
]
],
[
[
"# This wrapper handles tsfresh its feature extraction settings\nfrom tsflex.features.integrations import tsfresh_settings_wrapper\n# This wrappers handles tsfresh its combiner functions\nfrom tsflex.features.integrations import tsfresh_combiner_wrapper",
"_____no_output_____"
],
[
"from tsflex.features import FeatureCollection, MultipleFeatureDescriptors",
"_____no_output_____"
]
],
[
[
"## Using tsfresh feature extraction settings",
"_____no_output_____"
]
],
[
[
"# Import some preset feature extraction setting from tsfresh\nfrom tsfresh.feature_extraction import MinimalFCParameters, EfficientFCParameters",
"_____no_output_____"
]
],
[
[
"Calculate the features for a tsfresh feature extraction setting. \nNote that;\n* `tsfresh_settings_wrapper` transforms this feature extraction settings object to a list of features that you can directly pass as the `function` argument of tsflex `MultipleFeatureDescriptors`.",
"_____no_output_____"
]
],
[
[
"simple_feats = MultipleFeatureDescriptors(\n functions=tsfresh_settings_wrapper(MinimalFCParameters()),\n series_names=[\"ACC_SMV\", \"EDA\", \"TMP\"],\n windows=[\"5min\", \"2.5min\"],\n strides=[\"2.5min\"],\n)\nfeature_collection = FeatureCollection(simple_feats)\nfeature_collection",
"_____no_output_____"
],
[
"features_df = feature_collection.calculate(out_data, return_df=True, show_progress=True)\nfeatures_df",
"_____no_output_____"
]
],
[
[
"Extract a lot more tsfresh features (& customize the settings, i.e., remove the slower functions)",
"_____no_output_____"
]
],
[
[
"slow_funcs = [\n\"matrix_profile\",\n\"number_cwt_peaks\",\n\"augmented_dickey_fuller\",\n\"partial_autocorrelation\",\n\"agg_linear_trend\",\n\"lempel_ziv_complexity\",\n\"benford_correlation\",\n\"ar_coefficient\",\n\"permutation_entropy\",\n\"friedrich_coefficients\",\n]\n\nsettings = EfficientFCParameters()\nfor f in slow_funcs:\n del settings[f]",
"_____no_output_____"
],
[
"efficient_feats = MultipleFeatureDescriptors(\n functions=tsfresh_settings_wrapper(settings),\n series_names=[\"ACC_SMV\", \"EDA\", \"TMP\"],\n windows=[\"5min\", \"2.5min\"],\n strides=[\"2.5min\"],\n)\nfeature_collection = FeatureCollection(efficient_feats)\nfeature_collection",
"_____no_output_____"
],
[
"features_df = feature_collection.calculate(out_data, return_df=True, show_progress=True)\nfeatures_df",
"_____no_output_____"
]
],
[
[
"### Plot the EDA features",
"_____no_output_____"
]
],
[
[
"import plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfig = make_subplots(\n rows=2, cols=1, shared_xaxes=True, \n subplot_titles=['Raw EDA data', 'EDA features'],\n vertical_spacing=0.1\n)\n\nfig.add_trace(\n go.Scattergl(x=df_gsr.index[::4*5], y=df_gsr['EDA'].values[::4*5], name='EDA', mode='markers'),\n row=1, col=1\n)\n\nibi_feats = [c for c in features_df.columns if 'EDA_' in c and 'w=2m30s_' in c]\n\nfor col in ibi_feats:\n sub_df = features_df[[col]].dropna()\n if not np.issubdtype(sub_df.values.dtype, np.number):\n continue\n fig.add_trace(\n go.Scattergl(x=sub_df.index, y=sub_df[col].values, name=col, mode='markers'),\n row=2, col=1\n )\n\nfig.update_layout(height=2*350)\n\nfig.show(renderer='iframe')",
"_____no_output_____"
]
],
[
[
"## Using simple tsfresh features",
"_____no_output_____"
],
[
"Integrates natively :)",
"_____no_output_____"
]
],
[
[
"# Import some simple funtions\nfrom tsfresh.feature_extraction.feature_calculators import (\n abs_energy,\n absolute_sum_of_changes,\n cid_ce,\n variance_larger_than_standard_deviation,\n)\n\nfrom tsflex.features import FeatureCollection, FuncWrapper, MultipleFeatureDescriptors\n\nsimple_feats = MultipleFeatureDescriptors(\n functions=[\n abs_energy,\n absolute_sum_of_changes,\n variance_larger_than_standard_deviation,\n FuncWrapper(cid_ce, normalize=True),\n ],\n series_names=[\"ACC_SMV\", \"EDA\", \"TMP\"],\n windows=[\"5min\", \"2.5min\"],\n strides=\"2min\",\n)\nfeature_collection = FeatureCollection(simple_feats)\nfeature_collection",
"_____no_output_____"
],
[
"features_df = feature_collection.calculate(out_data, return_df=True)\nfeatures_df",
"_____no_output_____"
]
],
[
[
"### Plot the EDA features",
"_____no_output_____"
]
],
[
[
"import plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfig = make_subplots(\n rows=2, cols=1, shared_xaxes=True, \n subplot_titles=['Raw EDA data', 'EDA features'],\n vertical_spacing=0.1,\n)\n\nfig.add_trace(\n go.Scattergl(x=df_gsr.index[::4*5], y=df_gsr['EDA'].values[::4*5], name='EDA', mode='markers'),\n row=1, col=1\n)\n\nibi_feats = [c for c in features_df.columns if 'EDA_' in c and 'w=2m30s_' in c]\n\nfor col in ibi_feats:\n sub_df = features_df[[col]].dropna()\n fig.add_trace(\n go.Scattergl(x=sub_df.index, y=sub_df[col].values, name=col, mode='markers'),\n row=2, col=1\n )\n\nfig.update_layout(height=2*350)\n\nfig.show(renderer='iframe')",
"_____no_output_____"
]
],
[
[
"## Using combiner tsfresh features",
"_____no_output_____"
]
],
[
[
"# Import all combiner funcs\nfrom tsfresh.feature_extraction.feature_calculators import (\n agg_autocorrelation,\n augmented_dickey_fuller,\n cwt_coefficients,\n fft_aggregated,\n fft_coefficient,\n index_mass_quantile,\n linear_trend,\n partial_autocorrelation,\n spkt_welch_density,\n symmetry_looking,\n ar_coefficient,\n friedrich_coefficients,\n agg_linear_trend,\n energy_ratio_by_chunks,\n linear_trend_timewise,\n matrix_profile,\n query_similarity_count,\n)",
"_____no_output_____"
]
],
[
[
"Calculate the features for some of tsfresh its combiner functions. \nNote that;\n* `param` is now passed to `tsfresh_combiner_wrapper` instead of the combiner function itself\n* combiner functions that require a `pd.Series` (with a `pd.DatetimeIndex`) are also handled by this wrapper",
"_____no_output_____"
]
],
[
[
"from tsflex.features import FeatureCollection, MultipleFeatureDescriptors\n\ncombiner_feats = MultipleFeatureDescriptors(\n functions=[\n tsfresh_combiner_wrapper(index_mass_quantile, param=[{\"q\": v} for v in [0.15, 0.5, 0.75]]),\n tsfresh_combiner_wrapper(linear_trend, param=[{\"attr\": v} for v in [\"intercept\", \"slope\", \"stderr\"]]),\n tsfresh_combiner_wrapper(spkt_welch_density, param=[{\"coeff\": v} for v in range(5)]),\n # This function requires a pd.Series with a pd.DatetimeIndex\n tsfresh_combiner_wrapper(linear_trend_timewise, param=[{\"attr\": v} for v in [\"intercept\", \"slope\"]]),\n ],\n series_names=[\"ACC_SMV\", \"EDA\", \"TMP\"],\n windows=[\"5min\", \"2.5min\"],\n strides=[\"2.5min\"],\n)\nfeature_collection = FeatureCollection(combiner_feats)\nfeature_collection",
"_____no_output_____"
],
[
"features_df = feature_collection.calculate(out_data, return_df=True)\nfeatures_df",
"_____no_output_____"
]
],
[
[
"### Plot the EDA features",
"_____no_output_____"
]
],
[
[
"import plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfig = make_subplots(\n rows=2, cols=1, shared_xaxes=True, \n subplot_titles=['Raw EDA data', 'EDA features'],\n vertical_spacing=0.1,\n)\n\nfig.add_trace(\n go.Scattergl(x=df_gsr.index[::4*5], y=df_gsr['EDA'].values[::4*5], name='EDA', mode='markers'),\n row=1, col=1,\n)\n\nibi_feats = [c for c in features_df.columns if 'EDA_' in c and 'w=2m30s_' in c]\n\nfor col in ibi_feats:\n sub_df = features_df[[col]].dropna()\n fig.add_trace(\n go.Scattergl(x=sub_df.index, y=sub_df[col].values, name=col, mode='markers'),\n row=2, col=1\n )\n\nfig.update_layout(height=2*350)\n\nfig.show(renderer='iframe')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74373e71b08a5ca4ac94eb42b758defb8e62c5f | 2,124 | ipynb | Jupyter Notebook | basics/P05_userInput.ipynb | shahin-cuet/Python_Practice | 1ab93c7cc704f38b498ebce1d2d53c21de65b7d7 | [
"MIT"
] | 1 | 2021-08-19T17:59:29.000Z | 2021-08-19T17:59:29.000Z | basics/P05_userInput.ipynb | shahin-cuet/Python_Practice | 1ab93c7cc704f38b498ebce1d2d53c21de65b7d7 | [
"MIT"
] | null | null | null | basics/P05_userInput.ipynb | shahin-cuet/Python_Practice | 1ab93c7cc704f38b498ebce1d2d53c21de65b7d7 | [
"MIT"
] | null | null | null | 20.037736 | 73 | 0.467043 | [
[
[
"# Python program to illustrate \n# getting input from user \nname = input(\"Enter ur name: \")\nprint(\"Hello \" + name)",
"Enter ur name: 333\nHello 333\n"
],
[
"n1 = float(input(\"n1 = \"))\nn2 = float(input(\"n2 = \"))\nn = n1*n2\nprint(\"Mul = \", n)",
"n1 = 33.4\nn2 = 33\nMul = 1102.2\n"
],
[
"print(\"Enter your age:\", end = ' ')\nage = input()\nprint(\"Enter your weight:\", end = ' ')\nweight = input()\nprint(\"Enter your hight:\", end = ' ')\nhight = input()\nprint(f\"Age is {age} , weight is {weight} and hight is {hight}.\")",
"Enter your age: "
],
[
"age = input(\"Enter your age: \")\nweight = input(\"Enter your weight: \")\nhight = input(\"Enter your hight: \")\nprint(f\"Age is {age} , weight is {weight} and hight is {hight}.\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e74375309e200ca071d04ee80c99701510d1131e | 44,540 | ipynb | Jupyter Notebook | notebooks/pipeline.ipynb | Bhaskers-Blu-Org1/differential-privacy-library | b7de5d0238191d70cc8ad844cc9eb33c705955d0 | [
"MIT"
] | null | null | null | notebooks/pipeline.ipynb | Bhaskers-Blu-Org1/differential-privacy-library | b7de5d0238191d70cc8ad844cc9eb33c705955d0 | [
"MIT"
] | null | null | null | notebooks/pipeline.ipynb | Bhaskers-Blu-Org1/differential-privacy-library | b7de5d0238191d70cc8ad844cc9eb33c705955d0 | [
"MIT"
] | 1 | 2020-07-30T10:02:32.000Z | 2020-07-30T10:02:32.000Z | 149.463087 | 36,924 | 0.896722 | [
[
[
"# Training differentially private pipelines",
"_____no_output_____"
],
[
"We start by importing the required libraries and modules and collecting the data that we need from the [Adult dataset](https://archive.ics.uci.edu/ml/datasets/adult).",
"_____no_output_____"
]
],
[
[
"import warnings\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nfrom diffprivlib import models",
"_____no_output_____"
],
[
"X_train = np.loadtxt(\"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data\",\n usecols=(0, 4, 10, 11, 12), delimiter=\", \")\ny_train = np.loadtxt(\"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data\",\n usecols=14, dtype=str, delimiter=\", \")",
"_____no_output_____"
],
[
"np.unique(y_train)",
"_____no_output_____"
],
[
"X_test = np.loadtxt(\"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test\",\n usecols=(0, 4, 10, 11, 12), delimiter=\", \", skiprows=1)\n\ny_test = np.loadtxt(\"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test\",\n usecols=14, dtype=str, delimiter=\", \", skiprows=1)\n# Must trim trailing period \".\" from label\ny_test = np.array([a[:-1] for a in y_test])",
"_____no_output_____"
],
[
"np.unique(y_test)",
"_____no_output_____"
]
],
[
[
"## Pipeline with no privacy",
"_____no_output_____"
],
[
"To begin, let's train and test a scikit-learn pipeline without any privacy guarantees. We first use `StandardScaler` to normalise the data to zero mean and unit variance, then use `PCA` to reduce the dimensionality of the system, and then use `LogisticRegression` as a classifier.",
"_____no_output_____"
]
],
[
[
"pipe = Pipeline([\n ('scaler', StandardScaler()),\n ('pca', PCA(2)),\n ('lr', LogisticRegression(solver=\"lbfgs\"))\n])",
"_____no_output_____"
]
],
[
[
"We now train the model, and save the test accuracy as a baseline.",
"_____no_output_____"
]
],
[
[
"pipe.fit(X_train, y_train)\nbaseline = pipe.score(X_test, y_test)\nprint(\"Non-private test accuracy: %.2f%%\" % (baseline * 100))",
"Non-private test accuracy: 80.30%\n"
]
],
[
[
"## Differentially private pipeline",
"_____no_output_____"
],
[
"Using `diffprivlib`, we can now train a differentially private pipeline. We use the same components as in our pipeline above, but with each component satisfying differential privacy. We decide on the `bounds` and `data_norm` parameters by trial and error for this example. In practice, these hyperparameters should be chosen using non-sensitive data, i.e. from metadata provided by the data owner.",
"_____no_output_____"
]
],
[
[
"epsilons = np.logspace(-3, 0, 500)\n\ndp_pipe = Pipeline([\n ('scaler', models.StandardScaler(bounds=([17, 1, 0, 0, 1], [90, 160, 10000, 4356, 99]))),\n ('pca', models.PCA(2, data_norm=5, centered=True)),\n ('lr', models.LogisticRegression(data_norm=5))\n])",
"_____no_output_____"
]
],
[
[
"Let's now train the pipeline across a range of epsilons.",
"_____no_output_____"
]
],
[
[
"pipe_accuracy = []\n\nfor epsilon in epsilons:\n _eps = epsilon / 3\n dp_pipe.set_params(scaler__epsilon=_eps, pca__epsilon=_eps, lr__epsilon=_eps)\n \n dp_pipe.fit(X_train, y_train)\n pipe_accuracy.append(dp_pipe.score(X_test, y_test))",
"_____no_output_____"
]
],
[
[
"Let's save the results so they can be used later.",
"_____no_output_____"
]
],
[
[
"import pickle\n\npickle.dump((epsilons, baseline, pipe_accuracy), open(\"pipeline_accuracy_500.p\", \"wb\" ) )",
"_____no_output_____"
]
],
[
[
"## Results",
"_____no_output_____"
],
[
"We can now plot the results, showing that non-private accuracy is matched from approximately `epsilon = 0.1`.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport pickle\n\nepsilons, baseline, pipe_accuracy = pickle.load(open(\"pipeline_accuracy_500.p\", \"rb\"))\n\nplt.semilogx(epsilons, pipe_accuracy, label=\"Differentially private pipeline\", zorder=10)\nplt.plot(epsilons, np.ones_like(epsilons) * baseline, dashes=[2,2], label=\"Non-private pipeline\", zorder=5)\nplt.title(\"Differentially private pipeline accuracy\")\nplt.xlabel(\"epsilon\")\nplt.ylabel(\"Accuracy\")\nplt.ylim(0, 1)\nplt.xlim(epsilons[0], epsilons[-1])\nplt.legend(loc=4)\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e743784ca144688c80f828b8e71852d88b0d3c43 | 3,189 | ipynb | Jupyter Notebook | Probability-Theory/Chapter_01.ipynb | gaufung/Data_Analytics_Learning_Note | 10426cbdc34c7d79228c9f45477f743b57f2cd2b | [
"MIT"
] | 1 | 2016-12-30T11:52:02.000Z | 2016-12-30T11:52:02.000Z | Probability-Theory/Chapter_01.ipynb | gaufung/Data_Analytics_Learning_Note | 10426cbdc34c7d79228c9f45477f743b57f2cd2b | [
"MIT"
] | null | null | null | Probability-Theory/Chapter_01.ipynb | gaufung/Data_Analytics_Learning_Note | 10426cbdc34c7d79228c9f45477f743b57f2cd2b | [
"MIT"
] | 3 | 2018-06-14T08:47:22.000Z | 2019-07-24T05:58:30.000Z | 26.139344 | 275 | 0.560991 | [
[
[
"Chapter 01",
"_____no_output_____"
],
[
"# 1 Condition Probability\nThe probability of **A** given **B** \n$$P(A \\rvert B) = \\frac{P(AB)}{P(B)}$$ \n$\\Rightarrow$ \n\n**Product Rule**\n$$\\begin{cases}\nP(AB) = P(A)P(B \\vert A) \\\\\nP(AB) = P(B)P(A \\vert B)\n\\end{cases}$$",
"_____no_output_____"
],
[
"# 2 Law of Total Probability\nIf $\\{ B_n: n = 1, 2, 3, \\ldots \\}$ is a finite or contably infinite partition of a sample space and each event $B_n$ is measureable. then for any event $A$ of the same probabilty space:\n$$P(A) = \\sum_{n} P(A \\vert B_n)P(B_n)$$",
"_____no_output_____"
],
[
"# 3 Bayes' Theorem",
"_____no_output_____"
],
[
"Describe the probabilty of an envent, base on the prior knowledge of condition that might be related to the event.",
"_____no_output_____"
],
[
"$$P(B_i|A)=\\frac{P(B_iA)}{P(A)}=\\frac{P(A|B_i)P(B_i)}{P(A)}=\\frac{P(A|B_i)P(B_i)}{ \\sum_{n} P(A \\vert B_n)P(B_n)}$$",
"_____no_output_____"
],
[
"# 4 Prior Probability Vs Posterior Probability",
"_____no_output_____"
],
[
"## 4.1 Prior Probability\nExpress one's beliefs about this quantity before some evidence is taken into account. The unknown quantity may be a paramter of the model of the latent variable ranther than the an observation variable. And the paramter of prior distrubtions are kind of hyperparamters.",
"_____no_output_____"
],
[
"## 4.2 Posterior Probability\nA random event or an uncertain proposition is the conditional probability that is assigned after the relevant evidence or background is taken into account. \nThe posterior probability is the probability of the parameter $\\theta$ given the evdence $X$: $p(\\theta \\rvert X)$. According to Bayes Theorem, the posterior probability is defined as \n$$p(\\theta | x)=\\frac{p(x|\\theta)p(\\theta)}{x})$$",
"_____no_output_____"
],
[
"$\\text{Posterior probability} \\propto \\text{Likelihood} \\times \\text{Proior probability}$",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e74381ad72f882b3954942a01c0766c69bb2f044 | 6,554 | ipynb | Jupyter Notebook | notebooks/colab-github-demo.ipynb | wilaiphorn/PatientExploreR | a83a5a715902e13e39002035b6b04d886e9b7118 | [
"MIT"
] | null | null | null | notebooks/colab-github-demo.ipynb | wilaiphorn/PatientExploreR | a83a5a715902e13e39002035b6b04d886e9b7118 | [
"MIT"
] | null | null | null | notebooks/colab-github-demo.ipynb | wilaiphorn/PatientExploreR | a83a5a715902e13e39002035b6b04d886e9b7118 | [
"MIT"
] | null | null | null | 43.118421 | 377 | 0.610009 | [
[
[
"<a href=\"https://colab.research.google.com/github/wilaiphorn/PatientExploreR/blob/master/notebooks/colab-github-demo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Using Google Colab with GitHub\n\n",
"_____no_output_____"
],
[
"\n[Google Colaboratory](http://colab.research.google.com) is designed to integrate cleanly with GitHub, allowing both loading notebooks from github and saving notebooks to github.",
"_____no_output_____"
],
[
"## Loading Public Notebooks Directly from GitHub\n\nColab can load public github notebooks directly, with no required authorization step.\n\nFor example, consider the notebook at this address: https://github.com/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb.\n\nThe direct colab link to this notebook is: https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb.\n\nTo generate such links in one click, you can use the [Open in Colab](https://chrome.google.com/webstore/detail/open-in-colab/iogfkhleblhcpcekbiedikdehleodpjo) Chrome extension.",
"_____no_output_____"
],
[
"## Browsing GitHub Repositories from Colab\n\nColab also supports special URLs that link directly to a GitHub browser for any user/organization, repository, or branch. For example:\n\n- http://colab.research.google.com/github will give you a general github browser, where you can search for any github organization or username.\n- http://colab.research.google.com/github/googlecolab/ will open the repository browser for the ``googlecolab`` organization. Replace ``googlecolab`` with any other github org or user to see their repositories.\n- http://colab.research.google.com/github/googlecolab/colabtools/ will let you browse the main branch of the ``colabtools`` repository within the ``googlecolab`` organization. Substitute any user/org and repository to see its contents.\n- http://colab.research.google.com/github/googlecolab/colabtools/blob/master will let you browse ``master`` branch of the ``colabtools`` repository within the ``googlecolab`` organization. (don't forget the ``blob`` here!) You can specify any valid branch for any valid repository.",
"_____no_output_____"
],
[
"## Loading Private Notebooks\n\nLoading a notebook from a private GitHub repository is possible, but requires an additional step to allow Colab to access your files.\nDo the following:\n\n1. Navigate to http://colab.research.google.com/github.\n2. Click the \"Include Private Repos\" checkbox.\n3. In the popup window, sign-in to your Github account and authorize Colab to read the private files.\n4. Your private repositories and notebooks will now be available via the github navigation pane.",
"_____no_output_____"
],
[
"## Saving Notebooks To GitHub or Drive\n\nAny time you open a GitHub hosted notebook in Colab, it opens a new editable view of the notebook. You can run and modify the notebook without worrying about overwriting the source.\n\nIf you would like to save your changes from within Colab, you can use the File menu to save the modified notebook either to Google Drive or back to GitHub. Choose **File→Save a copy in Drive** or **File→Save a copy to GitHub** and follow the resulting prompts. To save a Colab notebook to GitHub requires giving Colab permission to push the commit to your repository.",
"_____no_output_____"
],
[
"## Open In Colab Badge\n\nAnybody can open a copy of any github-hosted notebook within Colab. To make it easier to give people access to live views of GitHub-hosted notebooks,\ncolab provides a [shields.io](http://shields.io/)-style badge, which appears as follows:\n\n[](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)\n\nThe markdown for the above badge is the following:\n\n```markdown\n[](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)\n```\n\nThe HTML equivalent is:\n\n```HTML\n<a href=\"https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb\">\n <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n</a>\n```\n\nRemember to replace the notebook URL in this template with the notebook you want to link to.",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e74389a77294b14b0e46569e0d1cc6a5b7b6c81b | 335,794 | ipynb | Jupyter Notebook | convert_from_pytorch_2.6_with_kv_onnx.ipynb | deepdialog/PanGu-alpha-tf | 5c413223592df609511ac44233e992ec1e7e00e7 | [
"Apache-2.0"
] | 3 | 2021-09-13T01:40:52.000Z | 2021-12-21T16:55:46.000Z | convert_from_pytorch_2.6_with_kv_onnx.ipynb | deepdialog/PanGu-alpha-tf | 5c413223592df609511ac44233e992ec1e7e00e7 | [
"Apache-2.0"
] | null | null | null | convert_from_pytorch_2.6_with_kv_onnx.ipynb | deepdialog/PanGu-alpha-tf | 5c413223592df609511ac44233e992ec1e7e00e7 | [
"Apache-2.0"
] | null | null | null | 97.756623 | 489 | 0.739721 | [
[
[
"import re\n\nimport torch\nimport numpy as np\nimport tensorflow as tf\n\nfrom tf2gpt.model import GPT",
"_____no_output_____"
],
[
"!du -sh ../Pangu-alpha_2.6B_mgt/iter_0001000/mp_rank_00/model_optim_rng.pt",
"4.9G\t../Pangu-alpha_2.6B_mgt/iter_0001000/mp_rank_00/model_optim_rng.pt\r\n"
],
[
"m0 = torch.load('../Pangu-alpha_2.6B_mgt/iter_0001000/mp_rank_00/model_optim_rng.pt', map_location='cpu')",
"_____no_output_____"
],
[
"m0_weights = []\n\ndef extract_weight(w = m0['model'], root=''):\n for k, v in w.items():\n if isinstance(v, dict):\n extract_weight(v, root + '.' + k)\n elif isinstance(v, torch.Tensor):\n k = root + '.' + k\n k = k.replace('.language_model.', '')\n k = k.replace('.topQueryLayer.', '.layers.31.')\n m0_weights.append((\n k,\n v\n ))\n else:\n print('what?', type(v))",
"_____no_output_____"
],
[
"extract_weight()",
"_____no_output_____"
],
[
"len(m0_weights)",
"_____no_output_____"
],
[
"pangu_weights = {}\nfor k, v in m0_weights:\n print(k, v.shape)\n pangu_weights[k] = v",
"embedding.word_embeddings.weight torch.Size([40064, 2560])\nembedding.position_embeddings.weight torch.Size([1024, 2560])\ntopQueryEmbedding.top_query_embeddings.weight torch.Size([1024, 2560])\ntransformer.layers.0.input_layernorm.weight torch.Size([2560])\ntransformer.layers.0.input_layernorm.bias torch.Size([2560])\ntransformer.layers.0.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.0.attention.query.bias torch.Size([2560])\ntransformer.layers.0.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.0.attention.key.bias torch.Size([2560])\ntransformer.layers.0.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.0.attention.value.bias torch.Size([2560])\ntransformer.layers.0.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.0.attention.dense.bias torch.Size([2560])\ntransformer.layers.0.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.0.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.0.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.0.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.0.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.0.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.1.input_layernorm.weight torch.Size([2560])\ntransformer.layers.1.input_layernorm.bias torch.Size([2560])\ntransformer.layers.1.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.1.attention.query.bias torch.Size([2560])\ntransformer.layers.1.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.1.attention.key.bias torch.Size([2560])\ntransformer.layers.1.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.1.attention.value.bias torch.Size([2560])\ntransformer.layers.1.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.1.attention.dense.bias torch.Size([2560])\ntransformer.layers.1.post_attention_layernorm.weight 
torch.Size([2560])\ntransformer.layers.1.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.1.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.1.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.1.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.1.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.2.input_layernorm.weight torch.Size([2560])\ntransformer.layers.2.input_layernorm.bias torch.Size([2560])\ntransformer.layers.2.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.2.attention.query.bias torch.Size([2560])\ntransformer.layers.2.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.2.attention.key.bias torch.Size([2560])\ntransformer.layers.2.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.2.attention.value.bias torch.Size([2560])\ntransformer.layers.2.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.2.attention.dense.bias torch.Size([2560])\ntransformer.layers.2.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.2.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.2.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.2.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.2.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.2.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.3.input_layernorm.weight torch.Size([2560])\ntransformer.layers.3.input_layernorm.bias torch.Size([2560])\ntransformer.layers.3.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.3.attention.query.bias torch.Size([2560])\ntransformer.layers.3.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.3.attention.key.bias torch.Size([2560])\ntransformer.layers.3.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.3.attention.value.bias torch.Size([2560])\ntransformer.layers.3.attention.dense.weight 
torch.Size([2560, 2560])\ntransformer.layers.3.attention.dense.bias torch.Size([2560])\ntransformer.layers.3.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.3.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.3.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.3.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.3.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.3.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.4.input_layernorm.weight torch.Size([2560])\ntransformer.layers.4.input_layernorm.bias torch.Size([2560])\ntransformer.layers.4.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.4.attention.query.bias torch.Size([2560])\ntransformer.layers.4.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.4.attention.key.bias torch.Size([2560])\ntransformer.layers.4.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.4.attention.value.bias torch.Size([2560])\ntransformer.layers.4.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.4.attention.dense.bias torch.Size([2560])\ntransformer.layers.4.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.4.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.4.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.4.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.4.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.4.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.5.input_layernorm.weight torch.Size([2560])\ntransformer.layers.5.input_layernorm.bias torch.Size([2560])\ntransformer.layers.5.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.5.attention.query.bias torch.Size([2560])\ntransformer.layers.5.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.5.attention.key.bias 
torch.Size([2560])\ntransformer.layers.5.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.5.attention.value.bias torch.Size([2560])\ntransformer.layers.5.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.5.attention.dense.bias torch.Size([2560])\ntransformer.layers.5.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.5.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.5.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.5.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.5.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.5.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.6.input_layernorm.weight torch.Size([2560])\ntransformer.layers.6.input_layernorm.bias torch.Size([2560])\ntransformer.layers.6.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.6.attention.query.bias torch.Size([2560])\ntransformer.layers.6.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.6.attention.key.bias torch.Size([2560])\ntransformer.layers.6.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.6.attention.value.bias torch.Size([2560])\ntransformer.layers.6.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.6.attention.dense.bias torch.Size([2560])\ntransformer.layers.6.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.6.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.6.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.6.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.6.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.6.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.7.input_layernorm.weight torch.Size([2560])\ntransformer.layers.7.input_layernorm.bias torch.Size([2560])\ntransformer.layers.7.attention.query.weight torch.Size([2560, 
2560])\ntransformer.layers.7.attention.query.bias torch.Size([2560])\ntransformer.layers.7.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.7.attention.key.bias torch.Size([2560])\ntransformer.layers.7.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.7.attention.value.bias torch.Size([2560])\ntransformer.layers.7.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.7.attention.dense.bias torch.Size([2560])\ntransformer.layers.7.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.7.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.7.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.7.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.7.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.7.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.8.input_layernorm.weight torch.Size([2560])\ntransformer.layers.8.input_layernorm.bias torch.Size([2560])\ntransformer.layers.8.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.8.attention.query.bias torch.Size([2560])\ntransformer.layers.8.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.8.attention.key.bias torch.Size([2560])\ntransformer.layers.8.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.8.attention.value.bias torch.Size([2560])\ntransformer.layers.8.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.8.attention.dense.bias torch.Size([2560])\ntransformer.layers.8.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.8.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.8.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.8.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.8.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.8.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.9.input_layernorm.weight 
torch.Size([2560])\ntransformer.layers.9.input_layernorm.bias torch.Size([2560])\ntransformer.layers.9.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.9.attention.query.bias torch.Size([2560])\ntransformer.layers.9.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.9.attention.key.bias torch.Size([2560])\ntransformer.layers.9.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.9.attention.value.bias torch.Size([2560])\ntransformer.layers.9.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.9.attention.dense.bias torch.Size([2560])\ntransformer.layers.9.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.9.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.9.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.9.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.9.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.9.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.10.input_layernorm.weight torch.Size([2560])\ntransformer.layers.10.input_layernorm.bias torch.Size([2560])\ntransformer.layers.10.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.10.attention.query.bias torch.Size([2560])\ntransformer.layers.10.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.10.attention.key.bias torch.Size([2560])\ntransformer.layers.10.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.10.attention.value.bias torch.Size([2560])\ntransformer.layers.10.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.10.attention.dense.bias torch.Size([2560])\ntransformer.layers.10.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.10.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.10.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.10.mlp.dense_h_to_4h.bias 
torch.Size([10240])\ntransformer.layers.10.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.10.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.11.input_layernorm.weight torch.Size([2560])\ntransformer.layers.11.input_layernorm.bias torch.Size([2560])\ntransformer.layers.11.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.11.attention.query.bias torch.Size([2560])\ntransformer.layers.11.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.11.attention.key.bias torch.Size([2560])\ntransformer.layers.11.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.11.attention.value.bias torch.Size([2560])\ntransformer.layers.11.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.11.attention.dense.bias torch.Size([2560])\ntransformer.layers.11.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.11.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.11.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.11.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.11.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.11.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.12.input_layernorm.weight torch.Size([2560])\ntransformer.layers.12.input_layernorm.bias torch.Size([2560])\ntransformer.layers.12.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.12.attention.query.bias torch.Size([2560])\ntransformer.layers.12.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.12.attention.key.bias torch.Size([2560])\ntransformer.layers.12.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.12.attention.value.bias torch.Size([2560])\ntransformer.layers.12.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.12.attention.dense.bias torch.Size([2560])\ntransformer.layers.12.post_attention_layernorm.weight 
torch.Size([2560])\ntransformer.layers.12.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.12.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.12.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.12.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.12.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.13.input_layernorm.weight torch.Size([2560])\ntransformer.layers.13.input_layernorm.bias torch.Size([2560])\ntransformer.layers.13.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.13.attention.query.bias torch.Size([2560])\ntransformer.layers.13.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.13.attention.key.bias torch.Size([2560])\ntransformer.layers.13.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.13.attention.value.bias torch.Size([2560])\ntransformer.layers.13.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.13.attention.dense.bias torch.Size([2560])\ntransformer.layers.13.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.13.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.13.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.13.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.13.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.13.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.14.input_layernorm.weight torch.Size([2560])\ntransformer.layers.14.input_layernorm.bias torch.Size([2560])\ntransformer.layers.14.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.14.attention.query.bias torch.Size([2560])\ntransformer.layers.14.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.14.attention.key.bias torch.Size([2560])\ntransformer.layers.14.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.14.attention.value.bias 
torch.Size([2560])\ntransformer.layers.14.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.14.attention.dense.bias torch.Size([2560])\ntransformer.layers.14.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.14.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.14.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.14.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.14.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.14.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.15.input_layernorm.weight torch.Size([2560])\ntransformer.layers.15.input_layernorm.bias torch.Size([2560])\ntransformer.layers.15.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.15.attention.query.bias torch.Size([2560])\ntransformer.layers.15.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.15.attention.key.bias torch.Size([2560])\ntransformer.layers.15.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.15.attention.value.bias torch.Size([2560])\ntransformer.layers.15.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.15.attention.dense.bias torch.Size([2560])\ntransformer.layers.15.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.15.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.15.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.15.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.15.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.15.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.16.input_layernorm.weight torch.Size([2560])\ntransformer.layers.16.input_layernorm.bias torch.Size([2560])\ntransformer.layers.16.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.16.attention.query.bias torch.Size([2560])\ntransformer.layers.16.attention.key.weight torch.Size([2560, 
2560])\ntransformer.layers.16.attention.key.bias torch.Size([2560])\ntransformer.layers.16.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.16.attention.value.bias torch.Size([2560])\ntransformer.layers.16.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.16.attention.dense.bias torch.Size([2560])\ntransformer.layers.16.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.16.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.16.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.16.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.16.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.16.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.17.input_layernorm.weight torch.Size([2560])\ntransformer.layers.17.input_layernorm.bias torch.Size([2560])\ntransformer.layers.17.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.17.attention.query.bias torch.Size([2560])\ntransformer.layers.17.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.17.attention.key.bias torch.Size([2560])\ntransformer.layers.17.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.17.attention.value.bias torch.Size([2560])\ntransformer.layers.17.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.17.attention.dense.bias torch.Size([2560])\ntransformer.layers.17.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.17.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.17.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.17.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.17.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.17.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.18.input_layernorm.weight torch.Size([2560])\ntransformer.layers.18.input_layernorm.bias 
torch.Size([2560])\ntransformer.layers.18.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.18.attention.query.bias torch.Size([2560])\ntransformer.layers.18.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.18.attention.key.bias torch.Size([2560])\ntransformer.layers.18.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.18.attention.value.bias torch.Size([2560])\ntransformer.layers.18.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.18.attention.dense.bias torch.Size([2560])\ntransformer.layers.18.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.18.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.18.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.18.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.18.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.18.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.19.input_layernorm.weight torch.Size([2560])\ntransformer.layers.19.input_layernorm.bias torch.Size([2560])\ntransformer.layers.19.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.19.attention.query.bias torch.Size([2560])\ntransformer.layers.19.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.19.attention.key.bias torch.Size([2560])\ntransformer.layers.19.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.19.attention.value.bias torch.Size([2560])\ntransformer.layers.19.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.19.attention.dense.bias torch.Size([2560])\ntransformer.layers.19.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.19.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.19.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.19.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.19.mlp.dense_4h_to_h.weight torch.Size([2560, 
10240])\ntransformer.layers.19.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.20.input_layernorm.weight torch.Size([2560])\ntransformer.layers.20.input_layernorm.bias torch.Size([2560])\ntransformer.layers.20.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.20.attention.query.bias torch.Size([2560])\ntransformer.layers.20.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.20.attention.key.bias torch.Size([2560])\ntransformer.layers.20.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.20.attention.value.bias torch.Size([2560])\ntransformer.layers.20.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.20.attention.dense.bias torch.Size([2560])\ntransformer.layers.20.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.20.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.20.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.20.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.20.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.20.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.21.input_layernorm.weight torch.Size([2560])\ntransformer.layers.21.input_layernorm.bias torch.Size([2560])\ntransformer.layers.21.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.21.attention.query.bias torch.Size([2560])\ntransformer.layers.21.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.21.attention.key.bias torch.Size([2560])\ntransformer.layers.21.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.21.attention.value.bias torch.Size([2560])\ntransformer.layers.21.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.21.attention.dense.bias torch.Size([2560])\ntransformer.layers.21.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.21.post_attention_layernorm.bias 
torch.Size([2560])\ntransformer.layers.21.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.21.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.21.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.21.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.22.input_layernorm.weight torch.Size([2560])\ntransformer.layers.22.input_layernorm.bias torch.Size([2560])\ntransformer.layers.22.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.22.attention.query.bias torch.Size([2560])\ntransformer.layers.22.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.22.attention.key.bias torch.Size([2560])\ntransformer.layers.22.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.22.attention.value.bias torch.Size([2560])\ntransformer.layers.22.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.22.attention.dense.bias torch.Size([2560])\ntransformer.layers.22.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.22.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.22.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.22.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.22.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.22.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.23.input_layernorm.weight torch.Size([2560])\ntransformer.layers.23.input_layernorm.bias torch.Size([2560])\ntransformer.layers.23.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.23.attention.query.bias torch.Size([2560])\ntransformer.layers.23.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.23.attention.key.bias torch.Size([2560])\ntransformer.layers.23.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.23.attention.value.bias torch.Size([2560])\ntransformer.layers.23.attention.dense.weight torch.Size([2560, 
2560])\ntransformer.layers.23.attention.dense.bias torch.Size([2560])\ntransformer.layers.23.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.23.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.23.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.23.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.23.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.23.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.24.input_layernorm.weight torch.Size([2560])\ntransformer.layers.24.input_layernorm.bias torch.Size([2560])\ntransformer.layers.24.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.24.attention.query.bias torch.Size([2560])\ntransformer.layers.24.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.24.attention.key.bias torch.Size([2560])\ntransformer.layers.24.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.24.attention.value.bias torch.Size([2560])\ntransformer.layers.24.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.24.attention.dense.bias torch.Size([2560])\ntransformer.layers.24.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.24.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.24.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.24.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.24.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.24.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.25.input_layernorm.weight torch.Size([2560])\ntransformer.layers.25.input_layernorm.bias torch.Size([2560])\ntransformer.layers.25.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.25.attention.query.bias torch.Size([2560])\ntransformer.layers.25.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.25.attention.key.bias 
torch.Size([2560])\ntransformer.layers.25.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.25.attention.value.bias torch.Size([2560])\ntransformer.layers.25.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.25.attention.dense.bias torch.Size([2560])\ntransformer.layers.25.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.25.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.25.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.25.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.25.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.25.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.26.input_layernorm.weight torch.Size([2560])\ntransformer.layers.26.input_layernorm.bias torch.Size([2560])\ntransformer.layers.26.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.26.attention.query.bias torch.Size([2560])\ntransformer.layers.26.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.26.attention.key.bias torch.Size([2560])\ntransformer.layers.26.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.26.attention.value.bias torch.Size([2560])\ntransformer.layers.26.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.26.attention.dense.bias torch.Size([2560])\ntransformer.layers.26.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.26.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.26.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.26.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.26.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.26.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.27.input_layernorm.weight torch.Size([2560])\ntransformer.layers.27.input_layernorm.bias torch.Size([2560])\ntransformer.layers.27.attention.query.weight torch.Size([2560, 
2560])\ntransformer.layers.27.attention.query.bias torch.Size([2560])\ntransformer.layers.27.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.27.attention.key.bias torch.Size([2560])\ntransformer.layers.27.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.27.attention.value.bias torch.Size([2560])\ntransformer.layers.27.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.27.attention.dense.bias torch.Size([2560])\ntransformer.layers.27.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.27.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.27.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.27.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.27.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.27.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.28.input_layernorm.weight torch.Size([2560])\ntransformer.layers.28.input_layernorm.bias torch.Size([2560])\ntransformer.layers.28.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.28.attention.query.bias torch.Size([2560])\ntransformer.layers.28.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.28.attention.key.bias torch.Size([2560])\ntransformer.layers.28.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.28.attention.value.bias torch.Size([2560])\ntransformer.layers.28.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.28.attention.dense.bias torch.Size([2560])\ntransformer.layers.28.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.28.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.28.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.28.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.28.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.28.mlp.dense_4h_to_h.bias 
torch.Size([2560])\ntransformer.layers.29.input_layernorm.weight torch.Size([2560])\ntransformer.layers.29.input_layernorm.bias torch.Size([2560])\ntransformer.layers.29.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.29.attention.query.bias torch.Size([2560])\ntransformer.layers.29.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.29.attention.key.bias torch.Size([2560])\ntransformer.layers.29.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.29.attention.value.bias torch.Size([2560])\ntransformer.layers.29.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.29.attention.dense.bias torch.Size([2560])\ntransformer.layers.29.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.29.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.29.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.29.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.29.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.29.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.30.input_layernorm.weight torch.Size([2560])\ntransformer.layers.30.input_layernorm.bias torch.Size([2560])\ntransformer.layers.30.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.30.attention.query.bias torch.Size([2560])\ntransformer.layers.30.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.30.attention.key.bias torch.Size([2560])\ntransformer.layers.30.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.30.attention.value.bias torch.Size([2560])\ntransformer.layers.30.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.30.attention.dense.bias torch.Size([2560])\ntransformer.layers.30.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.30.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.30.mlp.dense_h_to_4h.weight torch.Size([10240, 
2560])\ntransformer.layers.30.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.30.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.30.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.layers.31.input_layernorm.weight torch.Size([2560])\ntransformer.layers.31.input_layernorm.bias torch.Size([2560])\ntransformer.layers.31.attention.query.weight torch.Size([2560, 2560])\ntransformer.layers.31.attention.query.bias torch.Size([2560])\ntransformer.layers.31.attention.key.weight torch.Size([2560, 2560])\ntransformer.layers.31.attention.key.bias torch.Size([2560])\ntransformer.layers.31.attention.value.weight torch.Size([2560, 2560])\ntransformer.layers.31.attention.value.bias torch.Size([2560])\ntransformer.layers.31.attention.dense.weight torch.Size([2560, 2560])\ntransformer.layers.31.attention.dense.bias torch.Size([2560])\ntransformer.layers.31.post_attention_layernorm.weight torch.Size([2560])\ntransformer.layers.31.post_attention_layernorm.bias torch.Size([2560])\ntransformer.layers.31.mlp.dense_h_to_4h.weight torch.Size([10240, 2560])\ntransformer.layers.31.mlp.dense_h_to_4h.bias torch.Size([10240])\ntransformer.layers.31.mlp.dense_4h_to_h.weight torch.Size([2560, 10240])\ntransformer.layers.31.mlp.dense_4h_to_h.bias torch.Size([2560])\ntransformer.final_layernorm.weight torch.Size([2560])\ntransformer.final_layernorm.bias torch.Size([2560])\n"
],
[
"gpt = GPT(\n vocab_size=40_064,\n layer_size=32,\n block_size=1024,\n embedding_dropout=0.0,\n embedding_size=2560,\n num_attention_heads=32,\n attention_dropout=0.0,\n residual_dropout=0.0,\n use_cache=True\n)",
"_____no_output_____"
],
[
"print(gpt(tf.constant([[1]]))[0].shape)",
"(1, 1, 40064)\n"
],
[
"for x in gpt.weights:\n if 'gpt/layer' in x.name:\n if 'gpt/layer00' in x.name:\n print(x.name, x.shape)\n else:\n print(x.name, x.shape)",
"gpt/embedding/embeddings:0 (40064, 2560)\nposition_embeddings:0 (1024, 2560)\ntop_query:0 (1024, 2560)\ngpt/layer00/attention/query_layer/kernel:0 (2560, 2560)\ngpt/layer00/attention/query_layer/bias:0 (2560,)\ngpt/layer00/attention/key_layer/kernel:0 (2560, 2560)\ngpt/layer00/attention/key_layer/bias:0 (2560,)\ngpt/layer00/attention/value_layer/kernel:0 (2560, 2560)\ngpt/layer00/attention/value_layer/bias:0 (2560,)\ngpt/layer00/attention/context_projection_layer/kernel:0 (2560, 2560)\ngpt/layer00/attention/context_projection_layer/bias:0 (2560,)\ngpt/layer00/LayerNorm_mlp_ln0/gamma:0 (2560,)\ngpt/layer00/LayerNorm_mlp_ln0/beta:0 (2560,)\ngpt/layer00/LayerNorm_mlp_ln1/gamma:0 (2560,)\ngpt/layer00/LayerNorm_mlp_ln1/beta:0 (2560,)\ngpt/layer00/intermediate/kernel:0 (2560, 10240)\ngpt/layer00/intermediate/bias:0 (10240,)\ngpt/layer00/output/kernel:0 (10240, 2560)\ngpt/layer00/output/bias:0 (2560,)\ngpt/LayerNorm_final_norm/gamma:0 (2560,)\ngpt/LayerNorm_final_norm/beta:0 (2560,)\n"
],
[
"new_weights = []\n\nfor x in gpt.weights:\n xs = tuple(x.shape)\n\n if 'gpt/embedding/embeddings:' in x.name:\n pname = 'embedding.word_embeddings.weight'\n w = pangu_weights[pname]\n assert w.shape == (4_0064, 2560)\n new_weights.append((x.name, xs, pname, w))\n\n elif 'position_embeddings' in x.name:\n pname = 'embedding.position_embeddings.weight'\n w = pangu_weights[pname]\n assert xs == w.shape\n new_weights.append((x.name, xs, pname, w))\n \n elif 'top_query' in x.name:\n pname = 'topQueryEmbedding.top_query_embeddings.weight'\n w = pangu_weights[pname]\n assert xs == w.shape\n new_weights.append((x.name, xs, pname, w))\n\n elif 'gpt/layer' in x.name:\n n_layer = int(x.name[len('gpt/layer'):][:2])\n if 'query_layer/kernel' in x.name:\n pname = f'transformer.layers.{n_layer}.attention.query.weight'\n w = pangu_weights[pname]\n w = np.transpose(w)\n assert xs == w.shape\n new_weights.append((x.name, xs, pname, w))\n elif 'key_layer/kernel' in x.name:\n pname = f'transformer.layers.{n_layer}.attention.key.weight'\n w = pangu_weights[pname]\n w = np.transpose(w)\n assert xs == w.shape\n new_weights.append((x.name, xs, pname, w))\n elif 'value_layer/kernel' in x.name:\n pname = f'transformer.layers.{n_layer}.attention.value.weight'\n w = pangu_weights[pname]\n w = np.transpose(w)\n assert xs == w.shape\n new_weights.append((x.name, xs, pname, w))\n elif 'query_layer/bias' in x.name:\n pname = f'transformer.layers.{n_layer}.attention.query.bias'\n w = pangu_weights[pname]\n assert xs == w.shape\n new_weights.append((x.name, xs, pname, w))\n elif 'key_layer/bias' in x.name:\n pname = f'transformer.layers.{n_layer}.attention.key.bias'\n w = pangu_weights[pname]\n assert xs == w.shape\n new_weights.append((x.name, xs, pname, w))\n elif 'value_layer/bias' in x.name:\n pname = f'transformer.layers.{n_layer}.attention.value.bias'\n w = pangu_weights[pname]\n assert xs == w.shape\n new_weights.append((x.name, xs, pname, w))\n\n elif 
'attention/context_projection_layer/kernel' in x.name:\n pname = f'transformer.layers.{n_layer}.attention.dense.weight'\n w = pangu_weights[pname]\n w = np.transpose(w)\n assert w.shape == xs\n new_weights.append((x.name, xs, pname, w))\n\n elif 'attention/context_projection_layer/bias' in x.name:\n pname = f'transformer.layers.{n_layer}.attention.dense.bias'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, xs, pname, w))\n\n elif 'LayerNorm_mlp_ln0/gamma' in x.name:\n pname = f'transformer.layers.{n_layer}.input_layernorm.weight'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n elif 'LayerNorm_mlp_ln1/gamma' in x.name:\n pname = f'transformer.layers.{n_layer}.post_attention_layernorm.weight'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n elif 'LayerNorm_mlp_ln0/beta' in x.name:\n pname = f'transformer.layers.{n_layer}.input_layernorm.bias'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n elif 'LayerNorm_mlp_ln1/beta' in x.name:\n pname = f'transformer.layers.{n_layer}.post_attention_layernorm.bias'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n elif 'intermediate/kernel' in x.name:\n pname = f'transformer.layers.{n_layer}.mlp.dense_h_to_4h.weight'\n w = pangu_weights[pname]\n w = np.transpose(w)\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n elif 'intermediate/bias' in x.name:\n pname = f'transformer.layers.{n_layer}.mlp.dense_h_to_4h.bias'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n elif '/output/kernel' in x.name:\n pname = f'transformer.layers.{n_layer}.mlp.dense_4h_to_h.weight'\n w = pangu_weights[pname]\n w = np.transpose(w)\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n elif '/output/bias' 
in x.name:\n pname = f'transformer.layers.{n_layer}.mlp.dense_4h_to_h.bias'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n else:\n print('BAD', x.name, xs)\n break\n elif 'gpt/LayerNorm_final_norm/gamma' in x.name:\n pname = 'transformer.final_layernorm.weight'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n elif 'gpt/LayerNorm_final_norm/beta' in x.name:\n pname = 'transformer.final_layernorm.bias'\n w = pangu_weights[pname]\n assert w.shape == xs\n new_weights.append((x.name, x.shape, pname, w))\n\n else:\n print('BAD', x.name, xs)\n break",
"_____no_output_____"
],
[
"assert len(new_weights) == len(gpt.weights)\nfor x in new_weights:\n assert tuple(x[1]) == x[-1].shape",
"_____no_output_____"
],
[
"len(gpt.weights)",
"_____no_output_____"
],
[
"gpt.set_weights([x[-1] for x in new_weights])",
"_____no_output_____"
],
[
"from tokenization_jieba import JIEBATokenizer\ncbpe = JIEBATokenizer(\n 'PanGu-Alpha-GPU/panguAlpha_pytorch/megatron/tokenizer/bpe_4w_pcl/vocab.vocab',\n 'PanGu-Alpha-GPU/panguAlpha_pytorch/megatron/tokenizer/bpe_4w_pcl/vocab.model')",
"_____no_output_____"
],
[
"cbpe.vocab_size",
"_____no_output_____"
],
[
"ids = cbpe.encode('青椒肉丝的做法:')\n\nfor i in range(10):\n output = gpt(tf.constant([ids]))[0]\n nid = np.argmax(output[0, -1])\n ids += [int(nid)]\n print(i, cbpe.decode(ids))",
"0 青椒肉丝的做法:是\n1 青椒肉丝的做法:是青\n2 青椒肉丝的做法:是青椒\n3 青椒肉丝的做法:是青椒洗净\n4 青椒肉丝的做法:是青椒洗净切\n5 青椒肉丝的做法:是青椒洗净切丝\n6 青椒肉丝的做法:是青椒洗净切丝<eot>\n7 青椒肉丝的做法:是青椒洗净切丝<eot>青\n8 青椒肉丝的做法:是青椒洗净切丝<eot>青椒\n9 青椒肉丝的做法:是青椒洗净切丝<eot>青椒肉\n"
],
[
"@tf.function\ndef batch_gather(a, b):\n return tf.gather(a, b, batch_dims=1)\n\n\[email protected]\ndef top_k_top_p_sample(logits, num_samples=1, top_k=0, p=0.95):\n batch_size, vocab_size = logits.shape\n probs = tf.nn.softmax(logits, axis=-1)\n \n # [batch_size, vocab_perm]\n indices = tf.argsort(probs, direction='DESCENDING')\n logits_to_use = batch_gather(logits, indices)\n cumulative_probabilities = tf.math.cumsum(batch_gather(probs, indices), axis=-1, exclusive=False)\n\n # find the top pth index to cut off. careful we don't want to cutoff everything!\n # result will be [batch_size, vocab_perm]\n if p > 0.0:\n exclude_mask = tf.logical_not(\n tf.logical_or(cumulative_probabilities < p, tf.range(vocab_size)[None] < 1))\n # OPTION A - sample in the sorted space, then unsort.\n logits_to_use = logits_to_use - tf.cast(exclude_mask, tf.float32) * 1e10\n \n if top_k > 0:\n logits_to_use = logits_to_use - tf.cast(\n tf.argsort(logits_to_use, direction='DESCENDING') >= top_k,\n dtype=tf.float32\n ) * 1e10\n \n sample_perm = tf.random.categorical(logits=logits_to_use, num_samples=num_samples)\n sample = batch_gather(indices, sample_perm)\n\n return tf.cast(sample, tf.int64)\n\[email protected]\ndef serve(inputs):\n return gpt(inputs, kv_cache=None, use_cache=True)\n\n\[email protected]\ndef serve_cache(inputs, kv_cache):\n return gpt(inputs, kv_cache=kv_cache, use_cache=True)\n\nserve_concrete = serve.get_concrete_function(\n tf.TensorSpec(shape=[None, None], dtype=tf.int64, name=\"inp\")\n)\n\nlayer_size = 32\nattention_head = 32\nembedding_size = 2560\n\nserve_cache_concrete = serve_cache.get_concrete_function(\n tf.TensorSpec(shape=[None, None], dtype=tf.int64, name=\"inp\"),\n tf.TensorSpec(shape=[\n layer_size, None, 2, attention_head,\n None, embedding_size // attention_head\n ], dtype=tf.float32, name=\"kv_cache\")\n)",
"_____no_output_____"
],
[
"# ids = cbpe.encode('今天天气不错')\n\n# ret = sample(\n# tf.constant([ids], dtype=tf.int64),\n# tf.constant(15, dtype=tf.int64),\n# tf.constant(15, dtype=tf.int32),\n# tf.constant(0.95, dtype=tf.float32),\n# tf.constant(0.9, dtype=tf.float32)\n# )\n# print(ret)\n# print(cbpe.decode(ret.numpy().tolist()[0]))",
"_____no_output_____"
],
[
"gpt.save('./pangu-2.6B-tf2', include_optimizer=False, signatures={\n 'serving_default': serve.get_concrete_function(\n tf.TensorSpec(shape=[None, None], dtype=tf.int64, name=\"input_ids\"),\n )\n})",
"WARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fdd1764b430>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013b9fbe0>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013a18520>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013a24760>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0139b39a0>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0139bfc10>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0139cbe50>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0139dbf70>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe01396e310>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe01397d550>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe01398a790>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0139979d0>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0139a6c10>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013934e50>, because it is not 
built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013940fa0>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013955310>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013963550>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0138f0790>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0138fe9d0>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe01390dc10>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013919e50>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013929fa0>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0138bd310>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0138ca580>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0138d97c0>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0138e6a00>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013874c40>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013881e80>, because it is not 
built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe01388efd0>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe0138a4340>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe013831580>, because it is not built.\nWARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fe01383e7c0>, because it is not built.\n"
],
[
"gpt.save('./pangu-2.6B-tf2-kv', include_optimizer=False, signatures={\n 'serving_default': serve_cache.get_concrete_function(\n tf.TensorSpec(shape=[None, None], dtype=tf.int64, name=\"input_ids\"),\n tf.TensorSpec(shape=[\n layer_size, None, 2, attention_head,\n None, embedding_size // attention_head\n ], dtype=tf.float32, name=\"kv_cache\")\n )\n})",
"WARNING:tensorflow:Skipping full serialization of Keras layer <keras.layers.core.Dropout object at 0x7fdd1764b430>, because it is not built.\n"
],
[
"!rm -rf onnx\n!mkdir -p onnx\n!python -m tf2onnx.convert \\\n --saved-model pangu-2.6B-tf2 \\\n --output onnx/pangu.zip --large_model --opset=13",
"2021-10-13 23:43:00.016579: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n2021-10-13 23:43:00.016608: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n/opt/conda/lib/python3.8/runpy.py:127: RuntimeWarning: 'tf2onnx.convert' found in sys.modules after import of package 'tf2onnx', but prior to execution of 'tf2onnx.convert'; this may result in unpredictable behaviour\n warn(RuntimeWarning(msg))\n2021-10-13 23:43:02.475426: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory\n2021-10-13 23:43:02.475453: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)\n2021-10-13 23:43:02.475478: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (iZuf6fokcl2k1pwfopz0n4Z): /proc/driver/nvidia/version does not exist\n2021-10-13 23:43:02.475722: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\nTo enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2021-10-13 23:43:02,487 - WARNING - '--tag' not specified for saved_model. 
Using --tag serve\n2021-10-13 23:43:23.659086: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 410255360 exceeds 10% of free system memory.\n2021-10-13 23:43:23.659121: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 104857600 exceeds 10% of free system memory.\n2021-10-13 23:43:23.659148: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 104857600 exceeds 10% of free system memory.\n2021-10-13 23:43:23.659188: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 104857600 exceeds 10% of free system memory.\n2021-10-13 23:43:23.659221: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 104857600 exceeds 10% of free system memory.\n2021-10-13 23:44:05,935 - INFO - Signatures found in model: [serving_default].\n2021-10-13 23:44:05,935 - WARNING - '--signature_def' not specified, using first signature: serving_default\n2021-10-13 23:44:05,937 - INFO - Output names: ['output_0', 'output_1']\n2021-10-13 23:44:06.089326: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 0\n2021-10-13 23:44:06.089635: I tensorflow/core/grappler/clusters/single_machine.cc:357] Starting new session\n2021-10-13 23:44:06.677587: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1137] Optimization results for grappler item: graph_to_optimize\n function_optimizer: Graph size after: 9064 nodes (8542), 11802 edges (11279), time = 328.654ms.\n function_optimizer: function_optimizer did nothing. 
time = 9.059ms.\n\n2021-10-13 23:45:30,805 - INFO - Using tensorflow=2.6.0, onnx=1.9.0, tf2onnx=1.9.2/0f28b7\n2021-10-13 23:45:30,806 - INFO - Using opset <onnx, 13>\n2021-10-13 23:47:21,993 - INFO - Computed 742 values for constant folding\n2021-10-13 23:47:52,292 - INFO - folding node using tf type=Identity, name=Func/StatefulPartitionedCall/input/_1\n2021-10-13 23:47:53,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,118 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/intermediate/Tensordot/concat\n2021-10-13 23:47:53,118 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/output/Tensordot/concat\n2021-10-13 23:47:53,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,119 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer01/intermediate/Tensordot/concat\n2021-10-13 23:47:53,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/output/Tensordot/concat\n2021-10-13 23:47:53,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/intermediate/Tensordot/concat\n2021-10-13 23:47:53,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/output/Tensordot/concat\n2021-10-13 23:47:53,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/intermediate/Tensordot/concat\n2021-10-13 23:47:53,121 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer03/output/Tensordot/concat\n2021-10-13 23:47:53,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/intermediate/Tensordot/concat\n2021-10-13 23:47:53,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/output/Tensordot/concat\n2021-10-13 23:47:53,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/intermediate/Tensordot/concat\n2021-10-13 23:47:53,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/output/Tensordot/concat\n2021-10-13 23:47:53,122 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer06/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/intermediate/Tensordot/concat\n2021-10-13 23:47:53,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/output/Tensordot/concat\n2021-10-13 23:47:53,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/intermediate/Tensordot/concat\n2021-10-13 23:47:53,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/output/Tensordot/concat\n2021-10-13 23:47:53,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,129 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer08/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/intermediate/Tensordot/concat\n2021-10-13 23:47:53,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/output/Tensordot/concat\n2021-10-13 23:47:53,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/intermediate/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/output/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer10/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/intermediate/Tensordot/concat\n2021-10-13 23:47:53,131 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/output/Tensordot/concat\n2021-10-13 23:47:53,131 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,131 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,131 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,131 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,131 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/intermediate/Tensordot/concat\n2021-10-13 23:47:53,131 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/output/Tensordot/concat\n2021-10-13 23:47:53,131 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer12/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/intermediate/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/output/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/intermediate/Tensordot/concat\n2021-10-13 23:47:53,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/output/Tensordot/concat\n2021-10-13 23:47:53,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,133 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer14/intermediate/Tensordot/concat\n2021-10-13 23:47:53,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/output/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/intermediate/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/output/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,135 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,135 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/intermediate/Tensordot/concat\n2021-10-13 23:47:53,135 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer16/output/Tensordot/concat\n2021-10-13 23:47:53,135 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,135 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,135 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,135 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/intermediate/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/output/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/intermediate/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/output/Tensordot/concat\n2021-10-13 23:47:53,136 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer19/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,137 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,137 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,137 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,137 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/intermediate/Tensordot/concat\n2021-10-13 23:47:53,137 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/output/Tensordot/concat\n2021-10-13 23:47:53,137 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,137 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,137 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,138 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,138 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/intermediate/Tensordot/concat\n2021-10-13 23:47:53,138 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/output/Tensordot/concat\n2021-10-13 23:47:53,138 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,138 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer21/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,138 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,138 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,139 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/intermediate/Tensordot/concat\n2021-10-13 23:47:53,139 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/output/Tensordot/concat\n2021-10-13 23:47:53,139 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,139 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,139 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,139 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,139 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/intermediate/Tensordot/concat\n2021-10-13 23:47:53,139 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/output/Tensordot/concat\n2021-10-13 23:47:53,140 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,140 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,140 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer23/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,140 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,140 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/intermediate/Tensordot/concat\n2021-10-13 23:47:53,140 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/output/Tensordot/concat\n2021-10-13 23:47:53,140 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,140 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/intermediate/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/output/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer25/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,141 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/intermediate/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/output/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/intermediate/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/output/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,142 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,143 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,143 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,143 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer27/intermediate/Tensordot/concat\n2021-10-13 23:47:53,143 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/output/Tensordot/concat\n2021-10-13 23:47:53,143 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer28/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,143 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer28/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,143 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer28/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,144 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer28/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,144 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer28/intermediate/Tensordot/concat\n2021-10-13 23:47:53,144 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer28/output/Tensordot/concat\n2021-10-13 23:47:53,144 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer29/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,144 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer29/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,144 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer29/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,144 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer29/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,144 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer29/intermediate/Tensordot/concat\n2021-10-13 23:47:53,145 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer29/output/Tensordot/concat\n2021-10-13 23:47:53,145 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer30/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,145 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer30/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,145 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer30/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,145 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer30/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,145 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer30/intermediate/Tensordot/concat\n2021-10-13 23:47:53,145 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer30/output/Tensordot/concat\n2021-10-13 23:47:53,145 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer31/attention/key_layer/Tensordot/concat\n2021-10-13 23:47:53,146 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer31/attention/value_layer/Tensordot/concat\n2021-10-13 23:47:53,146 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer31/attention/query_layer/Tensordot/concat\n2021-10-13 23:47:53,146 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer31/attention/context_projection_layer/Tensordot/concat\n2021-10-13 23:47:53,146 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer31/intermediate/Tensordot/concat\n2021-10-13 23:47:53,146 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer31/output/Tensordot/concat\n2021-10-13 23:47:53,146 - INFO - folding node using tf type=Sqrt, 
name=StatefulPartitionedCall/gpt/layer00/activation/PartitionedCall/Sqrt\n2021-10-13 23:47:53,146 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer01/activation_1/PartitionedCall/Sqrt\n2021-10-13 23:47:53,147 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer02/activation_2/PartitionedCall/Sqrt\n2021-10-13 23:47:53,147 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer03/activation_3/PartitionedCall/Sqrt\n2021-10-13 23:47:53,147 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer04/activation_4/PartitionedCall/Sqrt\n2021-10-13 23:47:53,147 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer05/activation_5/PartitionedCall/Sqrt\n2021-10-13 23:47:53,147 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer06/activation_6/PartitionedCall/Sqrt\n2021-10-13 23:47:53,147 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer07/activation_7/PartitionedCall/Sqrt\n2021-10-13 23:47:53,147 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer08/activation_8/PartitionedCall/Sqrt\n2021-10-13 23:47:53,148 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer09/activation_9/PartitionedCall/Sqrt\n2021-10-13 23:47:53,148 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer10/activation_10/PartitionedCall/Sqrt\n2021-10-13 23:47:53,148 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer11/activation_11/PartitionedCall/Sqrt\n2021-10-13 23:47:53,148 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer12/activation_12/PartitionedCall/Sqrt\n2021-10-13 23:47:53,148 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer13/activation_13/PartitionedCall/Sqrt\n2021-10-13 23:47:53,148 - INFO - folding node using tf type=Sqrt, 
name=StatefulPartitionedCall/gpt/layer14/activation_14/PartitionedCall/Sqrt\n2021-10-13 23:47:53,148 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer15/activation_15/PartitionedCall/Sqrt\n2021-10-13 23:47:53,148 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer16/activation_16/PartitionedCall/Sqrt\n2021-10-13 23:47:53,149 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer17/activation_17/PartitionedCall/Sqrt\n2021-10-13 23:47:53,149 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer18/activation_18/PartitionedCall/Sqrt\n2021-10-13 23:47:53,149 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer19/activation_19/PartitionedCall/Sqrt\n2021-10-13 23:47:53,149 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer20/activation_20/PartitionedCall/Sqrt\n2021-10-13 23:47:53,149 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer21/activation_21/PartitionedCall/Sqrt\n2021-10-13 23:47:53,149 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer22/activation_22/PartitionedCall/Sqrt\n2021-10-13 23:47:53,149 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer23/activation_23/PartitionedCall/Sqrt\n2021-10-13 23:47:53,149 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer24/activation_24/PartitionedCall/Sqrt\n2021-10-13 23:47:53,150 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer25/activation_25/PartitionedCall/Sqrt\n2021-10-13 23:47:53,150 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer26/activation_26/PartitionedCall/Sqrt\n2021-10-13 23:47:53,150 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer27/activation_27/PartitionedCall/Sqrt\n2021-10-13 23:47:53,150 - INFO - folding node using tf 
type=Sqrt, name=StatefulPartitionedCall/gpt/layer28/activation_28/PartitionedCall/Sqrt\n2021-10-13 23:47:53,150 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer29/activation_29/PartitionedCall/Sqrt\n2021-10-13 23:47:53,150 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer30/activation_30/PartitionedCall/Sqrt\n2021-10-13 23:47:53,150 - INFO - folding node using tf type=Sqrt, name=StatefulPartitionedCall/gpt/layer31/activation_31/PartitionedCall/Sqrt\n2021-10-13 23:47:53,150 - INFO - folding node using tf type=Identity, name=StatefulPartitionedCall/gpt/position_embedding_1/ReadVariableOp\n"
],
[
"!cd onnx && unzip -q pangu.zip",
"_____no_output_____"
],
[
"!rm -rf onnx_kv\n!mkdir -p onnx_kv\n!python -m tf2onnx.convert \\\n --saved-model pangu-2.6B-tf2-kv \\\n --output onnx_kv/pangu.zip --large_model --opset=13",
"2021-10-14 00:15:59.812010: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n2021-10-14 00:15:59.812039: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n/opt/conda/lib/python3.8/runpy.py:127: RuntimeWarning: 'tf2onnx.convert' found in sys.modules after import of package 'tf2onnx', but prior to execution of 'tf2onnx.convert'; this may result in unpredictable behaviour\n warn(RuntimeWarning(msg))\n2021-10-14 00:16:01.012691: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory\n2021-10-14 00:16:01.012718: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)\n2021-10-14 00:16:01.012742: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (iZuf6fokcl2k1pwfopz0n4Z): /proc/driver/nvidia/version does not exist\n2021-10-14 00:16:01.012975: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\nTo enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2021-10-14 00:16:01,022 - WARNING - '--tag' not specified for saved_model. 
Using --tag serve\n2021-10-14 00:16:25,175 - INFO - Signatures found in model: [serving_default].\n2021-10-14 00:16:25,175 - WARNING - '--signature_def' not specified, using first signature: serving_default\n2021-10-14 00:16:25,177 - INFO - Output names: ['output_0', 'output_1']\n2021-10-14 00:16:25.340780: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 0\n2021-10-14 00:16:25.340989: I tensorflow/core/grappler/clusters/single_machine.cc:357] Starting new session\n2021-10-14 00:16:25.909159: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1137] Optimization results for grappler item: graph_to_optimize\n function_optimizer: Graph size after: 9361 nodes (8838), 12164 edges (11640), time = 342.326ms.\n function_optimizer: function_optimizer did nothing. time = 8.119ms.\n\n2021-10-14 00:17:21,589 - INFO - Using tensorflow=2.6.0, onnx=1.9.0, tf2onnx=1.9.2/0f28b7\n2021-10-14 00:17:21,589 - INFO - Using opset <onnx, 13>\n2021-10-14 00:18:48,589 - INFO - Computed 742 values for constant folding\n2021-10-14 00:19:13,383 - INFO - folding node using tf type=Identity, name=Func/StatefulPartitionedCall/input/_2\n2021-10-14 00:19:14,111 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,111 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,111 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,112 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,112 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/intermediate/Tensordot/concat\n2021-10-14 00:19:14,112 - INFO - folding node using 
tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer00/output/Tensordot/concat\n2021-10-14 00:19:14,112 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,112 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,112 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,112 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,112 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/intermediate/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer01/output/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/intermediate/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer02/output/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer03/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,113 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/intermediate/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer03/output/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,114 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/intermediate/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer04/output/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer05/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/intermediate/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer05/output/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,115 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/intermediate/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer06/output/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer07/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,116 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/intermediate/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer07/output/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/intermediate/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer08/output/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,117 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,118 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,119 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer09/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/intermediate/Tensordot/concat\n2021-10-14 00:19:14,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer09/output/Tensordot/concat\n2021-10-14 00:19:14,119 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/intermediate/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer10/output/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,120 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer11/intermediate/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer11/output/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/intermediate/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer12/output/Tensordot/concat\n2021-10-14 00:19:14,121 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer13/intermediate/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer13/output/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,122 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/intermediate/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer14/output/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/intermediate/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer15/output/Tensordot/concat\n2021-10-14 00:19:14,123 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer16/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/intermediate/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer16/output/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,124 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/intermediate/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer17/output/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer18/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/intermediate/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer18/output/Tensordot/concat\n2021-10-14 00:19:14,125 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/intermediate/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer19/output/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer20/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,126 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/intermediate/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer20/output/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/intermediate/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer21/output/Tensordot/concat\n2021-10-14 00:19:14,127 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer22/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/intermediate/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer22/output/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,128 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/intermediate/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer23/output/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer24/intermediate/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer24/output/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,129 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/intermediate/Tensordot/concat\n2021-10-14 00:19:14,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer25/output/Tensordot/concat\n2021-10-14 00:19:14,130 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,132 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer26/intermediate/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, 
name=StatefulPartitionedCall/gpt/layer26/output/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/attention/value_layer/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/attention/query_layer/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/attention/context_projection_layer/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/intermediate/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer27/output/Tensordot/concat\n2021-10-14 00:19:14,133 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer28/attention/key_layer/Tensordot/concat\n2021-10-14 00:19:14,134 - INFO - folding node using tf type=ConcatV2, name=StatefulPartitionedCall/gpt/layer28/attention/value_layer/Tensordot/concat\n"
],
[
"!cd onnx_kv && unzip -q pangu.zip",
"_____no_output_____"
],
[
"import onnx\nfrom onnxruntime.quantization import quantize_dynamic, QuantType",
"_____no_output_____"
],
[
"!rm -rf onnx_q && mkdir -p onnx_q",
"_____no_output_____"
],
[
"quantized_model = quantize_dynamic(\n './onnx/__MODEL_PROTO.onnx',\n './onnx_q/pangu.onnx',\n weight_type=QuantType.QUInt8,\n use_external_data_format=True\n)",
"_____no_output_____"
],
[
"!rm -rf onnx_kv_q && mkdir -p onnx_kv_q",
"_____no_output_____"
],
[
"quantized_model = quantize_dynamic(\n './onnx_kv/__MODEL_PROTO.onnx',\n './onnx_kv_q/pangu.onnx',\n weight_type=QuantType.QUInt8,\n use_external_data_format=True\n)",
"_____no_output_____"
],
[
"!rm -rf onnx\n!rm -rf onnx_kv",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e743a6dfba9b20d98ae09bf9422fd76514f0e68e | 206,950 | ipynb | Jupyter Notebook | Mk/2.fitting.ipynb | Romanism/dss-project-taxi | 8802e74eac51103bece8ee7ac556dece2b5c38bd | [
"MIT"
] | null | null | null | Mk/2.fitting.ipynb | Romanism/dss-project-taxi | 8802e74eac51103bece8ee7ac556dece2b5c38bd | [
"MIT"
] | null | null | null | Mk/2.fitting.ipynb | Romanism/dss-project-taxi | 8802e74eac51103bece8ee7ac556dece2b5c38bd | [
"MIT"
] | 2 | 2018-02-04T00:42:54.000Z | 2018-02-08T12:18:48.000Z | 83.92133 | 24,800 | 0.713815 | [
[
[
"%matplotlib inline\nfrom taxi_pakage import *\ntaxi = pd.read_csv(\"edited_train.csv\")",
"_____no_output_____"
],
[
"# 날씨 데이터 생성\nweather_event = ['20160110', '20160113', '20160117', '20160123', '20160205', '20160208', '20160215', '20160216',\n '20160224', '20160225', '20160314', '20160315', '20160328', '20160329', '20160403', '20160404',\n '20160530', '20160628']\n\nweather_event = pd.Series(pd.to_datetime(weather_event, format = '%Y%m%d')).dt.date\nweather_event = weather_event.astype('<U32')\nweather_event = list(weather_event)\n\ntaxi[\"y-m-d\"] = pd.to_datetime(taxi[\"pickup_datetime\"]).apply(lambda x: x.strftime(\"%Y-%m-%d\"))\ntaxi[\"extreme_weather\"] = taxi[\"y-m-d\"].apply(lambda x: 1 if x in weather_event else 0)\ntaxi[\"weather_event\"] = taxi[\"extreme_weather\"] # 날씨 (1:자연재해, 0:자연재해X)\ntaxi.drop(['y-m-d', 'extreme_weather'], axis=1, inplace=True)",
"_____no_output_____"
],
[
"taxi['sqrt_log_dist'] = taxi['dist'].apply(lambda x: np.sqrt(np.log1p(x)))\ntaxi['cbrt_log_dist'] = taxi['dist'].apply(lambda x: np.cbrt(np.log1p(x)))\ntaxi['log_duration'] = taxi['trip_duration'].apply(lambda x: np.log1p(x))",
"_____no_output_____"
],
[
"taxi['velo'] = taxi['dist']/taxi['trip_duration']*3600 # 시속\ntaxi['no_passenger'] = taxi['passenger_count'].apply(lambda x: 1 if x == 0 else 0)",
"_____no_output_____"
],
[
"# 아웃라이어 제거\ntaxi = taxi[taxi['trip_duration'] < 1500000].reset_index(drop=True)\ntaxi = taxi[taxi['velo']<100]",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"# model\n### kaggle_ 0.48665 - 768/1257 (61%)",
"_____no_output_____"
]
],
[
[
"model1 = sm.OLS.from_formula(\"log_duration ~ \\\n scale(sqrt_log_dist)*C(vendor_id)\\\n + scale(sqrt_log_dist)*C(work)\\\n + C(weekday)\\\n + C(hour)\\\n + scale(sqrt_log_dist)*scale(weather_event)\\\n + scale(month)\\\n +0\", data = taxi)\nresult = model1.fit()\nresult.summary()",
"_____no_output_____"
]
],
[
[
"## cross validation",
"_____no_output_____"
]
],
[
[
"score, result_set = cross_validater(\"log_duration ~ \\\n scale(sqrt_log_dist)*C(vendor_id)\\\n + scale(sqrt_log_dist)*C(work)\\\n + C(weekday)\\\n + C(hour)\\\n + scale(sqrt_log_dist)*scale(weather_event)\\\n + scale(month)\\\n +0\", taxi, 3, r_seed=3, target_log=True)",
"_____no_output_____"
],
[
"result_set",
"_____no_output_____"
],
[
"score",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Kaggle",
"_____no_output_____"
]
],
[
[
"test = pd.read_csv(\"edited_test.csv\")\ntest['sqrt_log_dist'] = test['dist'].apply(lambda x: np.sqrt(np.log1p(x)))",
"_____no_output_____"
],
[
"# 테스트 데이터를 통해 y값 예측\ny_hat = result.predict(test)\ny_hat = y_hat.apply(lambda x: int(round(np.exp(x))))\nans = pd.concat([test['id'], y_hat], axis=1)\nans.rename(columns={'id':'id' , 0:'trip_duration'}, inplace=True)\nans.tail()",
"_____no_output_____"
],
[
"# Kaggle 제출파일\nans.to_csv('basic_model.csv', index=False)",
"_____no_output_____"
]
],
[
[
"### 0.48665 - 768/1257 (61%)",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"## dist",
"_____no_output_____"
]
],
[
[
"a = taxi.pivot_table(\"log_duration\", \"sqrt_log_dist\", aggfunc='mean')\na.plot()",
"_____no_output_____"
],
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'scale_sqrt_log_dist')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(cbrt_log_dist)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'scale_cbrt_log_dist')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)\", data = filtered)\nresult1 = model1.fit()\nstorage(result1, results, 'f~scale_sqrt_log_dist')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(cbrt_log_dist)\", data = filtered)\nresult1 = model1.fit()\nstorage(result1, results, 'f~scale_cbrt_log_dist')\n\nresults",
"_____no_output_____"
]
],
[
[
"아웃라이어를 제거한 상태에서는 cbrt가 더 좋음",
"_____no_output_____"
],
[
"## work",
"_____no_output_____"
]
],
[
[
"a = taxi.pivot_table(\"trip_duration\", \"work\", aggfunc='mean')\na.plot()",
"_____no_output_____"
],
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(work)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'C(work)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(work)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + scale(work)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*scale(work)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist*scale(work)')\n\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(work)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist*C(work)')\n\nresults",
"_____no_output_____"
]
],
[
[
"## weather_event",
"_____no_output_____"
]
],
[
[
"a = taxi.pivot_table(\"log_duration\", \"weather_event\", aggfunc='mean')\na.plot()",
"_____no_output_____"
],
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(weather_event)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'C(weather_event)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(weather_event)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + scale(weather_event)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*scale(weather_event)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist*scale(weather_event)')\n\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(weather_event)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist*C(weather_event)')\n\nresults",
"_____no_output_____"
]
],
[
[
"## weekday",
"_____no_output_____"
]
],
[
[
"a = taxi.pivot_table(\"log_duration\", \"weekday\", aggfunc='mean')\na.plot()",
"_____no_output_____"
],
[
"# origin data model\nmodel = sm.OLS.from_formula(\"log_duration ~ scale(weekday) +scale(weekday**2) +scale(weekday**3) + scale(weekday**4) +scale(weekday**5) +scale(weekday**6)+scale(weekday**7) + scale(weekday**8) + scale(weekday**9)\", data = taxi)\nresult2 = model.fit_regularized(alpha=0.001, L1_wt=1)\nprint(result2.params)",
"Intercept 6.466458\nscale(weekday) 0.086116\nscale(weekday ** 2) -0.032375\nscale(weekday ** 3) -0.063171\nscale(weekday ** 4) -0.021102\nscale(weekday ** 5) 0.000000\nscale(weekday ** 6) 0.000000\nscale(weekday ** 7) 0.000000\nscale(weekday ** 8) 0.000000\nscale(weekday ** 9) 0.000000\ndtype: float64\n"
],
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(weekday)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'C(weekday)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(weekday)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist*C(weekday)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) +scale(weekday) +scale(weekday**2)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + s(weekday**1,2) ')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(weekday**2)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist *scale(weekday**2)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(weekday)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist *scale(weekday)')\n\nresults",
"_____no_output_____"
]
],
[
[
"## month",
"_____no_output_____"
]
],
[
[
"# origin data model\nmodel = sm.OLS.from_formula(\"log_duration ~ scale(month) +scale(month**2) +scale(month**3) + scale(month**4) +scale(month**5) +scale(month**6)+scale(month**7) + scale(month**8) + scale(month**9)\", data = taxi)\nresult2 = model.fit_regularized(alpha=0.01, L1_wt=1)\nprint(result2.params)",
"Intercept 6.457458\nscale(month) 0.026241\nscale(month ** 2) 0.000968\nscale(month ** 3) 0.000000\nscale(month ** 4) 0.000000\nscale(month ** 5) 0.000000\nscale(month ** 6) 0.000000\nscale(month ** 7) 0.000000\nscale(month ** 8) 0.000000\nscale(month ** 9) 0.000000\ndtype: float64\n"
],
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nresult1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(month)\", data = taxi).fit()\nstorage(result1, results, 'sqrt dist + C(month)')\n\nresult1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(month)\", data = taxi).fit()\nstorage(result1, results, 'sqrt dist *C(month)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(month)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + s(month)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(month**2)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + s(month**2)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*scale(month)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * s(month)')\n\n\n\nresults",
"_____no_output_____"
]
],
[
[
"## day",
"_____no_output_____"
]
],
[
[
"a = taxi.pivot_table(\"log_duration\", \"day\", aggfunc='mean')\na.plot()",
"_____no_output_____"
],
[
"# origin data model\nmodel = sm.OLS.from_formula(\"log_duration ~ scale(day) +scale(day**2) +scale(day**3) + scale(day**4) +scale(day**5) +scale(day**6)+scale(day**7) + scale(day**8) + scale(day**9)\", data = taxi)\nresult2 = model.fit_regularized(alpha=0.01, L1_wt=1)\nprint(result2.params)",
"Intercept 6.45734\nscale(day) 0.00000\nscale(day ** 2) 0.00000\nscale(day ** 3) 0.00000\nscale(day ** 4) 0.00000\nscale(day ** 5) 0.00000\nscale(day ** 6) 0.00000\nscale(day ** 7) 0.00000\nscale(day ** 8) 0.00000\nscale(day ** 9) 0.00000\ndtype: float64\n"
],
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(day)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + s(day)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(day**2)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + s(day**2)')\n\n\nresults",
"_____no_output_____"
]
],
[
[
"## hour",
"_____no_output_____"
]
],
[
[
"a = taxi.pivot_table(\"log_duration\", \"hour\", aggfunc='mean')\na.plot()",
"_____no_output_____"
],
[
"# origin data model\nmodel = sm.OLS.from_formula(\"log_duration ~ scale(hour) +scale(hour**2) +scale(hour**3) + scale(hour**4) +scale(hour**5) +scale(hour**6)+scale(hour**7) + scale(hour**8) + scale(hour**9)\", data = taxi)\nresult2 = model.fit_regularized(alpha=0.001, L1_wt=1)\nprint(result2.params)",
"Intercept 6.466458\nscale(hour) 0.107375\nscale(hour ** 2) 0.000000\nscale(hour ** 3) -0.030961\nscale(hour ** 4) -0.048082\nscale(hour ** 5) -0.008822\nscale(hour ** 6) 0.000000\nscale(hour ** 7) 0.000000\nscale(hour ** 8) 0.000000\nscale(hour ** 9) 0.000000\ndtype: float64\n"
],
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\n# origin data model\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(hour)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + C(hour)')\n\n# origin data model\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(hour) + scale(hour**4)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + s(hour**1, 4)')\n\n# origin data model\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*scale(hour)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * scale(hour)')\n\n# origin data model\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(hour)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * C(hour)')\n\nresults",
"_____no_output_____"
]
],
[
[
"interaction은 다른 변수랑 같이 넣으면 너무 많아져서 에러남\n",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"## store",
"_____no_output_____"
]
],
[
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(store_and_fwd_flag) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + C(store_and_fwd_flag)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist):C(store_and_fwd_flag) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist : C(store_and_fwd_flag)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(store_and_fwd_flag) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * C(store_and_fwd_flag)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)/C(store_and_fwd_flag) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist / C(store_and_fwd_flag)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + store_and_fwd_flag\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + store_and_fwd_flag')\n\nresults",
"_____no_output_____"
]
],
[
[
"## vendor_id",
"_____no_output_____"
]
],
[
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(vendor_id) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + C(vendor_id)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist):C(vendor_id) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist : C(vendor_id)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(vendor_id) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * C(vendor_id)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)/C(vendor_id) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist / C(vendor_id)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + vendor_id\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + vendor_id')\n\nresults",
"_____no_output_____"
]
],
[
[
"## no_passenger",
"_____no_output_____"
]
],
[
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(no_passenger) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + C(no_passenger)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist):C(no_passenger) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist : C(no_passenger)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(no_passenger) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * C(no_passenger)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)/C(no_passenger) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist / C(no_passenger)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + no_passenger \", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + no_passenger')\n\nresults",
"_____no_output_____"
]
],
[
[
"## passenger_count",
"_____no_output_____"
]
],
[
[
"results = pd.DataFrame(columns = [\"R-square\", \"AIC\", \"BIC\", \"Cond.No.\", \"Pb(Fstatics)\", \"Pb(omnibus)\", \"Pb(jb)\", \"Dub-Wat\",\"Remarks\"])\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + C(passenger_count) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist + C(passenger_count)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist)*C(passenger_count) +0\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * C(passenger_count)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) * scale(passenger_count)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * scale(passenger_count)')\n\nmodel1 = sm.OLS.from_formula(\"log_duration ~ scale(sqrt_log_dist) + scale(passenger_count)\", data = taxi)\nresult1 = model1.fit()\nstorage(result1, results, 'sqrt dist * scale(passenger_count)')\n\nresults",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e743aa4f0550dd3e4ea03d1ec8d20454dbe56156 | 30,642 | ipynb | Jupyter Notebook | new dataset CAF.ipynb | imagoodman/recommend-by-normalizing-flow | 06eacc0802855d23dce39901e75d6110e16bc4c7 | [
"MIT"
] | null | null | null | new dataset CAF.ipynb | imagoodman/recommend-by-normalizing-flow | 06eacc0802855d23dce39901e75d6110e16bc4c7 | [
"MIT"
] | null | null | null | new dataset CAF.ipynb | imagoodman/recommend-by-normalizing-flow | 06eacc0802855d23dce39901e75d6110e16bc4c7 | [
"MIT"
] | null | null | null | 81.712 | 9,132 | 0.81026 | [
[
[
"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport time\nfrom tensorflow.keras.regularizers import l2\n\nfrom tensorflow.keras.layers import (Input, Dense, Lambda, Flatten, Reshape, BatchNormalization, \n Activation, Dropout, Conv2D, Conv2DTranspose,\n Concatenate, Add, Multiply)\nfrom flows import MaskingDense, IAF, MAF\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"batch_size = 256\n# 生成的z的维度。如果要训练,则考虑选择更大的dim\nlatent_dim = 32\n\nlearning_rate = 0.0005\ndropout = 0.1\n\ndataset_path = r\"C:\\Users\\i9233\\recommend system\\dataset\\hetrec2011-lastfm-2k\"\nhottest = int(math.pow(25, 2))",
"_____no_output_____"
],
[
"user_representation_normalized = pd.read_csv(dataset_path + \"\\\\user_representation_sigmoid.csv\" )\nitem_representation = pd.read_csv(dataset_path + \"\\\\item_representation.csv\" )",
"_____no_output_____"
],
[
"dataset_len = user_representation_normalized.shape[0]\ntarget = np.zeros(dataset_len)",
"_____no_output_____"
],
[
"train_data = tf.data.Dataset.from_tensor_slices((tf.cast(user_representation_normalized.values, 'float32'), target))\n\ntrain_data = train_data.repeat().batch(batch_size).prefetch(1)",
"_____no_output_____"
],
[
"maf = MAF(original_size=hottest, latent_dim=latent_dim, num_flow=5)\noptimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)",
"_____no_output_____"
],
[
"# 训练maf\ndef train_maf(maf, dataset:tf.data.Dataset, optimizer, training_steps=1000, display_step=100):\n loss_list = []\n for step, (inputs, _y) in enumerate(dataset.take(training_steps + 1)):\n with tf.GradientTape() as g:\n z_k, e_0, _z_log_sigma_0, _z_sigmas = maf.inference(inputs)\n x_hat, _z_sigmas_2 = maf.generation(z_k)\n # 收敛非常快,暂时不知道原因\n loss = maf.flow_loss(inputs, x_hat, e_0, _z_log_sigma_0, _z_sigmas, z_k)\n \n loss_list.append(loss)\n if step % display_step == 0:\n print(\"step: %i, loss: %f\" % (step, loss))\n gradients = g.gradient(loss, maf.variables)\n optimizer.apply_gradients(grads_and_vars=zip(gradients, maf.variables))\n return loss_list",
"_____no_output_____"
],
[
"training_steps = 20000\ndisplay_step = 2000\nloss_list = train_maf(maf, dataset=train_data, optimizer=optimizer, \n training_steps=training_steps, display_step=display_step)\n\np = pd.DataFrame(loss_list)\np.plot()",
"step: 0, loss: 403.125366\nstep: 2000, loss: 308.935913\nstep: 4000, loss: 307.064209\nstep: 6000, loss: 306.569763\nstep: 8000, loss: 306.126923\nstep: 10000, loss: 305.372864\nstep: 12000, loss: 305.302887\nstep: 14000, loss: 305.180145\nstep: 16000, loss: 305.147949\nstep: 18000, loss: 305.168213\nstep: 20000, loss: 305.137848\n"
],
[
"place = \"./models/maf/lastfm_sigmoid_BinaryCrossentropy_%d_%d/\" % (latent_dim, training_steps)\ntf.saved_model.save(maf, place)",
"WARNING:tensorflow:Skipping full serialization of Keras model <flows.MAF object at 0x00000135D25DBD48>, because its inputs are not defined.\nWARNING:tensorflow:From e:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow_core\\python\\ops\\resource_variable_ops.py:1786: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\nINFO:tensorflow:Assets written to: ./models/maf/lastfm_sigmoid_BinaryCrossentropy_32_20000/assets\n"
],
[
"place = \"./models/maf/lastfm_sigmoid_BinaryCrossentropy_%d_%d/\" % (latent_dim, training_steps)\nmaf = tf.saved_model.load(place)",
"_____no_output_____"
],
[
"iaf = IAF(original_size=hottest, latent_dim=latent_dim)\noptimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)",
"_____no_output_____"
],
[
"def train_iaf_with_trained_maf(iaf, maf, dataset:tf.data.Dataset, optimizer, training_steps=1000, display_step=100):\n loss_list = []\n for step, (inputs, _y) in enumerate(dataset.take(training_steps + 1)):\n with tf.GradientTape() as g:\n z_k, e_0, _z_log_sigma_0, _z_sigmas2 = maf.inference(inputs)\n \n z_k_i, e_0, _z_log_sigma_0, _z_sigmas = iaf.inference(inputs)\n \n x_hat, _z_sigmas = iaf.generation(z_k_i) # 不计算会说有些没有训练到,故而注释掉\n \n iaf_loss = iaf.flow_loss(inputs, x_hat, e_0, _z_log_sigma_0, _z_sigmas, z_k_i)\n \n # CAF eq 20。最后一层的sigma,代表z自身的熵 \n H_z = tf.reduce_mean(np.log(2*math.pi * _z_sigmas[-1]))\n # CAF eq 21。maf和iaf生成的熵\n \n # 交叉熵最好先做归一化,如果不归一化,这里的z的预测会急剧变成全负数。目前考虑的是用sigmoid\n # 在正常训练中没有出现,是因为会保证其z约束在正态分布上。联合训练也会负,但是不快\n z_k = tf.math.sigmoid(z_k)\n z_k_i = tf.math.sigmoid(z_k_i)\n H_maf_iaf = tf.reduce_mean(tf.keras.metrics.binary_crossentropy(z_k_i, z_k))\n \n loss = H_z + H_maf_iaf + iaf_loss\n # 使用联合训练,iaf的loss下降很快,这里可以加入和直接iaf训练的对比图。因为求导的性质,也不需要担心对各个loss的减小的性能\n loss_list.append(iaf_loss)\n if step % display_step == 0:\n print(\"step: %i, H_z: %f, H_maf_iaf: %f, iaf_loss: %f\" % (step, H_z, H_maf_iaf, iaf_loss))\n gradients = g.gradient(loss, iaf.variables)\n optimizer.apply_gradients(grads_and_vars=zip(gradients, iaf.variables))\n return loss_list",
"_____no_output_____"
],
[
"training_steps = 50000\ndisplay_step = 2000\nloss_list = train_iaf_with_trained_maf(iaf, maf, dataset=train_data, optimizer=optimizer, \n training_steps=training_steps, display_step=display_step)\n\np = pd.DataFrame(loss_list)\np.plot()",
"step: 0, H_z: 1.346887, H_maf_iaf: 0.812742, iaf_loss: 646.379578\nstep: 2000, H_z: 1.507389, H_maf_iaf: 0.802144, iaf_loss: 474.010895\nstep: 4000, H_z: 1.517445, H_maf_iaf: 0.805421, iaf_loss: 473.171906\nstep: 6000, H_z: 1.521120, H_maf_iaf: 0.808984, iaf_loss: 472.908905\nstep: 8000, H_z: 1.522843, H_maf_iaf: 0.804396, iaf_loss: 472.741608\nstep: 10000, H_z: 1.523695, H_maf_iaf: 0.804233, iaf_loss: 472.587372\nstep: 12000, H_z: 1.524178, H_maf_iaf: 0.802132, iaf_loss: 472.653076\nstep: 14000, H_z: 1.524396, H_maf_iaf: 0.807201, iaf_loss: 472.582642\nstep: 16000, H_z: 1.524493, H_maf_iaf: 0.806177, iaf_loss: 472.530426\nstep: 18000, H_z: 1.524549, H_maf_iaf: 0.808906, iaf_loss: 472.617676\nstep: 20000, H_z: 1.524583, H_maf_iaf: 0.810612, iaf_loss: 472.560852\nstep: 22000, H_z: 1.524597, H_maf_iaf: 0.805693, iaf_loss: 472.640411\nstep: 24000, H_z: 1.524606, H_maf_iaf: 0.800851, iaf_loss: 472.597534\nstep: 26000, H_z: 1.524610, H_maf_iaf: 0.802191, iaf_loss: 472.438660\nstep: 28000, H_z: 1.524613, H_maf_iaf: 0.809296, iaf_loss: 472.594727\nstep: 30000, H_z: 1.524614, H_maf_iaf: 0.808634, iaf_loss: 472.569946\nstep: 32000, H_z: 1.524615, H_maf_iaf: 0.805052, iaf_loss: 472.626648\nstep: 34000, H_z: 1.524615, H_maf_iaf: 0.805111, iaf_loss: 472.614136\nstep: 36000, H_z: 1.524615, H_maf_iaf: 0.805372, iaf_loss: 472.490356\nstep: 38000, H_z: 1.524615, H_maf_iaf: 0.811465, iaf_loss: 472.594910\nstep: 40000, H_z: 1.524615, H_maf_iaf: 0.804078, iaf_loss: 472.580109\nstep: 42000, H_z: 1.524615, H_maf_iaf: 0.815852, iaf_loss: 472.589539\nstep: 44000, H_z: 1.524615, H_maf_iaf: 0.806289, iaf_loss: 472.602478\nstep: 46000, H_z: 1.524615, H_maf_iaf: 0.815898, iaf_loss: 472.545929\nstep: 48000, H_z: 1.524615, H_maf_iaf: 0.804007, iaf_loss: 472.605743\nstep: 50000, H_z: 1.524615, H_maf_iaf: 0.802588, iaf_loss: 472.585938\n"
],
[
"place = \"./models/iaf/lastfm_sigmoid_BinaryCrossentropy_%d_%d/\" % (latent_dim, training_steps)\ntf.saved_model.save(iaf, place)",
"WARNING:tensorflow:Skipping full serialization of Keras model <flows.IAF object at 0x000001375DBAA5C8>, because its inputs are not defined.\nINFO:tensorflow:Assets written to: ./models/iaf/lastfm_sigmoid_BinaryCrossentropy_32_50000/assets\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e743b376751e7a22236ff974bcf1c9ecb06f7110 | 60,999 | ipynb | Jupyter Notebook | 05_Contrib/TimeDomain/AntaresExample/AntaresSolarSystemObjectLightCurveExploration.ipynb | noaodatalab/notebooks_default | 3001f40c0de05445e65e205fdb3806f85e91dbfe | [
"BSD-3-Clause"
] | 15 | 2019-02-27T18:44:34.000Z | 2022-01-26T17:50:37.000Z | 05_Contrib/TimeDomain/AntaresExample/AntaresSolarSystemObjectLightCurveExploration.ipynb | noaodatalab/notebooks_default | 3001f40c0de05445e65e205fdb3806f85e91dbfe | [
"BSD-3-Clause"
] | 51 | 2019-02-11T19:14:45.000Z | 2022-02-07T22:28:23.000Z | 05_Contrib/TimeDomain/AntaresExample/AntaresSolarSystemObjectLightCurveExploration.ipynb | noaodatalab/notebooks_default | 3001f40c0de05445e65e205fdb3806f85e91dbfe | [
"BSD-3-Clause"
] | 22 | 2019-02-07T20:31:50.000Z | 2022-02-04T23:13:42.000Z | 182.631737 | 26,532 | 0.900818 | [
[
[
"__author__ = 'Chien-Hsiu Lee <[email protected]> and Thomas Matheson <[email protected]>'\n__version__ = '20211130' # yyyymmdd\n__datasets__ = ['']\n__keywords__ = ['ANTARES', 'movingobject']",
"_____no_output_____"
]
],
[
[
"# Exploring Elastic Search Database to Investigate Moving Object Lightcurves\n\n*Chien-Hsiu Lee, Thomas Matheson & ANTARES Team*",
"_____no_output_____"
],
[
"### Table of contents\n* [Goals & notebook summary](#goals)\n* [Disclaimer & Attribution](#attribution)\n* [Imports & setup](#import)\n* [Authentication](#auth)\n* [First chapter](#chapter1)\n* [Resources and references](#resources)",
"_____no_output_____"
],
[
"<a class=\"anchor\" id=\"goals\"></a>\n# Goals\nThis notebook is an example of how to explore the ANTARES alert database for moving objects. ZTF searches moving object database (JPL/HORIZON) and associates alerts to known moving objects. This piece of information is stored in the ANTARES alert database, so we can conveniently use the moving object information in the alerts to extract light curves.",
"_____no_output_____"
],
[
"# Summary\n\nMoving objects may undergo abrupt brightnes change due to outgassing activities or disintegration, which can be identified in their light curves. There is also a group of binary asteroids that manifest periodic brightness change.\n\nAs a showcase, we obtain the light curve of 809 Lundia, a known binary asteroids with prominent eclipses. We then remove the long-term trend in the light curve and fold with its period to reveal the binary eclipses.",
"_____no_output_____"
],
[
"# Disclaimer & attribution\nIf you use this notebook for your published science, please acknowledge the following:\n\n* Data Lab concept paper: Fitzpatrick et al., \"The NOAO Data Laboratory: a conceptual overview\", SPIE, 9149, 2014, http://dx.doi.org/10.1117/12.2057445\n\n* Data Lab disclaimer: http://datalab.noirlab.edu/disclaimers.php",
"_____no_output_____"
],
[
"<a class=\"anchor\" id=\"import\"></a>\n# Imports and setup",
"_____no_output_____"
]
],
[
[
"from antares_client.search import search\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"<a class=\"anchor\" id=\"import\"></a>\n# Querying ANTARES alert database \n\nThis cell shows how to call elastic search with ANTARES API. It can search on ZTF object id, RA, Dec, or other properties. For our purpose, we search for ZTF alerts associated with 809 Lundia using the keyword ztf_ssnamenr. ",
"_____no_output_____"
]
],
[
[
"query = {\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \"properties.ztf_ssnamenr\": 809\n }\n },\n \n ]\n }\n }\n }\nresult_set = search(query)\n\n",
"_____no_output_____"
]
],
[
[
"<a class=\"anchor\" id=\"import\"></a>\n# Extracting light curve related properties\n\nNow the query is finished, let's extract relevant properties (MJD, Mag, Mag_err) for this moving object.",
"_____no_output_____"
]
],
[
[
"gmjd = []\ngmag = []\ngerr = []\nrmjd = []\nrmag = []\nrerr = []\nfor locus in search(query):\n for alert in locus.alerts:\n if 'ztf_ssnamenr' in alert.properties:\n if alert.properties['ant_passband'] == 'R': \n rmjd.append(alert.properties['ztf_jd'])\n rmag.append(alert.properties['ant_mag'])\n rerr.append(alert.properties['ant_magerr'])\n if alert.properties['ant_passband'] == 'g': \n gmjd.append(alert.properties['ztf_jd'])\n gmag.append(alert.properties['ant_mag'])\n gerr.append(alert.properties['ant_magerr'])\n",
"_____no_output_____"
]
],
[
[
"Having the time-series photometry in hand, we can plot the light curve.",
"_____no_output_____"
]
],
[
[
"plt.scatter(rmjd, rmag, c='red', alpha=0.5)\nplt.scatter(gmjd, gmag, c='green', alpha=0.5)\nplt.title('809 Lundia light curve from ZTF')\nplt.xlabel('Time [Julian date]')\nplt.ylabel('Magnitude in g- and r-passband')\nplt.show",
"_____no_output_____"
]
],
[
[
"Now we want to see if we can find the binary eclipses in the light curves. First we need to remove the long-term trend. This can be done by comparing with the apparent magnitude predicted by JPL/HORIZONS. It has been shown that Lundia has a period of 15.42 hours, we also fold the light curve with this period after de-trending.",
"_____no_output_____"
]
],
[
[
"from scipy import interpolate\n#we read in the predictions of the brightness (according to the distance to the sun) from JPL/HORIZONS\nlc = pd.read_csv('JPL809.csv')\njpl_jd = lc['JD']\njpl_mag = lc['Vmag']\nperiod=15.42/24.\nx0=[]\ny0=[]\nfor i in range(len(jpl_jd)):\n x0.append(float(jpl_jd[i]))\n y0.append(float(jpl_mag[i]))\n\ndef ref_mag(x):\n tck0 = interpolate.splrep(x0, y0)\n return interpolate.splev(x,tck0)\n\nmgdate=[]\nmrdate=[]\nmrmag=[]\nmgmag=[]\n\nfor i in range(len(rmjd)):\n mrdate.append((float(rmjd[i])%period)/period)\n mrmag.append(float(rmag[i])-ref_mag(float(rmjd[i])))\nfor i in range(len(gmjd)):\n mgdate.append((float(gmjd[i])%period)/period)\n mgmag.append(float(gmag[i])-ref_mag(float(gmjd[i])))\n",
"_____no_output_____"
]
],
[
[
"We can now plot the de-trend and folded light curve.",
"_____no_output_____"
]
],
[
[
"#plot folded light curve\nplt.ylim(max(mgmag)+0.5*(max(mgmag)-min(mgmag)),min(mrmag)-0.5*(max(mrmag)-min(mrmag)))\nplt.scatter(mrdate, mrmag, c='red', alpha=0.5)\nplt.scatter(mgdate, mgmag, c='green', alpha=0.5)\nplt.title('809 Lundia phase-folded light curve from ZTF')\nplt.xlabel('Phase (Period=15.42 hr)')\nplt.ylabel('Magnitude in g- and r-passband')\n\nplt.show",
"_____no_output_____"
]
],
[
[
"# Concluding remarks\n\nThe ZTF folded light curve manifests trace of binary eclipses, consistent with previous study by Kryszcynska et al. (2009) that first revealed the binary nature of 809 Lundia. \n<img src=\"809LundiaPhasedLC.jpeg\">",
"_____no_output_____"
],
[
"# Resources and references\nFurther reading:\n\nKryszcynska et al. (2009) \"New binary asteroid 809 Lundia. I. Photometry and modelling\". A&A, 501, 769:\nhttps://ui.adsabs.harvard.edu/abs/2009A%26A...501..769K/abstract\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e743b94afd4c8de5108e55a748f03a4b6df9a478 | 1,632 | ipynb | Jupyter Notebook | docs/contents/tools/files/file_pir/rewrite_to_style.ipynb | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | docs/contents/tools/files/file_pir/rewrite_to_style.ipynb | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | docs/contents/tools/files/file_pir/rewrite_to_style.ipynb | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | 19.902439 | 84 | 0.536152 | [
[
[
"# Rewrite to style",
"_____no_output_____"
]
],
[
[
"from molsysmt.tools import file_pir",
"Warning: importing 'simtk.openmm' is deprecated. Import 'openmm' instead.\n"
],
[
"#file_pir.rewrite_to_style()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
e743d11f4b457e96ea5f21da508088b8c9f0d40c | 79,505 | ipynb | Jupyter Notebook | Softmax.ipynb | Mdcrab02/Udacity_DeepLearning | ca0110b255b123450ae305d90a9f6c71580ca39c | [
"MIT"
] | null | null | null | Softmax.ipynb | Mdcrab02/Udacity_DeepLearning | ca0110b255b123450ae305d90a9f6c71580ca39c | [
"MIT"
] | null | null | null | Softmax.ipynb | Mdcrab02/Udacity_DeepLearning | ca0110b255b123450ae305d90a9f6c71580ca39c | [
"MIT"
] | null | null | null | 327.18107 | 31,190 | 0.923778 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"scores = np.array([3.0, 1.0, 0.2])",
"_____no_output_____"
]
],
[
[
"If you increase the size of your outputs, the model becomes very confident about predictions\n scores x 10\n\nIf you reduce the size of your outputs, the model becomes very unsure\n scores / 10\n\nWe want it to be unsure in the beginning and become more confident as it learns",
"_____no_output_____"
]
],
[
[
"def softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n #pass # TODO: Compute and return softmax(x)\n e_x = np.exp(x)\n sum_e_x = np.sum(e_x, axis = 0)\n #return np.exp(x) / np.sum(np.exp(x), axis = 0)\n return e_x / sum_e_x\n ",
"_____no_output_____"
],
[
"print(softmax(scores))",
"[ 0.8360188 0.11314284 0.05083836]\n"
],
[
"# Plot softmax curves\nx = np.arange(-2.0, 6.0, 0.1)\nscores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])\n\nplt.plot(x, softmax(scores).T, linewidth=2)\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"If you multiply these by 10, their probabilities get super close to 1 or 0\nmult = np.multiply(np.array(scores),10)\nprint(softmax(mult))",
"_____no_output_____"
]
],
[
[
"# Plot softmax curves\nx = np.arange(-2.0, 6.0, 0.1)\nscores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])\n\nplt.plot(x, softmax(scores * 10).T, linewidth=2)\nplt.show()",
"_____no_output_____"
]
],
[
[
"If you divide them by 10\ndiv = np.divide(np.array(scores),10)\nprint(softmax(div))",
"_____no_output_____"
]
],
[
[
"# Plot softmax curves\nx = np.arange(-2.0, 6.0, 0.1)\nscores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])\n\nplt.plot(x, softmax(scores / 10).T, linewidth=2)\nplt.show()",
"_____no_output_____"
]
],
[
[
"So we wanted our probabilities for the correct class to be close to one (like 0.7)\n and all of the others to be close to zero (like 0.2 and 0.1)\n\nWe can create a vector as long as there are classes with 1.0 for the correct class and zero\n for everything else. This is one-hot encoding.\n\nFor classes a, b, c, d\nThe vectors could be as follows\na [0,0,0,1]\nb [0,0,1,0]\nc [0,1,0,0]\nd [1,0,0,0]\n\nThis is useful because then the output of the classifier (the numbers 0.7,0.2,0.1 in this case)\n can be compared to the vector for our labels (the one-hot encoding vector)\n\nThe natural distance between the two is called cross-entropy D(S,L), which is not symmetric\n\nSo, from start to finish, you have inputs which are put into a linear model\n These outputs are then logits\nThese logits (like the variable scores above) are used as input for softmax\n The output is S(y) (the 0.7,0.2,0.1 above)\n The distance between these and the 1-hot labels is computed with D(S,L)\nAs a result, you get the probabilities for your classification\nMathematically D(S(Wx+b),L)\n\nBut how do you get your weights, W, and bias term, b, to get the softmax to do what you want it\n to do? i.e. minimizing cross-entropy\n\nOne way is to measure that distance averaged over the entire training set for all inputs and\n labels\nThis is called the training loss\n loss = 1/n * sum(D(S(Wx+b),L))\nThis loss, or average cross entropy over the training set, is a huge matrix added up into a\n huge sum\nSo how do we traverse this big loss? with a familiar loss function: gradient descent\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e743df5bf9165889e411c76540057dd0d23d990a | 632,503 | ipynb | Jupyter Notebook | Him_Assignment2.ipynb | vathanahim/IAF603_Assignment2 | 4363958b02ab5dc4841941f0e50e1d674bcedb6f | [
"MIT"
] | null | null | null | Him_Assignment2.ipynb | vathanahim/IAF603_Assignment2 | 4363958b02ab5dc4841941f0e50e1d674bcedb6f | [
"MIT"
] | null | null | null | Him_Assignment2.ipynb | vathanahim/IAF603_Assignment2 | 4363958b02ab5dc4841941f0e50e1d674bcedb6f | [
"MIT"
] | null | null | null | 57.255635 | 72,556 | 0.596933 | [
[
[
"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\ndata_school = '/Users/vathanahim/Documents/UNCG-FALL2020/IAF603/IAF603_Assignment2/Chicago_Public_Schools_-_Progress_Report_Cards__2011-2012-v3.csv'\ndata_census = '/Users/vathanahim/Documents/UNCG-FALL2020/IAF603/IAF603_Assignment2/Census_Data_-_Selected_socioeconomic_indicators_in_Chicago__2008___2012-v2.csv'",
"_____no_output_____"
]
],
[
[
"# PART 1",
"_____no_output_____"
]
],
[
[
"df_census = pd.read_csv(data_census)\ndf_school = pd.read_csv(data_school)\npd.set_option(\"display.max_columns\", 200)\npd.set_option(\"display.max_rows\", 200)",
"_____no_output_____"
]
],
[
[
"# Census Data",
"_____no_output_____"
]
],
[
[
"df_census.head(5)",
"_____no_output_____"
],
[
"pd.set_option(\"display.max_rows\", 100)\ndf_census.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 78 entries, 0 to 77\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 COMMUNITY_AREA_NUMBER 77 non-null float64\n 1 COMMUNITY_AREA_NAME 78 non-null object \n 2 PERCENT OF HOUSING CROWDED 78 non-null float64\n 3 PERCENT HOUSEHOLDS BELOW POVERTY 78 non-null float64\n 4 PERCENT AGED 16+ UNEMPLOYED 78 non-null float64\n 5 PERCENT AGED 25+ WITHOUT HIGH SCHOOL DIPLOMA 78 non-null float64\n 6 PERCENT AGED UNDER 18 OR OVER 64 78 non-null float64\n 7 PER_CAPITA_INCOME 78 non-null int64 \n 8 HARDSHIP_INDEX 77 non-null float64\ndtypes: float64(7), int64(1), object(1)\nmemory usage: 5.6+ KB\n"
],
[
"print(\"Dimension: \" +str(df_census.shape))\ndf_census.isnull().sum()",
"Dimension: (78, 9)\n"
]
],
[
[
"The df_census dataframe contains census data that shows the socio-economic conditions in Chicago. The three primary data\ntype in this data frame is float, int, and pandas object. This dataframe has 9 variables (columns) and \n78 observations (rows). When checking for the number of missing values in this dataframe, it was founded that columns\n'COMMUNITY_AREA_NUMBER' and 'HARDSHIP_INDEX' have one missing value. ",
"_____no_output_____"
],
[
"# School Data",
"_____no_output_____"
]
],
[
[
"df_school.head(5)",
"_____no_output_____"
],
[
"df_school.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 566 entries, 0 to 565\nData columns (total 78 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 School ID 566 non-null int64 \n 1 NAME_OF_SCHOOL 566 non-null object \n 2 Elementary, Middle, or High School 566 non-null object \n 3 Street Address 566 non-null object \n 4 City 566 non-null object \n 5 State 566 non-null object \n 6 ZIP Code 566 non-null int64 \n 7 Phone Number 566 non-null object \n 8 Link 565 non-null object \n 9 Network Manager 566 non-null object \n 10 Collaborative Name 566 non-null object \n 11 Adequate Yearly Progress Made? 566 non-null object \n 12 Track Schedule 566 non-null object \n 13 CPS Performance Policy Status 566 non-null object \n 14 CPS Performance Policy Level 566 non-null object \n 15 HEALTHY_SCHOOL_CERTIFIED 566 non-null object \n 16 Safety Icon 566 non-null object \n 17 SAFETY_SCORE 513 non-null float64\n 18 Family Involvement Icon 566 non-null object \n 19 Family Involvement Score 566 non-null object \n 20 Environment Icon 566 non-null object \n 21 Environment Score 513 non-null float64\n 22 Instruction Icon 566 non-null object \n 23 Instruction Score 513 non-null float64\n 24 Leaders Icon 566 non-null object \n 25 Leaders Score 566 non-null object \n 26 Teachers Icon 566 non-null object \n 27 Teachers Score 566 non-null object \n 28 Parent Engagement Icon 566 non-null object \n 29 Parent Engagement Score 566 non-null object \n 30 Parent Environment Icon 566 non-null object \n 31 Parent Environment Score 566 non-null object \n 32 AVERAGE_STUDENT_ATTENDANCE 565 non-null object \n 33 Rate of Misconducts (per 100 students) 566 non-null float64\n 34 Average Teacher Attendance 566 non-null object \n 35 Individualized Education Program Compliance Rate 566 non-null object \n 36 Pk-2 Literacy % 566 non-null object \n 37 Pk-2 Math % 566 non-null object \n 38 Gr3-5 Grade Level Math % 566 non-null object \n 39 Gr3-5 Grade Level Read % 566 non-null object \n 
40 Gr3-5 Keep Pace Read % 566 non-null object \n 41 Gr3-5 Keep Pace Math % 566 non-null object \n 42 Gr6-8 Grade Level Math % 566 non-null object \n 43 Gr6-8 Grade Level Read % 566 non-null object \n 44 Gr6-8 Keep Pace Math% 566 non-null object \n 45 Gr6-8 Keep Pace Read % 566 non-null object \n 46 Gr-8 Explore Math % 566 non-null object \n 47 Gr-8 Explore Read % 566 non-null object \n 48 ISAT Exceeding Math % 476 non-null float64\n 49 ISAT Exceeding Reading % 476 non-null float64\n 50 ISAT Value Add Math 468 non-null float64\n 51 ISAT Value Add Read 468 non-null float64\n 52 ISAT Value Add Color Math 566 non-null object \n 53 ISAT Value Add Color Read 566 non-null object \n 54 Students Taking Algebra % 566 non-null object \n 55 Students Passing Algebra % 566 non-null object \n 56 9th Grade EXPLORE (2009) 566 non-null object \n 57 9th Grade EXPLORE (2010) 566 non-null object \n 58 10th Grade PLAN (2009) 566 non-null object \n 59 10th Grade PLAN (2010) 566 non-null object \n 60 Net Change EXPLORE and PLAN 566 non-null object \n 61 11th Grade Average ACT (2011) 566 non-null object \n 62 Net Change PLAN and ACT 566 non-null object \n 63 College Eligibility % 566 non-null object \n 64 Graduation Rate % 566 non-null object \n 65 College Enrollment Rate % 566 non-null object \n 66 COLLEGE_ENROLLMENT 566 non-null int64 \n 67 General Services Route 566 non-null int64 \n 68 Freshman on Track Rate % 566 non-null object \n 69 X_COORDINATE 566 non-null float64\n 70 Y_COORDINATE 566 non-null float64\n 71 Latitude 566 non-null float64\n 72 Longitude 566 non-null float64\n 73 COMMUNITY_AREA_NUMBER 566 non-null int64 \n 74 COMMUNITY_AREA_NAME 566 non-null object \n 75 Ward 566 non-null int64 \n 76 Police District 566 non-null int64 \n 77 Location 566 non-null object \ndtypes: float64(12), int64(7), object(59)\nmemory usage: 345.0+ KB\n"
],
[
"print(\"Dimension: \" +str(df_school.shape))\ndf_school.isnull().sum()",
"Dimension: (566, 78)\n"
]
],
[
[
"The df_school dataframe contains data about public school assesment in the Chicago area. This dataframe has float, int \nand pandas object data types. df_school dataframe consists of 78 variables (columns) and 566 observations (rows). This \ndataframe has missing values in columns: \"LINK\", \"SAFETY_SCORE\", \"ENVIRONMENT SCORE\", \"INSTRUCTION SCORE\", \"AVERAGE_STUDENT_ATTENDANCE\",\"ISAT Exceeding Math %\", 'ISAT Exceeding Reading %', \"ISAT Value Add Math\", and \"ISAT Value Add Read\". This dataset has no variables with respective to time as each observation information is dependend on the School ID.",
"_____no_output_____"
]
],
[
[
"l = ['School ID','COMMUNITY_AREA_NUMBER','NAME_OF_SCHOOL','SAFETY_SCORE','Environment Score','Instruction Score','Parent Engagement Score',\n 'Average Teacher Attendance','COMMUNITY_AREA_NAME','College Enrollment Rate %']\ndf_school_selected = df_school[l]",
"_____no_output_____"
],
[
"df_school_selected.head(10)",
"_____no_output_____"
],
[
"df_school_selected.isnull().sum()",
"_____no_output_____"
]
],
[
[
"List-wise deletion was used for schools that doesn't have 'Parent Engagement Score'. However, for \n'College Enrollment Rate', we will not be deleting the missing values \nsince only high schools have them and pandas operations will be intrinsically skip those missing values. Instead it will be replace with 'nan. 'SAFETY_SCORE','Environment Score','Instruction Score' missing values will be replace with the mean value of the population for each of their respective column because this is not dependent on the level of education (elementary,middle,high).",
"_____no_output_____"
]
],
[
[
"#drops rows that conatins missing value for parent Engagement Score (list-wise deletion)\ndf_school_selected = df_school_selected[~df_school_selected['Parent Engagement Score'].isin(['NDA'])]\ndf_school_selected['Parent Engagement Score'].isnull().sum()",
"_____no_output_____"
],
[
"#replacing NDA with nan\ndf_school_selected = df_school_selected.replace('NDA', np.nan)",
"_____no_output_____"
],
[
"#replacing nan with mean for 'SAFETY_SCORE','Environment Score','Instruction Score'\nmean_list = ['SAFETY_SCORE','Environment Score','Instruction Score']\n\nfor i in mean_list:\n mean = int(df_school_selected[i].mean())\n df_school_selected[i] = df_school_selected[i].replace(np.nan, mean)",
"_____no_output_____"
],
[
"df_school_selected.isnull().sum()",
"_____no_output_____"
]
],
[
[
"# Part 2",
"_____no_output_____"
]
],
[
[
"import sqlite3",
"_____no_output_____"
],
[
"assign2db = \"assign2.db\"\nconn = sqlite3.connect(assign2db)\nprint(conn)",
"<sqlite3.Connection object at 0x7fea710efab0>\n"
],
[
"conn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\n#Drop public school table if it exits\ncursor.execute(\"DROP TABLE IF EXISTS `PUBLIC_SCHOOL`\")\nprint(\"Table dropped\")\nconn.close()",
"Table dropped\n"
]
],
[
[
"Community area number or community area name can serve as foreign key but it's prefered that a non-string data type is foreign key",
"_____no_output_____"
]
],
[
[
"#create public school table with appropriate fields\nconn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\ntry:\n cursor.execute(\"\"\"\n CREATE TABLE PUBLIC_SCHOOL (\n SCHOOL_ID INTEGER PRIMARY KEY,\n COMMUNITY_AREA_NUMBER INTEGER,\n NAME_OF_SCHOOL TEXT NOT NULL,\n SAFETY_SCORE FLOAT DEFAULT 0,\n Environment_Score FLOAT DEFAULT 0,\n Instruction_Score FLOAT DEFAULT 0,\n Parent_Engagement_Score INT DEFAULT 0,\n Average_Teacher_Attendance FLOAT DEFAULT 0,\n COMMUNITY_AREA_NAME TEXT NOT NULL,\n College_Enrollment_Rate_perc FLOAT DEFAULT 0\n );\n \"\"\")\n print(\"Success Creation\")\nexcept Exception as e:\n print(str(e))\n print(\"Table creation failed\")\nfinally:\n conn.close()",
"Success Creation\n"
],
[
"df_school_selected.isnull().sum()",
"_____no_output_____"
],
[
"#convert to list so it can be inserted into table\nschool_list = df_school_selected.values.tolist()\nschool_list",
"_____no_output_____"
],
[
"#inserting the fields into the table\nconn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\ntry:\n cursor.executemany(\"\"\"\n INSERT INTO PUBLIC_SCHOOL (SCHOOL_ID, COMMUNITY_AREA_NUMBER, NAME_OF_SCHOOL, SAFETY_SCORE, \n Environment_Score, Instruction_Score, Parent_Engagement_Score, Average_Teacher_Attendance, COMMUNITY_AREA_NAME,\n College_Enrollment_Rate_perc)\n VALUES (?,?,?,?,?,?,?,?,?,?)\n \"\"\", school_list)\n conn.commit()\n print(\"Insert Successfully\")\nexcept Exception as e:\n print(str(e))\n print(\"Insert failed\")\nfinally:\n conn.close()",
"Insert Successfully\n"
],
[
"#test to see if value is inserted properly\nconn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\ncursor.execute(\"SELECT * FROM PUBLIC_SCHOOL\")\nresult = cursor.fetchall()\nconn.close\nfor i in result:\n print(i)",
"(609679, 19, 'Charles Allen Prosser Career Academy High School', 59.0, 53.0, 51.0, 46, '94.40%', 'BELMONT CRAGIN', 58.0)\n(609682, 61, 'Ellen H Richards Career Academy High School', 30.0, 32.0, 19.0, 43, '95.70%', 'NEW CITY', 43.1)\n(609693, 23, 'George Westinghouse High School', 74.0, 77.0, 80.0, 54, '96.80%', 'HUMBOLDT PARK', None)\n(609694, 62, 'John Hancock College Preparatory High School', 51.0, 44.0, 41.0, 50, '95.90%', 'WEST ELSDON', 45.9)\n(609695, 4, 'Roald Amundsen High School', 51.0, 43.0, 42.0, 43, '95.90%', 'LINCOLN SQUARE', 49.1)\n(609698, 70, 'William J Bogan High School', 20.0, 18.0, 29.0, 41, '94.30%', 'ASHBURN', 50.0)\n(609702, 28, 'Richard T Crane Technical Preparatory High School', 43.0, 50.0, 48.0, 47, '93.90%', 'NEAR WEST SIDE', 32.6)\n(609711, 67, 'William Rainey Harper High School', 22.0, 39.0, 42.0, 49, '94.70%', 'WEST ENGLEWOOD', 40.3)\n(609719, 6, 'Lake View High School', 64.0, 52.0, 43.0, 49, '96.20%', 'LAKE VIEW', 62.8)\n(609726, 49, 'Gwendolyn Brooks College Preparatory Academy High School', 64.0, 44.0, 49.0, 47, '95.20%', 'ROSELAND', 87.9)\n(609733, 1, 'Roger C Sullivan High School', 30.0, 34.0, 34.0, 44, '95.80%', 'ROGERS PARK', 52.1)\n(609735, 61, 'Edward Tilden Career Community Academy High School', 34.0, 45.0, 37.0, 51, '95.20%', 'NEW CITY', 37.0)\n(609744, 13, 'Northside Learning Center High School', 99.0, 81.0, 34.0, 62, '95.60%', 'NORTH PARK', None)\n(609745, 67, 'Southside Occupational Academy High School', 50.0, 48.0, 49.0, 63, '95.20%', 'WEST ENGLEWOOD', None)\n(609749, 13, 'Northside College Preparatory High School', 99.0, 99.0, 88.0, 57, '96.80%', 'NORTH PARK', 90.7)\n(609751, 39, 'Dr Martin Luther King Jr College Prep High School', 50.0, 48.0, 49.0, 56, '96.30%', 'KENWOOD', 85.1)\n(609753, 74, 'Chicago High School for Agricultural Sciences', 87.0, 49.0, 47.0, 52, '95.20%', 'MOUNT GREENWOOD', 79.6)\n(609755, 28, 'Whitney M Young Magnet High School', 95.0, 69.0, 67.0, 53, '94.90%', 'NEAR WEST SIDE', 88.3)\n(609766, 15, 
'Jacqueline B Vaughn Occupational High School', 57.0, 33.0, 20.0, 63, '95.50%', 'PORTAGE PARK', None)\n(609769, 33, 'Ray Graham Training Center High School', 90.0, 52.0, 39.0, 53, '94.30%', 'NEAR SOUTH SIDE', None)\n(609772, 52, 'Jane Addams Elementary School', 56.0, 44.0, 46.0, 53, '96.80%', 'EAST SIDE', None)\n(609773, 6, 'Louis A Agassiz Elementary School', 53.0, 23.0, 21.0, 56, '96.80%', 'LAKE VIEW', None)\n(609774, 7, 'Louisa May Alcott Elementary School', 50.0, 48.0, 49.0, 60, '97.10%', 'LINCOLN PARK', None)\n(609775, 67, 'John P Altgeld Elementary School', 48.0, 64.0, 58.0, 46, '95.50%', 'WEST ENGLEWOOD', None)\n(609777, 60, 'Phillip D Armour Elementary School', 36.0, 34.0, 14.0, 53, '95.80%', 'BRIDGEPORT', None)\n(609780, 22, 'Ames Middle School', 38.0, 27.0, 35.0, 50, '96.00%', 'LOGAN SQUARE', None)\n(609782, 5, 'John J Audubon Elementary School', 95.0, 75.0, 60.0, 61, '96.70%', 'NORTH CENTER', None)\n(609786, 45, 'Avalon Park Elementary School', 32.0, 38.0, 32.0, 46, '96.40%', 'AVALON PARK', None)\n(609788, 72, 'Alice L Barnard Computer Math & Science Center Elementary School', 46.0, 39.0, 58.0, 69, '95.00%', 'BEVERLY', None)\n(609789, 20, 'John Barry Elementary School', 35.0, 70.0, 81.0, 51, '96.10%', 'HERMOSA', None)\n(609790, 71, 'Clara Barton Elementary School', 25.0, 28.0, 34.0, 51, '96.40%', 'AUBURN GRESHAM', None)\n(609791, 68, 'Perkins Bass Elementary School', 28.0, 47.0, 57.0, 47, '96.40%', 'ENGLEWOOD', None)\n(609792, 16, 'Newton Bateman Elementary School', 55.0, 50.0, 47.0, 52, '96.70%', 'IRVING PARK', None)\n(609794, 14, 'Thomas A Edison Regional Gifted Center Elementary School', 91.0, 64.0, 56.0, 55, '96.30%', 'ALBANY PARK', None)\n(609795, 25, 'George Rogers Clark Elementary School', 39.0, 32.0, 36.0, 47, '96.40%', 'AUSTIN', None)\n(609798, 16, 'Hiram H Belding Elementary School', 64.0, 67.0, 69.0, 53, '96.20%', 'IRVING PARK', None)\n(609800, 49, 'Frank I Bennett Elementary School', 46.0, 55.0, 66.0, 47, '95.90%', 'ROSELAND', None)\n(609803, 
6, 'James G Blaine Elementary School', 99.0, 76.0, 74.0, 40, '96.00%', 'LAKE VIEW', None)\n(609804, 2, 'Daniel Boone Elementary School', 52.0, 47.0, 48.0, 50, '95.40%', 'WEST RIDGE', None)\n(609805, 71, 'Scott Joplin Elementary School', 35.0, 44.0, 53.0, 52, '94.60%', 'AUBURN GRESHAM', None)\n(609806, 43, 'Myra Bradwell Communications Arts & Sciences Elementary School', 31.0, 41.0, 50.0, 49, '96.40%', 'SOUTH SHORE', None)\n(609807, 70, 'Lionel Hampton Fine & Performing Arts Elementary School', 42.0, 34.0, 28.0, 46, '96.60%', 'ASHBURN', None)\n(609808, 49, 'Alex Haley Elementary Academy', 28.0, 58.0, 60.0, 47, '94.70%', 'ROSELAND', None)\n(609810, 17, 'Norman A Bridge Elementary School', 67.0, 49.0, 35.0, 51, '96.60%', 'DUNNING', None)\n(609811, 51, 'Orville T Bright Elementary School', 18.0, 28.0, 27.0, 41, '95.30%', 'SOUTH DEERING', None)\n(609812, 28, 'William H Brown Elementary School', 36.0, 58.0, 78.0, 46, '94.70%', 'NEAR WEST SIDE', None)\n(609813, 69, 'Charles S Brownell Elementary School', 20.0, 79.0, 82.0, 48, '95.50%', 'GREATER GRAND CROSSING', None)\n(609815, 43, 'Edward A Bouchet Math & Science Academy Elementary School', 33.0, 55.0, 50.0, 50, '95.90%', 'SOUTH SHORE', None)\n(609817, 4, 'Lyman A Budlong Elementary School', 59.0, 33.0, 38.0, 50, '95.70%', 'LINCOLN SQUARE', None)\n(609818, 19, 'Luther Burbank Elementary School', 37.0, 42.0, 34.0, 47, '95.50%', 'BELMONT CRAGIN', None)\n(609819, 40, 'Edmond Burke Elementary School', 1.0, 13.0, 22.0, 48, '95.50%', 'WASHINGTON PARK', None)\n(609820, 6, 'Augustus H Burley Elementary School', 99.0, 78.0, 65.0, 59, '95.00%', 'LAKE VIEW', None)\n(609821, 51, 'Burnham Elementary Inclusive Academy', 61.0, 85.0, 99.0, 54, '96.90%', 'SOUTH DEERING', None)\n(609826, 30, 'Rosario Castellanos Elementary School', 36.0, 34.0, 20.0, 46, '96.90%', 'SOUTH LAWNDALE', None)\n(609827, 44, 'Burnside Elementary Scholastic Academy', 51.0, 58.0, 70.0, 48, '94.90%', 'CHATHAM', None)\n(609828, 24, 'Jonathan Burr Elementary School', 
71.0, 68.0, 51.0, 56, '96.90%', 'WEST TOWN', None)\n(609829, 58, 'John C Burroughs Elementary School', 50.0, 39.0, 38.0, 50, '96.10%', 'BRIGHTON PARK', None)\n(609830, 25, 'Milton Brunson Math & Science Specialty Elementary School ', 30.0, 30.0, 45.0, 47, '95.00%', 'AUSTIN', None)\n(609832, 56, 'Michael M Byrne Elementary School', 63.0, 35.0, 38.0, 49, '95.80%', 'GARFIELD RIDGE', None)\n(609833, 45, 'Charles P Caldwell Academy of Math & Science Elementary School', 25.0, 40.0, 44.0, 44, '94.70%', 'AVALON PARK', None)\n(609834, 30, 'Little Village Elementary School', 45.0, 52.0, 66.0, 47, '96.70%', 'SOUTH LAWNDALE', None)\n(609835, 23, 'Daniel R Cameron Elementary School', 42.0, 49.0, 50.0, 46, '96.20%', 'HUMBOLDT PARK', None)\n(609836, 17, 'Arthur E Canty Elementary School', 87.0, 70.0, 64.0, 66, '96.10%', 'DUNNING', None)\n(609837, 42, 'Andrew Carnegie Elementary School', 48.0, 37.0, 63.0, 49, '94.40%', 'WOODLAWN', None)\n(609839, 70, 'Carroll-Rosenwald Specialty Elementary School', 35.0, 12.0, 12.0, 48, '95.60%', 'ASHBURN', None)\n(609842, 63, 'Rachel Carson Elementary School', 38.0, 45.0, 56.0, 50, '97.70%', 'GAGE PARK', None)\n(609844, 40, 'William W Carter Elementary School', 13.0, 33.0, 35.0, 46, '95.90%', 'WASHINGTON PARK', None)\n(609845, 54, 'George Washington Carver Primary School', 25.0, 33.0, 30.0, 52, '94.70%', 'RIVERDALE', None)\n(609848, 54, 'Ira F Aldridge Elementary School', 39.0, 36.0, 39.0, 49, '96.30%', 'RIVERDALE', None)\n(609849, 74, 'George F Cassell Elementary School', 76.0, 57.0, 36.0, 52, '95.10%', 'MOUNT GREENWOOD', None)\n(609850, 6, 'Horace Greeley Elementary School', 65.0, 48.0, 38.0, 52, '96.00%', 'LAKE VIEW', None)\n(609852, 4, 'Eliza Chappell Elementary School', 70.0, 53.0, 51.0, 50, '96.70%', 'LINCOLN SQUARE', None)\n(609854, 24, 'Frederic Chopin Elementary School', 86.0, 99.0, 99.0, 49, '96.00%', 'WEST TOWN', None)\n(609855, 63, 'Walter S Christopher Elementary School', 50.0, 48.0, 49.0, 52, '94.10%', 'GAGE PARK', None)\n(609857, 
16, 'Grover Cleveland Elementary School', 64.0, 58.0, 47.0, 49, '96.30%', 'IRVING PARK', None)\n(609861, 75, 'Henry R Clissold Elementary School', 45.0, 39.0, 61.0, 52, '96.90%', 'MORGAN PARK', None)\n(609862, 46, 'Edward Coles Elementary Language Academy', 22.0, 40.0, 54.0, 50, '95.00%', 'SOUTH CHICAGO', None)\n(609863, 24, 'Christopher Columbus Elementary School', 66.0, 57.0, 55.0, 54, '96.90%', 'WEST TOWN', None)\n(609864, 71, 'John W Cook Elementary School', 50.0, 48.0, 49.0, 45, '94.00%', 'AUBURN GRESHAM', None)\n(609865, 1, 'Jordan Elementary Community School', 61.0, 57.0, 49.0, 52, '95.10%', 'ROGERS PARK', None)\n(609866, 5, 'John C Coonley Elementary School', 87.0, 65.0, 39.0, 58, '96.50%', 'NORTH CENTER', None)\n(609869, 67, 'Anna R. Langford Community Academy', 31.0, 54.0, 32.0, 53, '95.40%', 'WEST ENGLEWOOD', None)\n(609871, 75, 'Barbara Vick Early Childhood & Family Center', 50.0, 48.0, 49.0, 66, '95.50%', 'MORGAN PARK', None)\n(609872, 31, 'Manuel Perez Elementary School', 45.0, 42.0, 45.0, 52, '96.60%', 'LOWER WEST SIDE', None)\n(609873, 29, 'Crown Community Academy of Fine Arts Center Elementary School', 27.0, 51.0, 52.0, 49, '96.70%', 'NORTH LAWNDALE', None)\n(609875, 22, 'Charles R Darwin Elementary School', 58.0, 44.0, 34.0, 49, '96.20%', 'LOGAN SQUARE', None)\n(609876, 58, 'Nathan S Davis Elementary School', 43.0, 41.0, 46.0, 48, '95.90%', 'BRIGHTON PARK', None)\n(609879, 70, 'Charles Gates Dawes Elementary School', 44.0, 27.0, 30.0, 50, '95.60%', 'ASHBURN', None)\n(609880, 2, 'Stephen Decatur Classical Elementary School', 99.0, 99.0, 99.0, 52, '95.40%', 'WEST RIDGE', None)\n(609881, 26, 'Edward C Delano Elementary School', 38.0, 66.0, 52.0, 55, '94.70%', 'WEST GARFIELD PARK', None)\n(609884, 17, 'William E Dever Elementary School', 67.0, 60.0, 78.0, 50, '95.90%', 'DUNNING', None)\n(609885, 61, 'Dewey Elementary Academy of Fine Arts', 41.0, 42.0, 35.0, 52, '95.50%', 'NEW CITY', None)\n(609888, 27, 'Mary Mapes Dodge Elementary Renaissance 
Academy', 57.0, 68.0, 73.0, 56, '97.10%', 'EAST GARFIELD PARK', None)\n(609891, 35, 'James R Doolittle Jr Elementary School', 30.0, 25.0, 46.0, 44, '94.70%', 'DOUGLAS', None)\n(609893, 64, 'John C Dore Elementary School', 74.0, 69.0, 79.0, 58, '96.70%', 'CLEARING', None)\n(609894, 35, 'John B Drake Elementary School', 59.0, 57.0, 46.0, 50, '95.90%', 'DOUGLAS', None)\n(609896, 22, 'Thomas Drummond Elementary School', 72.0, 59.0, 46.0, 58, '96.70%', 'LOGAN SQUARE', None)\n(609897, 67, 'Charles W Earle Elementary School', 24.0, 62.0, 71.0, 53, '94.60%', 'WEST ENGLEWOOD', None)\n(609899, 9, 'Christian Ebinger Elementary School', 78.0, 66.0, 50.0, 52, '95.20%', 'EDISON PARK', None)\n(609900, 49, 'George W Curtis Elementary School', 23.0, 43.0, 58.0, 68, '98.10%', 'ROSELAND', None)\n(609901, 12, 'Edgebrook Elementary School', 99.0, 51.0, 53.0, 56, '96.60%', 'FOREST GLEN', None)\n(609902, 53, 'Ralph H Metcalfe Elementary Community Academy', 50.0, 48.0, 49.0, 48, '95.50%', 'WEST PULLMAN', None)\n(609903, 57, 'Richard Edwards Elementary School', 48.0, 38.0, 41.0, 49, '96.80%', 'ARCHER HEIGHTS', None)\n(609904, 25, 'Edward K Ellington Elementary School', 30.0, 37.0, 55.0, 49, '97.30%', 'AUSTIN', None)\n(609906, 25, 'Robert Emmet Elementary School', 37.0, 64.0, 71.0, 51, '94.80%', 'AUSTIN', None)\n(609907, 27, 'Leif Ericson Elementary Scholastic Academy', 33.0, 49.0, 52.0, 45, '95.10%', 'EAST GARFIELD PARK', None)\n(609908, 75, 'Esmond Elementary School', 36.0, 49.0, 43.0, 49, '95.70%', 'MORGAN PARK', None)\n(609910, 19, 'Laughlin Falconer Elementary School', 54.0, 48.0, 27.0, 49, '97.00%', 'BELMONT CRAGIN', None)\n(609912, 11, 'James B Farnsworth Elementary School', 55.0, 40.0, 38.0, 55, '95.60%', 'JEFFERSON PARK', None)\n(609913, 67, 'Elaine O Goodlow Elementary Magnet School', 44.0, 52.0, 56.0, 46, '96.00%', 'WEST ENGLEWOOD', None)\n(609917, 73, 'Fernwood Elementary School', 29.0, 17.0, 43.0, 48, '96.10%', 'WASHINGTON HEIGHTS', None)\n(609918, 1, 'Eugene Field Elementary 
School', 42.0, 47.0, 37.0, 44, '96.70%', 'ROGERS PARK', None)\n(609919, 42, 'John Fiske Elementary School', 13.0, 37.0, 55.0, 47, '91.70%', 'WOODLAWN', None)\n(609920, 30, 'Gerald Delgado Kanoon Elementary Magnet School', 44.0, 48.0, 48.0, 47, '95.60%', 'SOUTH LAWNDALE', None)\n(609921, 30, 'Telpochcalli Elementary School', 42.0, 19.0, 22.0, 55, '94.70%', 'SOUTH LAWNDALE', None)\n(609922, 19, 'Belmont-Cragin Elementary School', 75.0, 67.0, 66.0, 55, '96.80%', 'BELMONT CRAGIN', None)\n(609924, 73, 'Fort Dearborn Elementary School', 34.0, 61.0, 71.0, 47, '95.90%', 'WASHINGTON HEIGHTS', None)\n(609925, 27, 'Joseph Kellman Corporate Community Elementary School', 58.0, 42.0, 45.0, 59, '95.80%', 'EAST GARFIELD PARK', None)\n(609926, 8, 'Franklin Elementary Fine Arts Center', 87.0, 60.0, 55.0, 58, '95.30%', 'NEAR NORTH SIDE', None)\n(609927, 71, 'Foster Park Elementary School', 11.0, 20.0, 22.0, 46, '94.70%', 'AUBURN GRESHAM', None)\n(609928, 38, 'Melville W Fuller Elementary School', 22.0, 32.0, 33.0, 51, '93.90%', 'GRAND BOULEVARD', None)\n(609929, 61, 'Robert Fulton Elementary School', 46.0, 63.0, 63.0, 45, '96.80%', 'NEW CITY', None)\n(609930, 22, 'Frederick Funston Elementary School', 16.0, 14.0, 16.0, 40, '95.40%', 'LOGAN SQUARE', None)\n(609932, 53, 'Jesse Owens Elementary Community Academy', 50.0, 48.0, 49.0, 51, '93.50%', 'WEST PULLMAN', None)\n(609933, 1, 'Stephen F Gale Elementary Community Academy', 39.0, 35.0, 27.0, 45, '95.20%', 'ROGERS PARK', None)\n(609935, 52, 'Matthew Gallistel Elementary Language Academy', 49.0, 37.0, 32.0, 50, '96.30%', 'EAST SIDE', None)\n(609937, 10, 'John W Garvy Elementary School', 60.0, 50.0, 51.0, 56, '96.50%', 'NORWOOD PARK', None)\n(609938, 30, 'Joseph E Gary Elementary School', 32.0, 29.0, 28.0, 45, '96.20%', 'SOUTH LAWNDALE', None)\n(609941, 67, 'Asa Philip Randolph Elementary School', 15.0, 41.0, 48.0, 45, '95.30%', 'WEST ENGLEWOOD', None)\n(609942, 22, 'Johann W von Goethe Elementary School', 75.0, 68.0, 52.0, 57, '97.00%', 
'LOGAN SQUARE', None)\n(609943, 53, 'Samuel Gompers Fine Arts Options Elementary School', 34.0, 48.0, 52.0, 49, '96.00%', 'WEST PULLMAN', None)\n(609947, 61, 'Alexander Graham Elementary School', 49.0, 31.0, 33.0, 48, '95.00%', 'NEW CITY', None)\n(609949, 15, 'William P Gray Elementary School', 57.0, 44.0, 35.0, 47, '95.90%', 'PORTAGE PARK', None)\n(609950, 30, 'Josefa Ortiz De Dominguez Elementary School', 50.0, 48.0, 49.0, 51, '97.00%', 'SOUTH LAWNDALE', None)\n(609951, 39, 'Ariel Elementary Community Academy', 50.0, 48.0, 49.0, 51, '96.00%', 'KENWOOD', None)\n(609952, 59, 'Nathanael Greene Elementary School', 50.0, 48.0, 49.0, 48, '96.20%', 'MCKINLEY PARK', None)\n(609954, 27, 'John Milton Gregory Elementary School', 44.0, 39.0, 43.0, 49, '94.60%', 'EAST GARFIELD PARK', None)\n(609955, 71, 'Walter Q Gresham Elementary School', 25.0, 7.0, 11.0, 48, '95.00%', 'AUBURN GRESHAM', None)\n(609956, 64, 'Robert L Grimes Elementary School', 79.0, 56.0, 59.0, 53, '95.60%', 'CLEARING', None)\n(609958, 58, 'Frank W Gunsaulus Elementary Scholastic Academy', 46.0, 30.0, 45.0, 49, '96.00%', 'BRIGHTON PARK', None)\n(609959, 34, 'John Charles Haines Elementary School', 32.0, 37.0, 37.0, 43, '96.20%', 'ARMOUR SQUARE', None)\n(609960, 64, 'Nathan Hale Elementary School', 55.0, 51.0, 56.0, 55, '96.20%', 'CLEARING', None)\n(609961, 46, 'Ninos Heroes Elementary Academic Center', 36.0, 61.0, 55.0, 48, '95.00%', 'SOUTH CHICAGO', None)\n(609963, 6, 'Alexander Hamilton Elementary School', 73.0, 60.0, 59.0, 57, '97.40%', 'LAKE VIEW', None)\n(609964, 61, 'John H Hamline Elementary School', 33.0, 29.0, 43.0, 44, '95.70%', 'NEW CITY', None)\n(609966, 30, 'Charles G Hammond Elementary School', 40.0, 41.0, 43.0, 43, '95.20%', 'SOUTH LAWNDALE', None)\n(609967, 31, 'William F Finkl Elementary School', 56.0, 50.0, 52.0, 49, '96.90%', 'LOWER WEST SIDE', None)\n(609968, 20, 'Sharon Christa McAuliffe Elementary School', 50.0, 48.0, 49.0, 52, '95.70%', 'HERMOSA', None)\n(609969, 41, 'Bret Harte 
Elementary School', 63.0, 77.0, 76.0, 47, '95.40%', 'HYDE PARK', None)\n(609971, 69, 'John Harvard Elementary School of Excellence', 51.0, 61.0, 61.0, 47, '96.00%', 'GREATER GRAND CROSSING', None)\n(609972, 14, 'Helge A Haugan Elementary School', 43.0, 57.0, 49.0, 46, '94.60%', 'ALBANY PARK', None)\n(609973, 30, 'Emiliano Zapata Elementary Academy', 60.0, 43.0, 42.0, 46, '96.80%', 'SOUTH LAWNDALE', None)\n(609974, 6, 'Hawthorne Elementary Scholastic Academy', 92.0, 61.0, 56.0, 58, '96.50%', 'LAKE VIEW', None)\n(609975, 25, 'John Hay Elementary Community Academy', 44.0, 49.0, 61.0, 52, '97.10%', 'AUSTIN', None)\n(609977, 42, 'Woodlawn Community Elementary School', 70.0, 80.0, 66.0, 59, '94.30%', 'WOODLAWN', None)\n(609978, 50, 'Wendell Smith Elementary School', 22.0, 1.0, 1.0, 42, '94.10%', 'PULLMAN', None)\n(609981, 56, 'Phobe Apperson Hearst Elementary School', 33.0, 31.0, 27.0, 45, '95.80%', 'GARFIELD RIDGE', None)\n(609983, 61, 'James Hedges Elementary School', 22.0, 22.0, 24.0, 41, '96.50%', 'NEW CITY', None)\n(609985, 26, 'Helen M Hefferan Elementary School', 29.0, 26.0, 63.0, 51, '95.90%', 'WEST GARFIELD PARK', None)\n(609986, 67, 'Charles R Henderson Elementary School', 16.0, 32.0, 38.0, 46, '93.60%', 'WEST ENGLEWOOD', None)\n(609987, 37, 'Thomas A Hendricks Elementary Community Academy', 33.0, 67.0, 78.0, 51, '95.60%', 'FULLER PARK', None)\n(609988, 16, 'Patrick Henry Elementary School', 45.0, 46.0, 43.0, 52, '95.90%', 'IRVING PARK', None)\n(609989, 28, 'Victor Herbert Elementary School', 50.0, 64.0, 72.0, 50, '96.20%', 'NEAR WEST SIDE', None)\n(609990, 33, 'South Loop Elementary School', 84.0, 34.0, 36.0, 54, '97.20%', 'NEAR SOUTH SIDE', None)\n(609993, 61, 'Agustin Lara Elementary Academy', 56.0, 45.0, 37.0, 53, '95.80%', 'NEW CITY', None)\n(609994, 14, 'William G Hibbard Elementary School', 50.0, 48.0, 49.0, 49, '96.90%', 'ALBANY PARK', None)\n(609995, 10, 'Rufus M Hitch Elementary School', 64.0, 55.0, 58.0, 55, '96.60%', 'NORWOOD PARK', None)\n(609996, 
60, 'Charles N Holden Elementary School', 53.0, 50.0, 35.0, 54, '96.00%', 'BRIDGEPORT', None)\n(609997, 68, 'Oliver Wendell Holmes Elementary School', 21.0, 23.0, 41.0, 48, '92.40%', 'ENGLEWOOD', None)\n(610000, 25, 'Julia Ward Howe Elementary School of Excellence', 67.0, 90.0, 69.0, 57, '97.10%', 'AUSTIN', None)\n(610003, 71, 'Paul Cuffe Math-Science Technology Academy Elementary School', 51.0, 45.0, 54.0, 48, '95.40%', 'AUBURN GRESHAM', None)\n(610006, 65, 'Edward N Hurley Elementary School', 77.0, 50.0, 49.0, 49, '94.90%', 'WEST LAWN', None)\n(610009, 28, 'Galileo Math & Science Scholastic Academy Elementary School', 65.0, 40.0, 42.0, 49, '97.00%', 'NEAR WEST SIDE', None)\n(610010, 5, 'Friedrich Ludwig Jahn Elementary School', 72.0, 60.0, 59.0, 50, '94.70%', 'NORTH CENTER', None)\n(610011, 2, 'Minnie Mars Jamieson Elementary School', 64.0, 62.0, 66.0, 52, '96.20%', 'WEST RIDGE', None)\n(610012, 8, 'Edward Jenner Elementary Academy of the Arts', 50.0, 62.0, 41.0, 49, '94.80%', 'NEAR NORTH SIDE', None)\n(610015, 31, 'Joseph Jungman Elementary School', 36.0, 56.0, 53.0, 48, '96.30%', 'LOWER WEST SIDE', None)\n(610016, 72, 'Kate S Kellogg Elementary School', 76.0, 57.0, 63.0, 53, '95.40%', 'BEVERLY', None)\n(610017, 30, 'Maria Saucedo Elementary Scholastic Academy', 60.0, 55.0, 64.0, 43, '96.90%', 'SOUTH LAWNDALE', None)\n(610019, 68, 'Joshua D Kershaw Elementary School', 62.0, 69.0, 56.0, 58, '96.00%', 'ENGLEWOOD', None)\n(610020, 25, 'Francis Scott Key Elementary School', 37.0, 49.0, 64.0, 44, '94.00%', 'AUSTIN', None)\n(610021, 23, 'Pablo Casals Elementary School', 50.0, 48.0, 49.0, 42, '95.90%', 'HUMBOLDT PARK', None)\n(610022, 1, 'Joyce Kilmer Elementary School', 50.0, 48.0, 49.0, 49, '96.90%', 'ROGERS PARK', None)\n(610024, 30, 'Lazaro Cardenas Elementary School', 50.0, 48.0, 49.0, 52, '98.50%', 'SOUTH LAWNDALE', None)\n(610026, 56, 'John H Kinzie Elementary School', 64.0, 49.0, 54.0, 50, '95.30%', 'GARFIELD RIDGE', None)\n(610027, 73, 'Rudyard Kipling 
Elementary School', 52.0, 42.0, 42.0, 53, '96.00%', 'WASHINGTON HEIGHTS', None)\n(610028, 49, 'Alfred David Kohn Elementary School', 19.0, 22.0, 13.0, 47, '94.20%', 'ROSELAND', None)\n(610029, 24, 'Rodolfo Lozano Bilingual & International Center Elementary School', 55.0, 39.0, 37.0, 45, '96.20%', 'WEST TOWN', None)\n(610030, 41, 'Charles Kozminski Elementary Community Academy', 36.0, 35.0, 40.0, 39, '94.40%', 'HYDE PARK', None)\n(610031, 24, 'Jean D Lafayette Elementary School', 44.0, 43.0, 38.0, 52, '96.30%', 'WEST TOWN', None)\n(610032, 73, 'Wendell E Green Elementary School', 32.0, 47.0, 53.0, 53, '95.70%', 'WASHINGTON HEIGHTS', None)\n(610033, 7, 'LaSalle Elementary Language Academy', 99.0, 62.0, 52.0, 53, '97.60%', 'LINCOLN PARK', None)\n(610036, 25, 'Leslie Lewis Elementary School', 21.0, 36.0, 64.0, 42, '95.30%', 'AUSTIN', None)\n(610037, 61, 'Arthur A Libby Elementary School', 36.0, 60.0, 60.0, 48, '95.10%', 'NEW CITY', None)\n(610038, 7, 'Abraham Lincoln Elementary School', 99.0, 74.0, 66.0, 56, '96.40%', 'LINCOLN PARK', None)\n(610039, 21, 'Carl von Linne Elementary School', 68.0, 66.0, 75.0, 50, '96.30%', 'AVONDALE', None)\n(610040, 19, 'Henry D Lloyd Elementary School', 50.0, 48.0, 49.0, 48, '97.20%', 'BELMONT CRAGIN', None)\n(610041, 18, 'Josephine C Locke Elementary School', 57.0, 49.0, 56.0, 47, '95.10%', 'MONTCLARE', None)\n(610043, 25, 'Joseph Lovett Elementary School', 43.0, 32.0, 22.0, 49, '95.40%', 'AUSTIN', None)\n(610044, 23, 'James Russell Lowell Elementary School', 39.0, 39.0, 40.0, 45, '95.70%', 'HUMBOLDT PARK', None)\n(610046, 19, 'Mary Lyon Elementary School', 55.0, 42.0, 38.0, 45, '97.00%', 'BELMONT CRAGIN', None)\n(610048, 8, 'George Manierre Elementary School', 43.0, 49.0, 40.0, 42, '94.80%', 'NEAR NORTH SIDE', None)\n(610052, 46, 'Horace Mann Elementary School', 28.0, 31.0, 30.0, 44, '94.50%', 'SOUTH CHICAGO', None)\n(610053, 66, 'Marquette Elementary School', 15.0, 17.0, 17.0, 43, '95.30%', 'CHICAGO LAWN', None)\n(610054, 51, 'John L 
Marsh Elementary School', 72.0, 53.0, 39.0, 50, '95.50%', 'SOUTH DEERING', None)\n(610055, 27, 'Michael Faraday Elementary School', 35.0, 65.0, 66.0, 50, '94.60%', 'EAST GARFIELD PARK', None)\n(610056, 29, 'Roswell B Mason Elementary School', 37.0, 44.0, 58.0, 51, '95.00%', 'NORTH LAWNDALE', None)\n(610061, 35, 'William J & Charles H Mayo Elementary School', 52.0, 62.0, 65.0, 50, '95.20%', 'DOUGLAS', None)\n(610062, 60, 'George B McClellan Elementary School', 49.0, 33.0, 40.0, 53, '96.00%', 'BRIDGEPORT', None)\n(610063, 30, 'Cyrus H McCormick Elementary School', 50.0, 48.0, 49.0, 52, '95.90%', 'SOUTH LAWNDALE', None)\n(610065, 42, 'Emmett Louis Till Math and Science Academy', 31.0, 47.0, 40.0, 47, '93.30%', 'WOODLAWN', None)\n(610066, 44, 'James E McDade Elementary Classical School', 99.0, 57.0, 52.0, 61, '94.30%', 'CHATHAM', None)\n(610067, 66, 'Francis M McKay Elementary School', 23.0, 37.0, 54.0, 48, '95.10%', 'CHICAGO LAWN', None)\n(610068, 19, 'Hanson Park Elementary School', 35.0, 34.0, 33.0, 50, '94.90%', 'BELMONT CRAGIN', None)\n(610070, 4, 'James B McPherson Elementary School', 71.0, 48.0, 38.0, 52, '96.30%', 'LINCOLN SQUARE', None)\n(610072, 71, 'Garrett A Morgan Elementary School', 28.0, 58.0, 55.0, 47, '94.80%', 'AUBURN GRESHAM', None)\n(610073, 24, 'Ellen Mitchell Elementary School', 99.0, 95.0, 95.0, 56, '97.70%', 'WEST TOWN', None)\n(610074, 22, 'James Monroe Elementary School', 45.0, 26.0, 22.0, 47, '95.80%', 'LOGAN SQUARE', None)\n(610075, 28, 'Moses Montefiore Special Elementary School', 28.0, 42.0, 14.0, 46, '93.50%', 'NEAR WEST SIDE', None)\n(610077, 66, 'Donald Morrill Math & Science Elementary School', 28.0, 32.0, 39.0, 44, '94.80%', 'CHICAGO LAWN', None)\n(610078, 6, 'Inter-American Elementary Magnet School', 67.0, 50.0, 51.0, 51, '96.20%', 'LAKE VIEW', None)\n(610081, 60, 'Mark Sheridan Elementary Math & Science Academy', 74.0, 53.0, 63.0, 55, '97.00%', 'BRIDGEPORT', None)\n(610083, 10, 'Daniel C Beard Elementary School', 50.0, 48.0, 49.0, 
57, '94.30%', 'NORWOOD PARK', None)\n(610084, 74, 'Annie Keller Elementary Gifted Magnet School', 99.0, 85.0, 82.0, 68, '96.50%', 'MOUNT GREENWOOD', None)\n(610086, 73, 'Mount Vernon Elementary School', 31.0, 42.0, 44.0, 47, '94.10%', 'WASHINGTON HEIGHTS', None)\n(610087, 64, 'Blair Early Childhood Center', 50.0, 48.0, 49.0, 58, '94.80%', 'CLEARING', None)\n(610088, 22, 'Wolfgang A Mozart Elementary School', 41.0, 56.0, 32.0, 50, '96.40%', 'LOGAN SQUARE', None)\n(610089, 16, 'John B Murphy Elementary School', 80.0, 75.0, 59.0, 58, '96.30%', 'IRVING PARK', None)\n(610090, 41, 'Phillip Murray Elementary Language Academy', 66.0, 34.0, 37.0, 56, '95.90%', 'HYDE PARK', None)\n(610091, 53, 'Ronald Brown Elementary Community Academy', 31.0, 52.0, 33.0, 52, '96.30%', 'WEST PULLMAN', None)\n(610093, 44, 'Jane A Neil Elementary School', 42.0, 27.0, 28.0, 52, '94.60%', 'CHATHAM', None)\n(610095, 7, 'Walter L Newberry Math & Science Academy Elementary School', 71.0, 52.0, 66.0, 54, '96.30%', 'LINCOLN PARK', None)\n(610096, 63, 'Florence Nightingale Elementary School', 35.0, 40.0, 48.0, 48, '95.80%', 'GAGE PARK', None)\n(610097, 20, 'William P Nixon Elementary School', 52.0, 70.0, 62.0, 46, '95.80%', 'HERMOSA', None)\n(610098, 23, 'Alfred Nobel Elementary School', 37.0, 37.0, 35.0, 48, '95.20%', 'HUMBOLDT PARK', None)\n(610099, 10, 'Norwood Park Elementary School', 99.0, 69.0, 68.0, 57, '95.60%', 'NORWOOD PARK', None)\n(610100, 23, 'West Park Elementary Academy', 29.0, 36.0, 40.0, 46, '96.70%', 'HUMBOLDT PARK', None)\n(610102, 71, 'Richard J Oglesby Elementary School', 44.0, 49.0, 49.0, 46, '96.90%', 'AUBURN GRESHAM', None)\n(610103, 43, \"Isabelle C O'Keeffe Elementary School\", 17.0, 28.0, 29.0, 40, '94.60%', 'SOUTH SHORE', None)\n(610104, 10, 'William J Onahan Elementary School', 64.0, 29.0, 27.0, 47, '96.10%', 'NORWOOD PARK', None)\n(610105, 10, 'Oriole Park Elementary School', 99.0, 88.0, 71.0, 60, '95.50%', 'NORWOOD PARK', None)\n(610107, 24, 'James Otis Elementary 
School', 83.0, 67.0, 58.0, 54, '96.50%', 'WEST TOWN', None)\n(610108, 67, \"Luke O'Toole Elementary School\", 5.0, 33.0, 40.0, 45, '94.60%', 'WEST ENGLEWOOD', None)\n(610109, 70, 'William Bishop Owen Scholastic Academy Elementary School', 49.0, 39.0, 56.0, 58, '95.00%', 'ASHBURN', None)\n(610111, 14, 'John Palmer Elementary School', 75.0, 61.0, 68.0, 65, '94.80%', 'ALBANY PARK', None)\n(610112, 68, 'Francis W Parker Elementary Community Academy', 30.0, 53.0, 57.0, 48, '94.90%', 'ENGLEWOOD', None)\n(610115, 69, 'Park Manor Elementary School', 28.0, 42.0, 48.0, 55, '96.30%', 'GREATER GRAND CROSSING', None)\n(610116, 43, 'Parkside Elementary Community Academy', 50.0, 48.0, 49.0, 42, '94.90%', 'SOUTH SHORE', None)\n(610119, 24, 'Elizabeth Peabody Elementary School', 51.0, 50.0, 61.0, 52, '97.00%', 'WEST TOWN', None)\n(610120, 62, 'Ferdinand Peck Elementary School', 51.0, 43.0, 47.0, 44, '97.20%', 'WEST ELSDON', None)\n(610121, 28, 'Washington Irving Elementary School', 43.0, 34.0, 44.0, 48, '95.90%', 'NEAR WEST SIDE', None)\n(610122, 77, 'Helen Peirce International Studies Elementary School', 58.0, 32.0, 22.0, 47, '95.80%', 'EDGEWATER', None)\n(610123, 29, 'William Penn Elementary School', 78.0, 99.0, 99.0, 52, '95.20%', 'NORTH LAWNDALE', None)\n(610124, 47, 'Harold Washington Elementary School', 28.0, 45.0, 63.0, 47, '96.20%', 'BURNSIDE', None)\n(610125, 31, 'Irma C Ruiz Elementary School', 57.0, 57.0, 62.0, 50, '95.00%', 'LOWER WEST SIDE', None)\n(610127, 13, 'Mary Gage Peterson Elementary School', 62.0, 49.0, 58.0, 51, '96.70%', 'NORTH PARK', None)\n(610128, 73, 'Marcus Moziah Garvey Elementary School', 50.0, 50.0, 48.0, 49, '96.00%', 'WASHINGTON HEIGHTS', None)\n(610129, 31, 'Josiah Pickard Elementary School', 50.0, 35.0, 37.0, 48, '96.40%', 'LOWER WEST SIDE', None)\n(610131, 29, 'Ambrose Plamondon Elementary School', 45.0, 32.0, 28.0, 45, '96.60%', 'NORTH LAWNDALE', None)\n(610132, 50, 'Edgar Allan Poe Elementary Classical School', 99.0, 66.0, 88.0, 55, '97.20%', 
'PULLMAN', None)\n(610135, 15, 'Portage Park Elementary School', 62.0, 46.0, 32.0, 48, '95.90%', 'PORTAGE PARK', None)\n(610136, 7, 'William H Prescott Elementary School', 90.0, 51.0, 24.0, 50, '97.60%', 'LINCOLN PARK', None)\n(610137, 15, 'Ernst Prussing Elementary School', 71.0, 49.0, 42.0, 43, '96.00%', 'PORTAGE PARK', None)\n(610138, 22, 'Pulaski International Academy Elmentary School', 68.0, 54.0, 57.0, 47, '96.20%', 'LOGAN SQUARE', None)\n(610139, 50, 'George M Pullman Elementary School', 59.0, 66.0, 75.0, 50, '96.20%', 'PULLMAN', None)\n(610141, 6, 'Ravenswood Elementary School', 46.0, 42.0, 41.0, 53, '95.70%', 'LAKE VIEW', None)\n(610142, 41, 'William H Ray Elementary School', 69.0, 22.0, 12.0, 50, '94.90%', 'HYDE PARK', None)\n(610143, 39, 'William C Reavis Math & Science Specialty Elementary School', 48.0, 37.0, 26.0, 45, '94.80%', 'KENWOOD', None)\n(610144, 21, 'Frank W Reilly Elementary School', 48.0, 33.0, 34.0, 48, '96.70%', 'AVONDALE', None)\n(610145, 15, 'Peter A Reinberg Elementary School', 60.0, 35.0, 30.0, 47, '95.70%', 'PORTAGE PARK', None)\n(610146, 69, 'Paul Revere Elementary School', 36.0, 42.0, 56.0, 49, '94.60%', 'GREATER GRAND CROSSING', None)\n(610147, 2, 'Philip Rogers Elementary School', 81.0, 68.0, 62.0, 53, '95.10%', 'WEST RIDGE', None)\n(610148, 61, 'Cesar E Chavez Multicultural Academic Center Elementary School', 54.0, 63.0, 52.0, 45, '96.10%', 'NEW CITY', None)\n(610150, 40, 'Betsy Ross Elementary School', 39.0, 51.0, 50.0, 37, '93.90%', 'WASHINGTON PARK', None)\n(610152, 69, 'Martha Ruggles Elementary School', 43.0, 37.0, 41.0, 48, '96.70%', 'GREATER GRAND CROSSING', None)\n(610154, 23, 'Martin A Ryerson Elementary School', 40.0, 48.0, 43.0, 45, '96.80%', 'HUMBOLDT PARK', None)\n(610156, 25, 'Louis Armstrong Math & Science Elementary School', 42.0, 49.0, 62.0, 59, '97.90%', 'AUSTIN', None)\n(610157, 63, 'Sidney Sawyer Elementary School', 57.0, 41.0, 39.0, 49, '96.40%', 'GAGE PARK', None)\n(610158, 25, 'Harriet E Sayre Elementary 
Language Academy', 68.0, 41.0, 50.0, 44, '96.60%', 'AUSTIN', None)\n(610159, 16, 'Jonathan Y Scammon Elementary School', 44.0, 21.0, 20.0, 48, '95.30%', 'IRVING PARK', None)\n(610160, 53, 'Songhai Elementary Learning Institute', 17.0, 47.0, 54.0, 46, '95.40%', 'WEST PULLMAN', None)\n(610161, 67, 'Arna Wendell Bontemps Elementary School', 33.0, 48.0, 59.0, 53, '95.80%', 'WEST ENGLEWOOD', None)\n(610163, 9, 'Frederick Stock Elementary School', 50.0, 48.0, 49.0, 69, '95.70%', 'EDISON PARK', None)\n(610165, 19, 'Franz Peter Schubert Elementary School', 50.0, 48.0, 49.0, 47, '97.00%', 'BELMONT CRAGIN', None)\n(610167, 61, 'William H Seward Communication Arts Academy Elementary School', 40.0, 38.0, 41.0, 49, '95.70%', 'NEW CITY', None)\n(610169, 42, 'Austin O Sexton Elementary School', 23.0, 38.0, 34.0, 46, '94.10%', 'WOODLAWN', None)\n(610170, 58, 'Columbia Explorers Elementary Academy', 59.0, 59.0, 63.0, 52, '97.10%', 'BRIGHTON PARK', None)\n(610171, 46, 'Arnold Mireles Elementary Academy', 27.0, 43.0, 54.0, 48, '95.20%', 'SOUTH CHICAGO', None)\n(610172, 61, 'William T Sherman Elementary School', 32.0, 46.0, 55.0, 49, '95.00%', 'NEW CITY', None)\n(610173, 68, 'Jesse Sherwood Elementary School', 38.0, 47.0, 58.0, 51, '95.50%', 'ENGLEWOOD', None)\n(610174, 58, 'James Shields Elementary School', 38.0, 57.0, 58.0, 50, '97.00%', 'BRIGHTON PARK', None)\n(610175, 39, 'Beulah Shoesmith Elementary School', 45.0, 40.0, 39.0, 49, '94.80%', 'KENWOOD', None)\n(610176, 75, 'John D Shoop Math-Science Technical Academy Elementary School', 32.0, 43.0, 51.0, 52, '96.30%', 'MORGAN PARK', None)\n(610178, 50, 'Theophilus Schmid Elementary School', 46.0, 31.0, 53.0, 41, '96.30%', 'PULLMAN', None)\n(610180, 28, 'John M Smyth Elementary School', 37.0, 54.0, 64.0, 43, '93.30%', 'NEAR WEST SIDE', None)\n(610182, 13, 'Hannah G Solomon Elementary School', 78.0, 67.0, 62.0, 52, '95.70%', 'NORTH PARK', None)\n(610183, 25, 'Spencer Technology Academy', 35.0, 57.0, 71.0, 50, '96.00%', 'AUSTIN', 
None)\n(610184, 30, 'John Spry Elementary Community School', 66.0, 70.0, 67.0, 46, '97.40%', 'SOUTH LAWNDALE', None)\n(610185, 70, 'Adlai E Stevenson Elementary School', 61.0, 50.0, 36.0, 47, '94.70%', 'ASHBURN', None)\n(610187, 3, 'Graeme Stewart Elementary School', 49.0, 46.0, 57.0, 47, '96.10%', 'UPTOWN', None)\n(610188, 49, 'Dunne Technology Academy', 46.0, 65.0, 71.0, 49, '96.40%', 'ROSELAND', None)\n(610191, 2, 'Stone Elementary Scholastic Academy', 78.0, 55.0, 43.0, 55, '96.00%', 'WEST RIDGE', None)\n(610192, 23, 'Harriet Beecher Stowe Elementary School', 28.0, 26.0, 31.0, 44, '94.70%', 'HUMBOLDT PARK', None)\n(610193, 46, 'William K New Sullivan Elementary School', 27.0, 35.0, 40.0, 47, '94.00%', 'SOUTH CHICAGO', None)\n(610194, 26, 'Charles Sumner Math & Science Community Acad Elementary School', 41.0, 44.0, 48.0, 48, '96.90%', 'WEST GARFIELD PARK', None)\n(610195, 72, 'Elizabeth H Sutherland Elementary School', 86.0, 64.0, 59.0, 50, '96.20%', 'BEVERLY', None)\n(610196, 77, 'George B Swift Elementary Specialty School', 61.0, 62.0, 60.0, 49, '95.80%', 'EDGEWATER', None)\n(610197, 24, 'Mancel Talcott Elementary School', 63.0, 59.0, 71.0, 51, '96.90%', 'WEST TOWN', None)\n(610198, 52, 'Douglas Taylor Elementary School', 34.0, 20.0, 1.0, 42, '94.30%', 'EAST SIDE', None)\n(610199, 53, 'Johnnie Colemon Elementary Academy', 60.0, 56.0, 63.0, 53, '95.40%', 'WEST PULLMAN', None)\n(610201, 17, 'Ole A Thorp Elementary Scholastic Academy', 70.0, 47.0, 40.0, 50, '95.80%', 'DUNNING', None)\n(610202, 26, 'George W Tilton Elementary School', 6.0, 30.0, 41.0, 51, '95.10%', 'WEST GARFIELD PARK', None)\n(610203, 63, 'Enrico Tonti Elementary School', 50.0, 48.0, 49.0, 49, '97.20%', 'GAGE PARK', None)\n(610205, 77, 'Lyman Trumbull Elementary School', 56.0, 47.0, 25.0, 50, '95.20%', 'EDGEWATER', None)\n(610206, 56, 'Mark Twain Elementary School', 67.0, 46.0, 44.0, 60, '97.80%', 'GARFIELD RIDGE', None)\n(610207, 72, 'John H Vanderpoel Elementary Magnet School', 74.0, 24.0, 31.0, 
55, '96.20%', 'BEVERLY', None)\n(610208, 49, 'Mildred I Lavizzo Elementary School', 15.0, 41.0, 46.0, 48, '95.00%', 'ROSELAND', None)\n(610209, 14, 'Alessandro Volta Elementary School', 43.0, 28.0, 37.0, 51, '95.90%', 'ALBANY PARK', None)\n(610210, 24, 'Alexander von Humboldt Elementary School', 31.0, 32.0, 45.0, 43, '96.00%', 'WEST TOWN', None)\n(610212, 14, 'Albany Park Multicultural Academy', 66.0, 66.0, 71.0, 46, '96.90%', 'ALBANY PARK', None)\n(610213, 42, 'James Wadsworth Elementary School', 50.0, 48.0, 49.0, 47, '95.50%', 'WOODLAWN', None)\n(610215, 30, 'Francisco I Madero Middle School', 49.0, 48.0, 47.0, 47, '97.00%', 'SOUTH LAWNDALE', None)\n(610216, 31, 'John A Walsh Elementary School', 60.0, 70.0, 91.0, 48, '95.30%', 'LOWER WEST SIDE', None)\n(610217, 34, 'James Ward Elementary School', 49.0, 50.0, 46.0, 48, '96.90%', 'ARMOUR SQUARE', None)\n(610218, 48, 'Joseph Warren Elementary School', 33.0, 49.0, 40.0, 46, '95.30%', 'CALUMET HEIGHTS', None)\n(610219, 52, 'George Washington Elementary School', 58.0, 33.0, 43.0, 52, '95.90%', 'EAST SIDE', None)\n(610220, 4, 'Thomas J Waters Elementary School', 85.0, 56.0, 54.0, 52, '95.10%', 'LINCOLN SQUARE', None)\n(610221, 26, 'Daniel Webster Elementary School', 48.0, 81.0, 66.0, 45, '97.20%', 'WEST GARFIELD PARK', None)\n(610224, 53, 'West Pullman Elementary School', 23.0, 55.0, 47.0, 47, '93.80%', 'WEST PULLMAN', None)\n(610225, 53, 'John Whistler Elementary School', 45.0, 41.0, 28.0, 52, '97.00%', 'WEST PULLMAN', None)\n(610226, 63, 'Socorro Sandoval Elementary School', 50.0, 48.0, 49.0, 44, '96.50%', 'GAGE PARK', None)\n(610230, 12, 'Wildwood Elementary School', 99.0, 77.0, 67.0, 55, '95.70%', 'FOREST GLEN', None)\n(610231, 33, 'National Teachers Elementary Academy', 67.0, 65.0, 60.0, 56, '96.30%', 'NEAR SOUTH SIDE', None)\n(610232, 35, 'Williams Multiplex Elementary School', 50.0, 48.0, 49.0, 50, '95.90%', 'DOUGLAS', None)\n(610233, 69, 'Elihu Yale Elementary School', 29.0, 43.0, 50.0, 49, '97.00%', 'GREATER 
GRAND CROSSING', None)\n(610234, 22, 'Richard Yates Elementary School', 42.0, 66.0, 73.0, 51, '96.20%', 'LOGAN SQUARE', None)\n(610235, 25, 'Ella Flagg Young Elementary School', 31.0, 44.0, 50.0, 44, '95.40%', 'AUSTIN', None)\n(610237, 38, 'Ludwig Van Beethoven Elementary School', 40.0, 58.0, 59.0, 46, '94.70%', 'GRAND BOULEVARD', None)\n(610238, 68, 'Carrie Jacobs Bond Elementary School', 54.0, 67.0, 63.0, 53, '94.30%', 'ENGLEWOOD', None)\n(610239, 61, 'Richard J Daley Elementary Academy', 29.0, 37.0, 46.0, 46, '96.00%', 'NEW CITY', None)\n(610240, 29, 'Matthew A Henson Elementary School', 37.0, 34.0, 22.0, 55, '95.70%', 'NORTH LAWNDALE', None)\n(610241, 26, 'Guglielmo Marconi Elementary Community Academy', 26.0, 65.0, 76.0, 52, '96.50%', 'WEST GARFIELD PARK', None)\n(610242, 3, 'Joseph Brennemann Elementary School', 50.0, 55.0, 59.0, 53, '97.70%', 'UPTOWN', None)\n(610243, 27, 'John Calhoun North Elementary School', 52.0, 44.0, 54.0, 43, '96.50%', 'EAST GARFIELD PARK', None)\n(610244, 25, 'Michele Clark Academic Prep Magnet High School', 50.0, 48.0, 49.0, 48, '95.40%', 'AUSTIN', 60.4)\n(610245, 25, 'Frederick A Douglass Academy High School', 44.0, 39.0, 37.0, 50, '94.70%', 'AUSTIN', 39.5)\n(610246, 40, 'Edward Beasley Elementary Magnet Academic Center', 47.0, 34.0, 48.0, 42, '94.70%', 'WASHINGTON PARK', None)\n(610248, 17, 'Chicago Academy Elementary School', 66.0, 40.0, 22.0, 47, '96.10%', 'DUNNING', None)\n(610249, 63, 'Talman Elementary School', 99.0, 99.0, 99.0, 57, '95.50%', 'GAGE PARK', None)\n(610250, 8, 'Rueben Salazar Elementary Bilingual Center', 64.0, 54.0, 53.0, 52, '95.80%', 'NEAR NORTH SIDE', None)\n(610251, 27, 'Willa Cather Elementary School', 69.0, 78.0, 81.0, 56, '96.40%', 'EAST GARFIELD PARK', None)\n(610252, 28, 'Robert Nathaniel Dett Elementary School', 48.0, 51.0, 65.0, 51, '94.80%', 'NEAR WEST SIDE', None)\n(610253, 29, 'Julia C Lathrop Elementary School', 67.0, 58.0, 26.0, 40, '95.50%', 'NORTH LAWNDALE', None)\n(610256, 36, 'Jackie 
Robinson Elementary School', 50.0, 48.0, 49.0, 45, '93.80%', 'OAKLAND', None)\n(610257, 23, 'Morton School of Excellence', 53.0, 66.0, 63.0, 52, '97.10%', 'HUMBOLDT PARK', None)\n(610258, 39, 'Florence B Price Elementary School', 20.0, 37.0, 46.0, 46, '95.20%', 'KENWOOD', None)\n(610263, 69, 'John Foster Dulles Elementary School', 42.0, 53.0, 42.0, 51, '97.30%', 'GREATER GRAND CROSSING', None)\n(610264, 68, 'Walter Reed Elementary School', 29.0, 62.0, 68.0, 47, '93.20%', 'ENGLEWOOD', None)\n(610266, 42, 'Dumas Technology Academy', 19.0, 25.0, 49.0, 48, '95.60%', 'WOODLAWN', None)\n(610268, 44, 'Arthur R Ashe Elementary School', 50.0, 48.0, 49.0, 39, '93.40%', 'CHATHAM', None)\n(610269, 3, 'John T McCutcheon Elementary School', 56.0, 67.0, 41.0, 52, '94.80%', 'UPTOWN', None)\n(610271, 27, 'Jensen Elementary Scholastic Academy', 35.0, 42.0, 67.0, 46, '95.30%', 'EAST GARFIELD PARK', None)\n(610273, 30, 'Ignance Paderewski Elementary Learning Academy', 31.0, 63.0, 98.0, 46, '95.20%', 'SOUTH LAWNDALE', None)\n(610274, 29, 'James Weldon Johnson Elementary School', 67.0, 61.0, 75.0, 51, '97.50%', 'NORTH LAWNDALE', None)\n(610276, 38, 'Irvin C Mollison Elementary School', 44.0, 46.0, 38.0, 52, '94.30%', 'GRAND BOULEVARD', None)\n(610277, 38, 'Anthony Overton Elementary School', 32.0, 64.0, 76.0, 52, '94.90%', 'GRAND BOULEVARD', None)\n(610281, 43, 'Adam Clayton Powell Paideia Community Academy Elementary School', 54.0, 74.0, 84.0, 46, '95.30%', 'SOUTH SHORE', None)\n(610282, 25, 'Ronald E McNair Elementary School', 31.0, 56.0, 59.0, 47, '96.50%', 'AUSTIN', None)\n(610283, 68, 'Simon Guggenheim Elementary School', 30.0, 38.0, 22.0, 48, '93.10%', 'ENGLEWOOD', None)\n(610284, 1, 'New Field Elementary School', 50.0, 48.0, 49.0, 51, '95.60%', 'ROGERS PARK', None)\n(610285, 67, 'Granville T Woods Math & Science Academy Elementary School', 35.0, 43.0, 31.0, 48, '95.20%', 'WEST ENGLEWOOD', None)\n(610287, 70, 'Ashburn Community Elementary School', 48.0, 52.0, 40.0, 51, '97.10%', 
'ASHBURN', None)\n(610295, 53, 'Thomas J Higgins Elementary Community Academy', 50.0, 48.0, 49.0, 49, '95.20%', 'WEST PULLMAN', None)\n(610297, 43, 'High School of Leadership at South Shore', 50.0, 48.0, 49.0, 42, '95.20%', 'SOUTH SHORE', 68.8)\n(610298, 44, 'Lenart Elementary Regional Gifted Center', 79.0, 51.0, 67.0, 52, '97.80%', 'CHATHAM', None)\n(610299, 68, 'William A Hinton Elementary School', 18.0, 30.0, 29.0, 45, '95.10%', 'ENGLEWOOD', None)\n(610300, 44, 'Oliver S Westcott Elementary School', 30.0, 48.0, 54.0, 46, '94.80%', 'CHATHAM', None)\n(610304, 28, 'Phoenix Military Academy High School', 53.0, 59.0, 78.0, 52, '97.40%', 'NEAR WEST SIDE', 51.3)\n(610305, 25, 'George Leland Elementary School', 50.0, 48.0, 49.0, 60, '94.90%', 'AUSTIN', None)\n(610308, 28, 'Wilma Rudolph Elementary Learning Center', 50.0, 48.0, 49.0, 57, '95.60%', 'NEAR WEST SIDE', None)\n(610313, 24, 'Jose De Diego Elementary Community Academy', 57.0, 37.0, 40.0, 53, '96.80%', 'WEST TOWN', None)\n(610315, 53, 'Edward White Elementary Career Academy', 27.0, 44.0, 48.0, 51, '96.70%', 'WEST PULLMAN', None)\n(610319, 59, 'Evergreen Academy Middle School', 60.0, 47.0, 44.0, 43, '97.40%', 'MCKINLEY PARK', None)\n(610320, 24, 'Ana Roque de Duprey Elementary School', 57.0, 12.0, 14.0, 53, '95.60%', 'WEST TOWN', None)\n(610325, 21, 'Logandale Middle School', 56.0, 32.0, 28.0, 50, '95.60%', 'AVONDALE', None)\n(610329, 31, 'Orozco Fine Arts & Sciences Elementary School', 55.0, 34.0, 44.0, 50, '96.40%', 'LOWER WEST SIDE', None)\n(610336, 35, 'Williams Preparatory Academy Middle School', 49.0, 42.0, 48.0, 53, '92.40%', 'DOUGLAS', None)\n(610339, 68, 'Amos Alonzo Stagg Elementary School', 23.0, 35.0, 42.0, 44, '95.50%', 'ENGLEWOOD', None)\n(610345, 38, 'Carter G Woodson South Elementary School', 26.0, 2.0, 1.0, 44, '94.70%', 'GRAND BOULEVARD', None)\n(610347, 66, 'Claremont Academy Elementary School', 36.0, 50.0, 51.0, 46, '96.20%', 'CHICAGO LAWN', None)\n(610348, 26, 'Nathan R Goldblatt Elementary 
School', 49.0, 80.0, 99.0, 53, '95.60%', 'WEST GARFIELD PARK', None)\n(610350, 48, 'Robert A Black Magnet Elementary School', 68.0, 63.0, 58.0, 55, '93.70%', 'CALUMET HEIGHTS', None)\n(610352, 70, 'Durkin Park Elementary School', 61.0, 57.0, 58.0, 53, '95.80%', 'ASHBURN', None)\n(610353, 58, 'Calmeca Academy of Fine Arts and Dual Language', 58.0, 59.0, 72.0, 51, '96.90%', 'BRIGHTON PARK', None)\n(610354, 14, 'North River Elementary School', 67.0, 53.0, 51.0, 50, '96.00%', 'ALBANY PARK', None)\n(610355, 6, 'Mary E Courtenay Elementary Language Arts Center', 99.0, 95.0, 80.0, 59, '94.20%', 'LAKE VIEW', None)\n(610357, 30, 'Spry Community Links High School', 70.0, 92.0, 87.0, 56, '95.70%', 'SOUTH LAWNDALE', 65.2)\n(610362, 73, 'Medgar Evers Elementary School', 27.0, 35.0, 49.0, 56, '96.10%', 'WASHINGTON HEIGHTS', None)\n(610364, 54, 'William E B Dubois Elementary School', 47.0, 33.0, 42.0, 50, '94.40%', 'RIVERDALE', None)\n(610365, 27, 'Mary McLeod Bethune Elementary School', 53.0, 53.0, 39.0, 56, '97.60%', 'EAST GARFIELD PARK', None)\n(610366, 73, 'Charles H Wacker Elementary School', 32.0, 34.0, 32.0, 50, '93.40%', 'WASHINGTON HEIGHTS', None)\n(610367, 25, 'Oscar DePriest Elementary School', 35.0, 47.0, 53.0, 47, '95.60%', 'AUSTIN', None)\n(610368, 49, 'Langston Hughes Elementary School', 28.0, 16.0, 30.0, 46, '95.50%', 'ROSELAND', None)\n(610369, 71, 'Mahalia Jackson Elementary School', 38.0, 54.0, 66.0, 46, '94.60%', 'AUBURN GRESHAM', None)\n(610381, 38, 'Bronzeville Scholastic Academy High School', 41.0, 42.0, 43.0, 51, '95.60%', 'GRAND BOULEVARD', 73.2)\n(610383, 30, 'Greater Lawndale High School For Social Justice', 43.0, 46.0, 44.0, 53, '95.10%', 'SOUTH LAWNDALE', 57.4)\n(610384, 30, 'Infinity Math Science and Technology High School', 58.0, 66.0, 65.0, 51, '96.00%', 'SOUTH LAWNDALE', 55.7)\n(610385, 30, 'Multicultural Academy of Scholarship', 48.0, 47.0, 42.0, 56, '96.70%', 'SOUTH LAWNDALE', 58.5)\n(610390, 77, 'Hyman G Rickover Naval Academy High School', 
64.0, 64.0, 42.0, 53, '95.90%', 'EDGEWATER', 63.5)\n(610391, 67, 'Robert Lindblom Math & Science Academy High School', 66.0, 75.0, 85.0, 55, '95.90%', 'WEST ENGLEWOOD', 85.9)\n(610392, 30, 'World Language Academy High School', 51.0, 49.0, 47.0, 50, '96.00%', 'SOUTH LAWNDALE', 48.1)\n(610396, 66, 'Tarkington School of Excellence Elementary School', 48.0, 39.0, 40.0, 49, '96.60%', 'CHICAGO LAWN', None)\n(610402, 5, 'DeVry University Advantage Academy High School', 50.0, 48.0, 49.0, 58, '94.50%', 'NORTH CENTER', 63.3)\n(610499, 29, 'Collins Academy High School', 60.0, 51.0, 46.0, 54, '96.60%', 'NORTH LAWNDALE', None)\n(610502, 28, 'Marine Military Math and Science Academy ', 41.0, 38.0, 34.0, 54, '95.30%', 'NEAR WEST SIDE', None)\n(610503, 29, 'Frazier Prospective IB Magnet Elementary School', 60.0, 84.0, 72.0, 66, '96.60%', 'NORTH LAWNDALE', None)\n(610504, 59, 'Velma F Thomas Early Childhood Center', 50.0, 48.0, 49.0, 55, '95.60%', 'MCKINLEY PARK', None)\n(610506, 68, 'TEAM Englewood Community Academy High School', 45.0, 55.0, 53.0, 58, '96.50%', 'ENGLEWOOD', None)\n(610513, 34, 'Air Force Academy High School', 49.0, 60.0, 55.0, 53, '96.90%', 'ARMOUR SQUARE', None)\n(610515, 16, 'Disney II Magnet School', 50.0, 48.0, 49.0, 62, '96.50%', 'IRVING PARK', None)\n(610520, 24, 'LaSalle II Magnet Elementary School', 66.0, 36.0, 52.0, 55, '94.80%', 'WEST TOWN', None)\n(610523, 10, 'Edison Park Elementary School', 95.0, 52.0, 49.0, 56, '96.60%', 'NORWOOD PARK', None)\n(610524, 5, 'Alcott High School for the Humanities', 70.0, 67.0, 51.0, 57, '96.90%', 'NORTH CENTER', None)\n(610530, 43, 'South Shore Fine Arts Academy', 50.0, 48.0, 49.0, 56, '96.30%', 'SOUTH SHORE', None)\n(610533, 19, 'Dr Jorge Prieto Math and Science', 50.0, 48.0, 49.0, 52, '96.90%', 'BELMONT CRAGIN', None)\n(610534, 8, 'Skinner North', 50.0, 48.0, 49.0, 64, '97.80%', 'NEAR NORTH SIDE', None)\n(610535, 29, 'Mason High School', 50.0, 59.0, 59.0, 50, '0.00%', 'NORTH LAWNDALE', None)\n(610539, 19, 'Marvin 
Camras Elementary School', 54.0, 37.0, 41.0, 51, '0.00%', 'BELMONT CRAGIN', None)\n(610541, 21, 'Federico Garcia Lorca Elementary School', 53.0, 53.0, 58.0, 51, '0.00%', 'AVONDALE', None)\n(610542, 2, 'West Ridge Elementary School', 67.0, 41.0, 28.0, 46, '0.00%', 'WEST RIDGE', None)\n(610543, 63, 'Eric Solorio Academy High School', 67.0, 53.0, 63.0, 53, '0.00%', 'GAGE PARK', None)\n(610544, 65, 'Mariano Azuela Elementary School', 54.0, 61.0, 81.0, 47, '0.00%', 'WEST LAWN', None)\n"
],
[
"#cleaning up data set for census so we can match based on community area number for later on \ndf_census = df_census.fillna(0)\ndf_census.isnull().sum()\ndf_census['COMMUNITY_AREA_NUMBER'] = df_census['COMMUNITY_AREA_NUMBER'].astype(int)",
"_____no_output_____"
],
[
"conn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\n#Drop public school table if it exits\ncursor.execute(\"DROP TABLE IF EXISTS `CENSUS`\")\nprint(\"Table dropped\")\nconn.close()",
"Table dropped\n"
],
[
"conn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\ntry:\n cursor.execute(\"\"\"\n CREATE TABLE CENSUS (\n COMMUNITY_AREA_NUMBER INTEGER DEFAULT 0 PRIMARY KEY,\n COMMUNITY_AREA_NAME TEXT NOT NULL,\n PERCENT_OF_HOUSING_CROWDED FLOAT DEFAULT 0,\n PERCENT_HOUSEHOLDS_BELOW_POVERTY FLOAT DEFAULT 0,\n PERCENT_AGED_16_UNEMPLOYED FLOAT DEFAULT 0,\n PERCENT_AGED_25_WITHOUT_HIGH_SCHOOL_DIPLOMA FLOAT DEFAULT 0,\n PERCENT_AGED_UNDER_18_OR_OVER_64 FLOAT DEFAULT 0,\n PER_CAPITA_INCOME INT DEFAULT 0,\n HARDSHIP_INDEX FLOAT DEFAULT 0\n );\n \"\"\")\n print(\"Success Creation\")\nexcept Exception as e:\n print(str(e))\n print(\"Table creation failed\")\nfinally:\n conn.close()",
"Success Creation\n"
],
[
"census_list = df_census.values.tolist()\ncensus_list",
"_____no_output_____"
],
[
"#inserting the fields into the table\nconn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\ntry:\n cursor.executemany(\"\"\"\n INSERT INTO CENSUS (COMMUNITY_AREA_NUMBER,\n COMMUNITY_AREA_NAME,\n PERCENT_OF_HOUSING_CROWDED,\n PERCENT_HOUSEHOLDS_BELOW_POVERTY,\n PERCENT_AGED_16_UNEMPLOYED,\n PERCENT_AGED_25_WITHOUT_HIGH_SCHOOL_DIPLOMA,\n PERCENT_AGED_UNDER_18_OR_OVER_64,\n PER_CAPITA_INCOME,\n HARDSHIP_INDEX)\n VALUES (?,?,?,?,?,?,?,?,?)\n \"\"\", census_list)\n conn.commit()\n print(\"Insert Successfully\")\nexcept Exception as e:\n print(str(e))\n print(\"Insert failed\")\nfinally:\n conn.close()",
"Insert Successfully\n"
],
[
"conn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\ncursor.execute(\"SELECT * FROM CENSUS\")\nresult = cursor.fetchall()\nconn.close()",
"_____no_output_____"
],
[
"for i in result:\n print(i)",
"(0, 'CHICAGO', 4.7, 19.7, 12.9, 19.5, 33.5, 28202, 0.0)\n(1, 'Rogers Park', 7.7, 23.6, 8.7, 18.2, 27.5, 23939, 39.0)\n(2, 'West Ridge', 7.8, 17.2, 8.8, 20.8, 38.5, 23040, 46.0)\n(3, 'Uptown', 3.8, 24.0, 8.9, 11.8, 22.2, 35787, 20.0)\n(4, 'Lincoln Square', 3.4, 10.9, 8.2, 13.4, 25.5, 37524, 17.0)\n(5, 'North Center', 0.3, 7.5, 5.2, 4.5, 26.2, 57123, 6.0)\n(6, 'Lake View', 1.1, 11.4, 4.7, 2.6, 17.0, 60058, 5.0)\n(7, 'Lincoln Park', 0.8, 12.3, 5.1, 3.6, 21.5, 71551, 2.0)\n(8, 'Near North Side', 1.9, 12.9, 7.0, 2.5, 22.6, 88669, 1.0)\n(9, 'Edison Park', 1.1, 3.3, 6.5, 7.4, 35.3, 40959, 8.0)\n(10, 'Norwood Park', 2.0, 5.4, 9.0, 11.5, 39.5, 32875, 21.0)\n(11, 'Jefferson Park', 2.7, 8.6, 12.4, 13.4, 35.5, 27751, 25.0)\n(12, 'Forest Glen', 1.1, 7.5, 6.8, 4.9, 40.5, 44164, 11.0)\n(13, 'North Park', 3.9, 13.2, 9.9, 14.4, 39.0, 26576, 33.0)\n(14, 'Albany Park', 11.3, 19.2, 10.0, 32.9, 32.0, 21323, 53.0)\n(15, 'Portage Park', 4.1, 11.6, 12.6, 19.3, 34.0, 24336, 35.0)\n(16, 'Irving Park', 6.3, 13.1, 10.0, 22.4, 31.6, 27249, 34.0)\n(17, 'Dunning', 5.2, 10.6, 10.0, 16.2, 33.6, 26282, 28.0)\n(18, 'Montclaire', 8.1, 15.3, 13.8, 23.5, 38.6, 22014, 50.0)\n(19, 'Belmont Cragin', 10.8, 18.7, 14.6, 37.3, 37.3, 15461, 70.0)\n(20, 'Hermosa', 6.9, 20.5, 13.1, 41.6, 36.4, 15089, 71.0)\n(21, 'Avondale', 6.0, 15.3, 9.2, 24.7, 31.0, 20039, 42.0)\n(22, 'Logan Square', 3.2, 16.8, 8.2, 14.8, 26.2, 31908, 23.0)\n(23, 'Humboldt park', 14.8, 33.9, 17.3, 35.4, 38.0, 13781, 85.0)\n(24, 'West Town', 2.3, 14.7, 6.6, 12.9, 21.7, 43198, 10.0)\n(25, 'Austin', 6.3, 28.6, 22.6, 24.4, 37.9, 15957, 73.0)\n(26, 'West Garfield Park', 9.4, 41.7, 25.8, 24.5, 43.6, 10934, 92.0)\n(27, 'East Garfield Park', 8.2, 42.4, 19.6, 21.3, 43.2, 12961, 83.0)\n(28, 'Near West Side', 3.8, 20.6, 10.7, 9.6, 22.2, 44689, 15.0)\n(29, 'North Lawndale', 7.4, 43.1, 21.2, 27.6, 42.7, 12034, 87.0)\n(30, 'South Lawndale', 15.2, 30.7, 15.8, 54.8, 33.8, 10402, 96.0)\n(31, 'Lower West Side', 9.6, 25.8, 15.8, 40.7, 32.6, 16444, 76.0)\n(32, 
'Loop', 1.5, 14.7, 5.7, 3.1, 13.5, 65526, 3.0)\n(33, 'Near South Side', 1.3, 13.8, 4.9, 7.4, 21.8, 59077, 7.0)\n(34, 'Armour Square', 5.7, 40.1, 16.7, 34.5, 38.3, 16148, 82.0)\n(35, 'Douglas', 1.8, 29.6, 18.2, 14.3, 30.7, 23791, 47.0)\n(36, 'Oakland', 1.3, 39.7, 28.7, 18.4, 40.4, 19252, 78.0)\n(37, 'Fuller Park', 3.2, 51.2, 33.9, 26.6, 44.9, 10432, 97.0)\n(38, 'Grand Boulevard', 3.3, 29.3, 24.3, 15.9, 39.5, 23472, 57.0)\n(39, 'Kenwood', 2.4, 21.7, 15.7, 11.3, 35.4, 35911, 26.0)\n(40, 'Washington Park', 5.6, 42.1, 28.6, 25.4, 42.8, 13785, 88.0)\n(41, 'Hyde Park', 1.5, 18.4, 8.4, 4.3, 26.2, 39056, 14.0)\n(42, 'Woodlawn', 2.9, 30.7, 23.4, 16.5, 36.1, 18672, 58.0)\n(43, 'South Shore', 2.8, 31.1, 20.0, 14.0, 35.7, 19398, 55.0)\n(44, 'Chatham', 3.3, 27.8, 24.0, 14.5, 40.3, 18881, 60.0)\n(45, 'Avalon Park', 1.4, 17.2, 21.1, 10.6, 39.3, 24454, 41.0)\n(46, 'South Chicago', 4.7, 29.8, 19.7, 26.6, 41.1, 16579, 75.0)\n(47, 'Burnside', 6.8, 33.0, 18.6, 19.3, 42.7, 12515, 79.0)\n(48, 'Calumet Heights', 2.1, 11.5, 20.0, 11.0, 44.0, 28887, 38.0)\n(49, 'Roseland', 2.5, 19.8, 20.3, 16.9, 41.2, 17949, 52.0)\n(50, 'Pullman', 1.5, 21.6, 22.8, 13.1, 38.6, 20588, 51.0)\n(51, 'South Deering', 4.0, 29.2, 16.3, 21.0, 39.5, 14685, 65.0)\n(52, 'East Side', 6.8, 19.2, 12.1, 31.9, 42.8, 17104, 64.0)\n(53, 'West Pullman', 3.3, 25.9, 19.4, 20.5, 42.1, 16563, 62.0)\n(54, 'Riverdale', 5.8, 56.5, 34.6, 27.5, 51.5, 8201, 98.0)\n(55, 'Hegewisch', 3.3, 17.1, 9.6, 19.2, 42.9, 22677, 44.0)\n(56, 'Garfield Ridge', 2.6, 8.8, 11.3, 19.3, 38.1, 26353, 32.0)\n(57, 'Archer Heights', 8.5, 14.1, 16.5, 35.9, 39.2, 16134, 67.0)\n(58, 'Brighton Park', 14.4, 23.6, 13.9, 45.1, 39.3, 13089, 84.0)\n(59, 'McKinley Park', 7.2, 18.7, 13.4, 32.9, 35.6, 16954, 61.0)\n(60, 'Bridgeport', 4.5, 18.9, 13.7, 22.2, 31.3, 22694, 43.0)\n(61, 'New City', 11.9, 29.0, 23.0, 41.5, 38.9, 12765, 91.0)\n(62, 'West Elsdon', 11.1, 15.6, 16.7, 37.0, 37.7, 15754, 69.0)\n(63, 'Gage Park', 15.8, 23.4, 18.2, 51.5, 38.8, 12171, 93.0)\n(64, 
'Clearing', 2.7, 8.9, 9.5, 18.8, 37.6, 25113, 29.0)\n(65, 'West Lawn', 5.8, 14.9, 9.6, 33.6, 39.6, 16907, 56.0)\n(66, 'Chicago Lawn', 7.6, 27.9, 17.1, 31.2, 40.6, 13231, 80.0)\n(67, 'West Englewood', 4.8, 34.4, 35.9, 26.3, 40.7, 11317, 89.0)\n(68, 'Englewood', 3.8, 46.6, 28.0, 28.5, 42.5, 11888, 94.0)\n(69, 'Greater Grand Crossing', 3.6, 29.6, 23.0, 16.5, 41.0, 17285, 66.0)\n(70, 'Ashburn', 4.0, 10.4, 11.7, 17.7, 36.9, 23482, 37.0)\n(71, 'Auburn Gresham', 4.0, 27.6, 28.3, 18.5, 41.9, 15528, 74.0)\n(72, 'Beverly', 0.9, 5.1, 8.0, 3.7, 40.5, 39523, 12.0)\n(73, 'Washington Height', 1.1, 16.9, 20.8, 13.7, 42.6, 19713, 48.0)\n(74, 'Mount Greenwood', 1.0, 3.4, 8.7, 4.3, 36.8, 34381, 16.0)\n(75, 'Morgan Park', 0.8, 13.2, 15.0, 10.8, 40.3, 27149, 30.0)\n(76, \"O'Hare\", 3.6, 15.4, 7.1, 10.9, 30.3, 25828, 24.0)\n(77, 'Edgewater', 4.1, 18.2, 9.2, 9.7, 23.8, 33385, 19.0)\n"
],
[
"conn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\ncursor.execute(\"SELECT COUNT(*) FROM PUBLIC_SCHOOL\")\npublic_school_count = cursor.fetchone()\ncursor.execute(\"SELECT COUNT(*) FROM CENSUS\")\ncensus_count = cursor.fetchone()\nconn.close()\nprint(\"Total Rows In Public_School Table: \" + str(public_school_count[0]))\nprint(\"Ttoal Rows In Census Table: \" + str(census_count[0]))",
"Total Rows In Public_School Table: 432\nTtoal Rows In Census Table: 78\n"
]
],
[
[
"The count of rows for public school is 432.\nThe count of rows for census is 78.",
"_____no_output_____"
],
[
"# Part 3",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\n#Drop public school table if it exits\ncursor.execute(\"DROP TABLE IF EXISTS `Totaldata`\")\nprint(\"Table dropped\")\nconn.close()",
"Table dropped\n"
],
[
"#Joining the two tables based on community area number\nconn = sqlite3.connect(assign2db)\ncursor = conn.cursor()\ncursor.execute(\"\"\"\nCREATE TABLE Totaldata AS SELECT * FROM (\nSELECT * FROM PUBLIC_SCHOOL\nLEFT JOIN CENSUS ON CENSUS.COMMUNITY_AREA_NUMBER = PUBLIC_SCHOOL.COMMUNITY_AREA_NUMBER\n)\n\"\"\")",
"_____no_output_____"
],
[
"conn = sqlite3.connect(assign2db)\nsql = \"SELECT * FROM Totaldata\"\ndf_total = pd.read_sql(sql, conn)\ndf_total",
"_____no_output_____"
]
],
[
[
"## Q1 What is the relationship between per capita income in EAST SIDE and the safety score of schools in EAST SIDE?",
"_____no_output_____"
]
],
[
[
"sql = '''SELECT PER_CAPITA_INCOME, SAFETY_SCORE FROM Totaldata WHERE COMMUNITY_AREA_NAME = \"EAST SIDE\"'''\nq1 = pd.read_sql(sql, conn)\nq1",
"_____no_output_____"
],
[
"x = q1['PER_CAPITA_INCOME'].values.reshape(-1,1)\ny = q1['SAFETY_SCORE'].values.reshape(-1,1)\nlinear_regression = LinearRegression()\nfit = linear_regression.fit(x,y)\nprint(fit.coef_)\nprint(fit.intercept_)",
"[[0.]]\n[49.25]\n"
],
[
"sns.lmplot(x=\"PER_CAPITA_INCOME\", y=\"SAFETY_SCORE\", data=q1);",
"_____no_output_____"
]
],
[
[
"#### There is no relationship between PER_CAPITA INCOME and SAFETY_SCORE with slope of 0.",
"_____no_output_____"
],
[
"## Q2 What is the relationship between enviroment score and hardship-index of schools that based on for schools with per capita income greater than 15000",
"_____no_output_____"
]
],
[
[
"sql = '''SELECT Environment_Score, HARDSHIP_INDEX FROM Totaldata WHERE PER_CAPITA_INCOME>15000'''\nq2 = pd.read_sql(sql, conn)\nq2",
"_____no_output_____"
],
[
"x = q2['Environment_Score'].values.reshape(-1,1)\ny = q2['HARDSHIP_INDEX'].values.reshape(-1,1)\nlinear_regression = LinearRegression()\nfit = linear_regression.fit(x,y)\nprint(fit.coef_)\nprint(fit.intercept_)",
"[[-0.28318272]]\n[55.02024999]\n"
],
[
"sns.regplot(x=\"Environment_Score\", y=\"HARDSHIP_INDEX\",data=q2,fit_reg=True)",
"_____no_output_____"
]
],
[
[
"#### There's slight evidence that HARDSHIP_INDEX decreases with increase in ENVIRONMENT_SCORE with slope of -0.283",
"_____no_output_____"
],
[
"### Q3 How do safety scores of school vary from different areas of the city ",
"_____no_output_____"
]
],
[
[
"sql = \"\"\"Select COMMUNITY_AREA_NAME, sum(SAFETY_SCORE) from Totaldata group by COMMUNITY_AREA_NAME\"\"\"\nq3 = pd.read_sql(sql,conn)\nq3",
"_____no_output_____"
],
[
"q3.describe()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nfig.set_size_inches(11.7, 8.27)\np = sns.scatterplot(data=q3, x=\"COMMUNITY_AREA_NAME\", y=\"sum(SAFETY_SCORE)\")\np = plt.setp(p.get_xticklabels(), rotation=90)",
"_____no_output_____"
]
],
[
[
"#### The mean of safety score amount all schools from all city is ~292 with a S.D of ~183 but according to the scatter plot we can see that it randomly scatterred",
"_____no_output_____"
],
[
"## Q4 Determine relationship between Instruction_Score to Average_Teacher_Attendence",
"_____no_output_____"
]
],
[
[
"sql = \"Select CAST(Average_Teacher_Attendance as INT), Instruction_Score from Totaldata\"\nq4 = pd.read_sql(sql, conn)\nq4",
"_____no_output_____"
],
[
"x = q4['CAST(Average_Teacher_Attendance as INT)'].values.reshape(-1,1)\ny = q4['Instruction_Score'].values.reshape(-1,1)\nlinear_regression = LinearRegression()\nfit = linear_regression.fit(x,y)\nprint(fit.coef_)\nprint(fit.intercept_)",
"[[-0.03777897]]\n[52.95268932]\n"
],
[
"sns.regplot(x=\"CAST(Average_Teacher_Attendance as INT)\", y=\"Instruction_Score\",data=q4,fit_reg=True)",
"_____no_output_____"
]
],
[
[
"#### From the linear regression and regression plot we can see that there is no relationship between Average_Attendence and institution score",
"_____no_output_____"
],
[
"## Q5 Is the relationship between PERCENT_HOUSEHOLDS_BELOW_POVERTY between different cities normally distributed",
"_____no_output_____"
]
],
[
[
"sql = \" SELECT PERCENT_HOUSEHOLDS_BELOW_POVERTY, COMMUNITY_AREA_NAME FROM Totaldata \"\nq5 = pd.read_sql(sql,conn)\nq5_2 = q5.pivot(columns='COMMUNITY_AREA_NAME',values='PERCENT_HOUSEHOLDS_BELOW_POVERTY')\n",
"_____no_output_____"
],
[
"q5_2.describe()",
"_____no_output_____"
],
[
"sns.distplot(q5['PERCENT_HOUSEHOLDS_BELOW_POVERTY'], color='b')",
"_____no_output_____"
]
],
[
[
"#### The percent households is not normally distributed between different cities",
"_____no_output_____"
],
[
"## Q6 Find the relationship between per_capita income to number of schools in each community area",
"_____no_output_____"
]
],
[
[
"sql = \"SELECT COMMUNITY_AREA_NAME, sum(PER_CAPITA_INCOME), count(SCHOOL_ID) from Totaldata group by COMMUNITY_AREA_NAME\"\nq6 = pd.read_sql(sql, conn)\nq6",
"_____no_output_____"
],
[
"x = q6['count(SCHOOL_ID)'].values.reshape(-1,1)\ny = q6['sum(PER_CAPITA_INCOME)'].values.reshape(-1,1)\nlinear_regression = LinearRegression()\nfit = linear_regression.fit(x,y)\nprint(fit.coef_)\nprint(fit.intercept_)",
"[[17677.08381689]]\n[36615.5377176]\n"
],
[
"sns.regplot(x=\"count(SCHOOL_ID)\", y=\"sum(PER_CAPITA_INCOME)\",data=q6,fit_reg=True)",
"_____no_output_____"
]
],
[
[
"#### There's a strong positive linear relationship between the number of school in an area to the per_capita_income",
"_____no_output_____"
],
[
"## Q7 What is the relationship between safety scores of schools in \"AUSTIN\" and \"ENGLEWOOD\"",
"_____no_output_____"
]
],
[
[
"sql = \"\"\"SELECT SAFETY_SCORE, COMMUNITY_AREA_NAME FROM Totaldata WHERE COMMUNITY_AREA_NAME='AUSTIN'\nor COMMUNITY_AREA_NAME='ENGLEWOOD'\"\"\"\nq7 = pd.read_sql(sql,conn)\nq7_2 = q7.pivot(columns='COMMUNITY_AREA_NAME',values='SAFETY_SCORE')\n",
"_____no_output_____"
],
[
"q7_2.describe()",
"_____no_output_____"
],
[
"q7.boxplot(column=\"SAFETY_SCORE\", by=\"COMMUNITY_AREA_NAME\", figsize=(8,6))\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### The average safety score of schools in the AUSTIN area is better than that of ENGLEWOOD",
"_____no_output_____"
],
[
"# Q8 What is the relationship between PERCENT_AGED_16_UNEMPLOYED and Instruction_Score for schools with safety score over 60?",
"_____no_output_____"
]
],
[
[
"q8 = \"SELECT PERCENT_AGED_16_UNEMPLOYED, INSTRUCTION_Score FROM Totaldata where SAFETY_SCORE > 60.0\"\nq8 = pd.read_sql(q8,conn)\nq8",
"_____no_output_____"
],
[
"x = q8['Instruction_Score'].values.reshape(-1,1)\ny = q8['PERCENT_AGED_16_UNEMPLOYED'].values.reshape(-1,1)\nlinear_regression = LinearRegression()\nfit = linear_regression.fit(x,y)\nprint(fit.coef_)\nprint(fit.intercept_)",
"[[0.07900946]]\n[6.15279682]\n"
],
[
"sns.regplot(x=\"Instruction_Score\", y=\"PERCENT_AGED_16_UNEMPLOYED\",data=q8,fit_reg=True)",
"_____no_output_____"
]
],
[
[
"#### There's not a strong linear relationship between instruction scores and percent_aged_16_unemployed for schools where safey score is greater than 60.",
"_____no_output_____"
],
[
"## Q9 How does PERCENT_OF_HOUSING_CROWDED compare for areas where school safety score is greater than 70.",
"_____no_output_____"
]
],
[
[
"sql = \" SELECT COMMUNITY_AREA_NAME, sum(PERCENT_OF_HOUSING_CROWDED) FROM Totaldata WHERE SAFETY_SCORE > 70 GROUP BY COMMUNITY_AREA_NAME\"\nq9 = pd.read_sql(sql, conn)\nq9",
"_____no_output_____"
],
[
"q9.describe()",
"_____no_output_____"
],
[
"sns.distplot(q9['sum(PERCENT_OF_HOUSING_CROWDED)'], color='b')",
"_____no_output_____"
]
],
[
[
"#### Percentage of crowded housing is most likely between 0-10% for schools with safety scores greater than 70.",
"_____no_output_____"
],
[
"# Q10 is there a relationship between community hardship to school environmental scores for school who's safety score is less than 50?",
"_____no_output_____"
]
],
[
[
"sql = \"SELECT HARDSHIP_INDEX, Environment_Score From Totaldata Where SAFETY_SCORE < 50\"\nq10 = pd.read_sql(sql, conn)\nq10",
"_____no_output_____"
],
[
"x = q10['Environment_Score'].values.reshape(-1,1)\ny = q10['HARDSHIP_INDEX'].values.reshape(-1,1)\nlinear_regression = LinearRegression()\nfit = linear_regression.fit(x,y)\nprint(fit.coef_)\nprint(fit.intercept_)",
"[[0.22114077]]\n[56.96111156]\n"
],
[
"sns.regplot(x=\"Environment_Score\", y=\"HARDSHIP_INDEX\",data=q10,fit_reg=True)",
"_____no_output_____"
]
],
[
[
"#### There's not a strong linear relationship between school environment scores and Hardship index in the community.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e743e7f5efa044ffac354662af3a45b00f0a175d | 666,751 | ipynb | Jupyter Notebook | Step 4 - Machine Learning on combined city data.ipynb | maireadmanifold/USA-Major-City-Crimes-Project | 8188d6317f4207f0b259d4f2bfcfbdfda764cecb | [
"MIT"
] | 1 | 2021-07-01T13:46:23.000Z | 2021-07-01T13:46:23.000Z | Step 4 - Machine Learning on combined city data.ipynb | maireadmanifold/USA-Major-City-Crimes-Project | 8188d6317f4207f0b259d4f2bfcfbdfda764cecb | [
"MIT"
] | null | null | null | Step 4 - Machine Learning on combined city data.ipynb | maireadmanifold/USA-Major-City-Crimes-Project | 8188d6317f4207f0b259d4f2bfcfbdfda764cecb | [
"MIT"
] | null | null | null | 58.445915 | 12,452 | 0.301339 | [
[
[
"## Machine Learning on combined USA crime data \n##\nimport pandas as pd\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\nurl_AllCleanedandShaped = 'https://raw.githubusercontent.com/maireadmanifold/files/main/AllCleanedandShapedCSV_Dec11.csv'\ndf_ML = pd.read_csv(url_AllCleanedandShaped, error_bad_lines=False, index_col=0)\n\n",
"_____no_output_____"
],
[
"print(\"No of rows\", df_ML.shape)\ndf_ML.head()",
"No of rows (5309, 71)\n"
],
[
"df_ML[\"State\"] = df_ML[\"STATEFP\"]\ndf_ML[\"County\"] = df_ML[\"COUNTYFP\"]\ndf_ML[\"Tract\"] = df_ML[\"TRACTCE\"]",
"_____no_output_____"
],
[
"df_ML.drop('STATEFP', inplace=True, axis=1)\ndf_ML.drop('COUNTYFP', inplace=True, axis=1)\ndf_ML.drop('TRACTCE', inplace=True, axis=1)",
"_____no_output_____"
],
[
"df_ML",
"_____no_output_____"
],
[
"df_ML.describe()",
"_____no_output_____"
],
[
"## save NAME, State, County and Tract before dropping these columns \ndata = [df_ML['NAME'],df_ML['State'], df_ML['County'], df_ML['Tract']]\nheaders = [\"NAME\", \"State\", \"County\", \"Tract\"]\ndf_ML_StateCountyTract = pd.concat(data, axis=1, keys = headers)\ndf_ML_StateCountyTract",
"_____no_output_____"
],
[
"df_ML_cleaned_NoCrime = df_ML.drop(['NAME','State', 'County', 'Tract', 'crime'], axis = 1)",
"_____no_output_____"
],
[
"\ndf_ML_cleaned_NoCrime.drop('index', inplace=True, axis=1)\ndf_ML_cleaned_NoCrime.drop('GEOID', inplace=True, axis=1)",
"_____no_output_____"
],
[
"##df_ML_cleaned_NoCrime \n#import numpy as np\n#x = np.asarray(x).astype('float32')\n\n#df_ML_cleaned_NoCrime = df_ML_cleaned_NoCrime.astype(float)\n\n#df_ML_cleaned_NoCrime = pd.to_numeric(df_ML_cleaned_NoCrime, errors='coerce')\n\nfor col in df_ML_cleaned_NoCrime:\n df_ML_cleaned_NoCrime[col] = pd.to_numeric(df_ML_cleaned_NoCrime[col], errors='coerce')",
"_____no_output_____"
],
[
"target = df_ML[['crime']]",
"_____no_output_____"
],
[
"target[1:10]",
"_____no_output_____"
],
[
"## not working df_ML_cleaned_NoCrime = df_ML_cleaned_NoCrime.astype('float32')",
"_____no_output_____"
],
[
"df_ML_cleaned_NoCrime",
"_____no_output_____"
],
[
"target",
"_____no_output_____"
],
[
"cleanup_nums = {\"crime\": {\"high\":1, \"low\":0}}\ntarget1 = target.replace(cleanup_nums)\ntarget1.dtypes\ntarget1",
"_____no_output_____"
],
[
"## adding back crime into df_ML_cleaned_WithCrime = df_ML_cleaned_NoCrime\n#df_ML_cleaned_WithCrime\ndf_ML_cleaned_WithCrime = df_ML_cleaned_NoCrime.join(target1)\ndf_ML_cleaned_WithCrime",
"_____no_output_____"
],
[
"df_ML_cleaned_WithCrime['crime'].unique()",
"_____no_output_____"
],
[
"df_ML_cleaned_NoCrime.isnull().values.any()",
"_____no_output_____"
],
[
"import numpy as np\n## df[:] = np.nan_to_num(df)\n## https://datascience.stackexchange.com/questions/11928/valueerror-input-contains-nan-infinity-or-a-value-too-large-for-dtypefloat32\n## might need to use the following if there were any NaN values but give former code\n## block there are none\ndf_ML_cleaned_NoCrime[:] = np.nan_to_num(df_ML_cleaned_NoCrime)",
"_____no_output_____"
],
[
"## checking variances \ndf_ML_cleaned_NoCrime.var()",
"_____no_output_____"
],
[
"## might need to do the following if there were any inf enteries but we got rid of them before\n## we imported the csv file from previous python code. \n##https://datascience.stackexchange.com/questions/11928/valueerror-input-contains-nan-infinity-or-a-value-too-large-for-dtypefloat32\nimport numpy as np\ndf_ML_cleaned_NoCrime[~df_ML_cleaned_NoCrime.isin([np.nan, np.inf, -np.inf]).any(1)]",
"_____no_output_____"
],
[
"## start of scaling data ",
"_____no_output_____"
],
[
"import keras\nfrom keras.layers import Dense\nfrom keras.models import Sequential\n# Save the number of columns in predictors: n_cols\nn_cols = df_ML_cleaned_NoCrime.shape[1]",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Dense(100, activation='relu', input_shape = (n_cols,)))\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mean_squared_error')",
"_____no_output_____"
],
[
"## After compile you fit it; prime example of applying back\n## propagation and gradient descent with your data to update the\n## weights. Scalling Data before fitting can ease optimization.\n# not working \n# model.fit(df_ML_cleaned_NoCrime, target)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"##X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)\n## https://stackabuse.com/k-nearest-neighbors-algorithm-in-python-and-scikit-learn/\n## remember that we need to replace the columns that have been standardized\n## in the df_ML_cleaned_NoCrime dataframe\nX_train, X_test, y_train, y_test = train_test_split(df_ML_cleaned_NoCrime, target1, test_size=0.20)",
"_____no_output_____"
],
[
"## Datacamp tutorial on creating a KNN model\nfrom sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors=3)\n#model = knn(n_neighbors=3)\n\n# Train the model using the training sets\nknn.fit(X_train,y_train)\nknn.score(X_test, y_test)\n#model.score\n\n#Predict Output\n#predicted= model.predict([[0,1]]) # 0:low, 1:high\n#predicted",
"<ipython-input-54-3553287696a0>:7: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n knn.fit(X_train,y_train)\n"
],
[
"y_pred = knn.predict(X_test)",
"_____no_output_____"
],
[
"X_train",
"_____no_output_____"
],
[
"## removing redundant features from df_ML_cleaned_NoCrime\ndf_ML_cleaned_NoCrime.corr()",
"_____no_output_____"
],
[
"##to_drop = [\"DP02_0010PE-% Non-Fam HHs\", \"DP02_0016E-Avg Fam Size\",\"DP02_0022PE-%Non Rel in Hs\", \"DP03_0037PE-Retail\",\"DP03_0040PE-%Fin&Ins\",\"DP03_0041PE-%ProfSc,Mgt,Admin\",\"DP03_0050PE-%UnpaidFamilyWk\",\"DP03_0052PE-Inc<10k\", \"DP03_0054PE-Inc15-24k\", \"DP03_0058PE-Inc75-99k\", \"DP03_0061PE-Inc200k>\",\"DP03_0066PE-%HHsWithSocSec\", \"DP03_0072PE-%HH Inc with Cash public Ass\", \"DP03_0074PE-%HH Inc with Food Stamps SNAP\", \"DP03_0085PE-%Families Inc>200k\",\"DP04_0002PE-%Occ H U\", \"DP04_0013PE-%20+HU\", \"DP04_0017PE-%Built2014+\",\"DP04_0029PE-%2rooms_only_HU\",\"DP04_0039PE-Zero bedrooms\",\"DP04_0045PE-%Occup HU\",\"DP04_0046PE-%Owner Occup HU\",\"DP04_0051PE-%moved since 2017+\",\"DP04_0054PE-%moved 2000 to 2009\"]\n##df_ML_cleaned_NoCrime_Subset = df_ML_cleaned_NoCrime.drop(to_drop, axis = 1)\n",
"_____no_output_____"
],
[
"#df_ML_cleaned_NoCrime_Subset.corr()",
"_____no_output_____"
],
[
"## from datacamp - Preprocessing ML with Python\n# Import StandardScaler from scikit-learn\nfrom sklearn.preprocessing import StandardScaler\nknn = KNeighborsClassifier(n_neighbors=3)\n## as k increases the decision boundry gets smoother and less curvy\n## higher k = less complex but overfitting and perform less well on \n## training and test data\n## lower k = complex model with lower k are sensitive to noise \n# Create the scaler\nss = StandardScaler()\n\n\ndf_ML_scaled = ss.fit_transform(df_ML_cleaned_NoCrime)\nX_train, X_test, y_train, y_test = train_test_split(df_ML_scaled, target1.values.ravel(), test_size=0.20, stratify = target1)\n\n# Train the model using the training sets\nknn.fit(X_train,y_train)\ny_pred = knn.predict(X_test)\nknn.score(X_test, y_test)\n",
"_____no_output_____"
],
[
"##from Datacamp Preprocessing for ML in Python\nfrom sklearn.decomposition import PCA\n\n# Set up PCA and the X vector for diminsionality reduction\npca = PCA()\n\n# Apply PCA to the wine dataset X vector\ndf_ML_scaled_pca = pca.fit_transform(df_ML_scaled)\n# Look at the percentage of variance explained by the different components\npca.explained_variance_ratio_",
"_____no_output_____"
],
[
"df_ML_scaled_pca ",
"_____no_output_____"
],
[
"## using df_ML_cleaned_NoCrime_Subset_pca instead of former df_ML_cleaned_NoCrime_Subset in \n## the knn analysis\n\n\nX_train, X_test, y_train, y_test = train_test_split(df_ML_scaled_pca, target1.values.ravel(), test_size=0.20)\n\n# Train the model using the training sets\nknn.fit(X_train,y_train)\nknn.score(X_test, y_test)",
"_____no_output_____"
],
[
"##Datacamp course on Scitkit-learn \nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')",
"_____no_output_____"
],
[
"type(df_ML_scaled_pca)",
"_____no_output_____"
],
[
"type(df_ML_cleaned_NoCrime)",
"_____no_output_____"
],
[
"df_ML_cleaned_NoCrime.keys()",
"_____no_output_____"
],
[
"df_ML_cleaned_NoCrime.columns",
"_____no_output_____"
],
[
"X = df_ML_cleaned_NoCrime\ny = target1",
"_____no_output_____"
],
[
"pr",
"_____no_output_____"
],
[
"pd.plotting.scatter_matrix(df_ML_cleaned_NoCrime, hist_kwds={'bins':30})",
"_____no_output_____"
],
[
"pd.plotting.scatter_matrix(df_ML_cleaned_NoCrime, diagonal = 'kde')",
"_____no_output_____"
],
[
"pd.plotting.scatter_matrix(df_ML_cleaned_NoCrime.iloc[:, 1:9])",
"_____no_output_____"
],
[
"import seaborn as sns\nplt.figure()\nsns.countplot(x='DP02_0008PE-%F-led HHs No H', hue='crime', data=df_ML_cleaned_WithCrime, palette='RdBu')\nplt.xticks([0,1], ['Low', 'High'])\nplt.show()",
"_____no_output_____"
],
[
"df_ML_cleaned_WithCrime[1:10]",
"_____no_output_____"
],
[
"df_ML_cleaned_WithCrime_WithSCT[1:10]",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e744054a4960239fe4ab2c619ad7c9ae5d9168ab | 11,057 | ipynb | Jupyter Notebook | notebooks/community/sdk/SDK_Tabular_Custom_Model_Training_asynchronous.ipynb | nayaknishant/vertex-ai-samples | 3ce120b953f1cdc2ec2c5a3f4509cfeab106b7d0 | [
"Apache-2.0"
] | 418 | 2019-06-26T05:55:42.000Z | 2022-03-31T10:46:57.000Z | notebooks/community/sdk/SDK_Tabular_Custom_Model_Training_asynchronous.ipynb | nayaknishant/vertex-ai-samples | 3ce120b953f1cdc2ec2c5a3f4509cfeab106b7d0 | [
"Apache-2.0"
] | 362 | 2019-06-26T20:41:17.000Z | 2022-02-10T16:02:16.000Z | notebooks/community/sdk/SDK_Tabular_Custom_Model_Training_asynchronous.ipynb | nayaknishant/vertex-ai-samples | 3ce120b953f1cdc2ec2c5a3f4509cfeab106b7d0 | [
"Apache-2.0"
] | 229 | 2019-06-29T17:55:33.000Z | 2022-03-14T15:52:58.000Z | 30.885475 | 442 | 0.528715 | [
[
[
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Feedback or issues?\nFor any feedback or questions, please open an [issue](https://github.com/googleapis/python-aiplatform/issues).",
"_____no_output_____"
],
[
"# Vertex SDK for Python: Custom Tabular Training (asynchronous) Example\n\nTo use this Colaboratory notebook, you copy the notebook to your own Google Drive and open it with Colaboratory (or Colab). You can run each step, or cell, and see its results. To run a cell, use Shift+Enter. Colab automatically displays the return value of the last line in each cell. For more information about running notebooks in Colab, see the [Colab welcome page](https://colab.research.google.com/notebooks/welcome.ipynb).\n\nThis notebook demonstrate how to create a custom model based on a tabular dataset (asynchronously). It will require you provide a bucket where the dataset CSV will be stored.\n\nNote: you may incur charges for training, prediction, storage or usage of other GCP products in connection with testing this SDK.",
"_____no_output_____"
],
[
"# Install Vertex SDK for Python, Authenticate, and upload of a Dataset to your GCS bucket\n\n\nAfter the SDK installation the kernel will be automatically restarted. You may see this error message `Your session crashed for an unknown reason` which is normal.",
"_____no_output_____"
]
],
[
[
"!pip3 uninstall -y google-cloud-aiplatform\n!pip3 install google-cloud-aiplatform\nimport IPython\n\napp = IPython.Application.instance()\napp.kernel.do_shutdown(True)",
"_____no_output_____"
]
],
[
[
"### Enter your project and GCS bucket\n\nEnter your Project Id in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.",
"_____no_output_____"
]
],
[
[
"import sys\n\nif \"google.colab\" in sys.modules:\n from google.colab import auth\n\n auth.authenticate_user()",
"_____no_output_____"
],
[
"MY_PROJECT = \"YOUR PROJECT\"\nMY_STAGING_BUCKET = \"gs://YOUR BUCKET\" # bucket should be in same region as Vertex AI",
"_____no_output_____"
]
],
[
[
"The dataset we are using is the Abalone Dataset. For more information about this dataset please visit: https://archive.ics.uci.edu/ml/datasets/abalone",
"_____no_output_____"
]
],
[
[
"!wget https://storage.googleapis.com/download.tensorflow.org/data/abalone_train.csv\n!gsutil cp abalone_train.csv {MY_STAGING_BUCKET}/data/\n\ngcs_csv_path = f\"{MY_STAGING_BUCKET}/data/abalone_train.csv\"",
"_____no_output_____"
]
],
[
[
"# Initialize Vertex SDK for Python\n\nInitialize the *client* for Vertex AI",
"_____no_output_____"
]
],
[
[
"from google.cloud import aiplatform\n\naiplatform.init(project=MY_PROJECT, staging_bucket=MY_STAGING_BUCKET)",
"_____no_output_____"
]
],
[
[
"# Create a Managed Tabular Dataset from CSV\n\nA Managed dataset can be used to create an AutoML model or a custom model. ",
"_____no_output_____"
]
],
[
[
"ds = aiplatform.TabularDataset.create(\n display_name=\"abalone\", gcs_source=[gcs_csv_path], sync=False\n)",
"_____no_output_____"
]
],
[
[
"# Write Training Script\n- Write this cell as a file which will be used for custom training.",
"_____no_output_____"
]
],
[
[
"%%writefile training_script.py\n\nimport pandas as pd\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\n# uncomment and bump up replica_count for distributed training\n# strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n# tf.distribute.experimental_set_strategy(strategy)\n\ncol_names = [\"Length\", \"Diameter\", \"Height\", \"Whole weight\", \"Shucked weight\", \"Viscera weight\", \"Shell weight\", \"Age\"]\ntarget = \"Age\"\n\ndef aip_data_to_dataframe(wild_card_path):\n return pd.concat([pd.read_csv(fp.numpy().decode(), names=col_names)\n for fp in tf.data.Dataset.list_files([wild_card_path])])\n\ndef get_features_and_labels(df):\n return df.drop(target, axis=1).values, df[target].values\n\ndef data_prep(wild_card_path):\n return get_features_and_labels(aip_data_to_dataframe(wild_card_path))\n\n\nmodel = tf.keras.Sequential([layers.Dense(64), layers.Dense(1)])\nmodel.compile(loss='mse', optimizer='adam')\n\nmodel.fit(*data_prep(os.environ[\"AIP_TRAINING_DATA_URI\"]),\n epochs=10 ,\n validation_data=data_prep(os.environ[\"AIP_VALIDATION_DATA_URI\"]))\nprint(model.evaluate(*data_prep(os.environ[\"AIP_TEST_DATA_URI\"])))\n\n# save as Vertex AI Managed model\ntf.saved_model.save(model, os.environ[\"AIP_MODEL_DIR\"])",
"_____no_output_____"
]
],
[
[
"# Launch a Training Job to Create a Model\n\nOnce we have defined your training script, we will create a model.",
"_____no_output_____"
]
],
[
[
"job = aiplatform.CustomTrainingJob(\n display_name=\"train-abalone-dist-1-replica\",\n script_path=\"training_script.py\",\n container_uri=\"gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest\",\n requirements=[\"gcsfs==0.7.1\"],\n model_serving_container_image_uri=\"gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest\",\n)\nmodel = job.run(ds, replica_count=1, model_display_name=\"abalone-model\", sync=False)",
"_____no_output_____"
]
],
[
[
"# Deploy Your Model\n\nDeploy your model, then wait until the model FINISHES deployment before proceeding to prediction.",
"_____no_output_____"
]
],
[
[
"endpoint = model.deploy(machine_type=\"n1-standard-4\", sync=False)",
"_____no_output_____"
]
],
[
[
"Wait for the deployment to complete",
"_____no_output_____"
]
],
[
[
"endpoint.wait()",
"_____no_output_____"
]
],
[
[
"# Predict on the Endpoint",
"_____no_output_____"
]
],
[
[
"prediction = endpoint.predict(\n [\n [0.435, 0.335, 0.11, 0.33399999999999996, 0.1355, 0.0775, 0.0965],\n [0.585, 0.45, 0.125, 0.874, 0.3545, 0.2075, 0.225],\n ]\n)\nprediction",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7441c4fd3147fa200dfbe8e06065bbb8901ee74 | 198,615 | ipynb | Jupyter Notebook | Task_2.ipynb | sassysoul/The-Spark-Foundation | b7eacb9f85faa66a091cc7c08707e240da6c765c | [
"MIT"
] | null | null | null | Task_2.ipynb | sassysoul/The-Spark-Foundation | b7eacb9f85faa66a091cc7c08707e240da6c765c | [
"MIT"
] | null | null | null | Task_2.ipynb | sassysoul/The-Spark-Foundation | b7eacb9f85faa66a091cc7c08707e240da6c765c | [
"MIT"
] | null | null | null | 265.883534 | 112,442 | 0.894137 | [
[
[
"**Task 2**\r\n\r\n**From the given ‘Iris’ dataset, predict the optimum number of clusters\r\nand represent it visually**\r\n\r\n**Dataset : https://bit.ly/3kXTdox**",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.patches as mpatches\r\nimport sklearn.metrics as sm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom scipy.cluster.hierarchy import linkage,dendrogram\r\nfrom sklearn.cluster import DBSCAN \r\nfrom sklearn.decomposition import PCA",
"_____no_output_____"
],
[
"iris = datasets.load_iris()\r\nprint(iris.data)",
"[[5.1 3.5 1.4 0.2]\n [4.9 3. 1.4 0.2]\n [4.7 3.2 1.3 0.2]\n [4.6 3.1 1.5 0.2]\n [5. 3.6 1.4 0.2]\n [5.4 3.9 1.7 0.4]\n [4.6 3.4 1.4 0.3]\n [5. 3.4 1.5 0.2]\n [4.4 2.9 1.4 0.2]\n [4.9 3.1 1.5 0.1]\n [5.4 3.7 1.5 0.2]\n [4.8 3.4 1.6 0.2]\n [4.8 3. 1.4 0.1]\n [4.3 3. 1.1 0.1]\n [5.8 4. 1.2 0.2]\n [5.7 4.4 1.5 0.4]\n [5.4 3.9 1.3 0.4]\n [5.1 3.5 1.4 0.3]\n [5.7 3.8 1.7 0.3]\n [5.1 3.8 1.5 0.3]\n [5.4 3.4 1.7 0.2]\n [5.1 3.7 1.5 0.4]\n [4.6 3.6 1. 0.2]\n [5.1 3.3 1.7 0.5]\n [4.8 3.4 1.9 0.2]\n [5. 3. 1.6 0.2]\n [5. 3.4 1.6 0.4]\n [5.2 3.5 1.5 0.2]\n [5.2 3.4 1.4 0.2]\n [4.7 3.2 1.6 0.2]\n [4.8 3.1 1.6 0.2]\n [5.4 3.4 1.5 0.4]\n [5.2 4.1 1.5 0.1]\n [5.5 4.2 1.4 0.2]\n [4.9 3.1 1.5 0.2]\n [5. 3.2 1.2 0.2]\n [5.5 3.5 1.3 0.2]\n [4.9 3.6 1.4 0.1]\n [4.4 3. 1.3 0.2]\n [5.1 3.4 1.5 0.2]\n [5. 3.5 1.3 0.3]\n [4.5 2.3 1.3 0.3]\n [4.4 3.2 1.3 0.2]\n [5. 3.5 1.6 0.6]\n [5.1 3.8 1.9 0.4]\n [4.8 3. 1.4 0.3]\n [5.1 3.8 1.6 0.2]\n [4.6 3.2 1.4 0.2]\n [5.3 3.7 1.5 0.2]\n [5. 3.3 1.4 0.2]\n [7. 3.2 4.7 1.4]\n [6.4 3.2 4.5 1.5]\n [6.9 3.1 4.9 1.5]\n [5.5 2.3 4. 1.3]\n [6.5 2.8 4.6 1.5]\n [5.7 2.8 4.5 1.3]\n [6.3 3.3 4.7 1.6]\n [4.9 2.4 3.3 1. ]\n [6.6 2.9 4.6 1.3]\n [5.2 2.7 3.9 1.4]\n [5. 2. 3.5 1. ]\n [5.9 3. 4.2 1.5]\n [6. 2.2 4. 1. ]\n [6.1 2.9 4.7 1.4]\n [5.6 2.9 3.6 1.3]\n [6.7 3.1 4.4 1.4]\n [5.6 3. 4.5 1.5]\n [5.8 2.7 4.1 1. ]\n [6.2 2.2 4.5 1.5]\n [5.6 2.5 3.9 1.1]\n [5.9 3.2 4.8 1.8]\n [6.1 2.8 4. 1.3]\n [6.3 2.5 4.9 1.5]\n [6.1 2.8 4.7 1.2]\n [6.4 2.9 4.3 1.3]\n [6.6 3. 4.4 1.4]\n [6.8 2.8 4.8 1.4]\n [6.7 3. 5. 1.7]\n [6. 2.9 4.5 1.5]\n [5.7 2.6 3.5 1. ]\n [5.5 2.4 3.8 1.1]\n [5.5 2.4 3.7 1. ]\n [5.8 2.7 3.9 1.2]\n [6. 2.7 5.1 1.6]\n [5.4 3. 4.5 1.5]\n [6. 3.4 4.5 1.6]\n [6.7 3.1 4.7 1.5]\n [6.3 2.3 4.4 1.3]\n [5.6 3. 4.1 1.3]\n [5.5 2.5 4. 1.3]\n [5.5 2.6 4.4 1.2]\n [6.1 3. 4.6 1.4]\n [5.8 2.6 4. 1.2]\n [5. 2.3 3.3 1. ]\n [5.6 2.7 4.2 1.3]\n [5.7 3. 4.2 1.2]\n [5.7 2.9 4.2 1.3]\n [6.2 2.9 4.3 1.3]\n [5.1 2.5 3. 1.1]\n [5.7 2.8 4.1 1.3]\n [6.3 3.3 6. 
2.5]\n [5.8 2.7 5.1 1.9]\n [7.1 3. 5.9 2.1]\n [6.3 2.9 5.6 1.8]\n [6.5 3. 5.8 2.2]\n [7.6 3. 6.6 2.1]\n [4.9 2.5 4.5 1.7]\n [7.3 2.9 6.3 1.8]\n [6.7 2.5 5.8 1.8]\n [7.2 3.6 6.1 2.5]\n [6.5 3.2 5.1 2. ]\n [6.4 2.7 5.3 1.9]\n [6.8 3. 5.5 2.1]\n [5.7 2.5 5. 2. ]\n [5.8 2.8 5.1 2.4]\n [6.4 3.2 5.3 2.3]\n [6.5 3. 5.5 1.8]\n [7.7 3.8 6.7 2.2]\n [7.7 2.6 6.9 2.3]\n [6. 2.2 5. 1.5]\n [6.9 3.2 5.7 2.3]\n [5.6 2.8 4.9 2. ]\n [7.7 2.8 6.7 2. ]\n [6.3 2.7 4.9 1.8]\n [6.7 3.3 5.7 2.1]\n [7.2 3.2 6. 1.8]\n [6.2 2.8 4.8 1.8]\n [6.1 3. 4.9 1.8]\n [6.4 2.8 5.6 2.1]\n [7.2 3. 5.8 1.6]\n [7.4 2.8 6.1 1.9]\n [7.9 3.8 6.4 2. ]\n [6.4 2.8 5.6 2.2]\n [6.3 2.8 5.1 1.5]\n [6.1 2.6 5.6 1.4]\n [7.7 3. 6.1 2.3]\n [6.3 3.4 5.6 2.4]\n [6.4 3.1 5.5 1.8]\n [6. 3. 4.8 1.8]\n [6.9 3.1 5.4 2.1]\n [6.7 3.1 5.6 2.4]\n [6.9 3.1 5.1 2.3]\n [5.8 2.7 5.1 1.9]\n [6.8 3.2 5.9 2.3]\n [6.7 3.3 5.7 2.5]\n [6.7 3. 5.2 2.3]\n [6.3 2.5 5. 1.9]\n [6.5 3. 5.2 2. ]\n [6.2 3.4 5.4 2.3]\n [5.9 3. 5.1 1.8]]\n"
],
[
"print(iris.target_names)",
"['setosa' 'versicolor' 'virginica']\n"
],
[
"print(iris.target)",
"[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2\n 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n 2 2]\n"
],
[
"x = iris.data\r\ny = iris.target",
"_____no_output_____"
],
[
"fig = plt.figure(1, figsize=(7,5))\r\nax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)\r\nax.scatter(x[:, 3], x[:, 0], x[:, 2], edgecolor=\"k\", s=50)\r\nax.set_xlabel(\"Petal width\")\r\nax.set_ylabel(\"Sepal length\")\r\nax.set_zlabel(\"Petal length\")\r\nplt.title(\"Iris Clustering K Means=3\", fontsize=14)\r\nplt.show()\r\nhier=linkage(x,\"ward\")\r\nmax_d=7.08\r\nplt.figure(figsize=(15,8))\r\nplt.title('Iris Hierarchical Clustering Dendrogram')\r\nplt.xlabel('Species')\r\nplt.ylabel('distance')\r\ndendrogram(\r\n hier,\r\n truncate_mode='lastp', \r\n p=50, \r\n leaf_rotation=90., \r\n leaf_font_size=8., \r\n)\r\nplt.axhline(y=max_d, c='k')\r\nplt.show()\r\n",
"_____no_output_____"
],
[
"\r\nx = pd.DataFrame(iris.data, columns=['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width'])\r\ny = pd.DataFrame(iris.target, columns=['Target'])",
"_____no_output_____"
],
[
"x.head()",
"_____no_output_____"
],
[
"y.head()",
"_____no_output_____"
],
[
"iris_k_mean_model=KMeans(n_clusters=3)\r\niris_k_mean_model.fit(x)",
"_____no_output_____"
],
[
"print(iris_k_mean_model.labels_)",
"[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n 2 2 2 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 2 0 0 0 0 2 0 0 0 0\n 0 0 2 2 0 0 0 0 2 0 2 0 2 0 0 2 2 0 0 0 0 0 2 0 0 0 0 2 0 0 0 2 0 0 0 2 0\n 0 2]\n"
],
[
"print(iris_k_mean_model.cluster_centers_)",
"[[6.85 3.07368421 5.74210526 2.07105263]\n [5.006 3.428 1.462 0.246 ]\n [5.9016129 2.7483871 4.39354839 1.43387097]]\n"
],
[
"plt.figure(figsize=(14,6))\r\n\r\ncolors = np.array(['red', 'green', 'blue'])\r\nred_patch=mpatches.Patch(color='red',label='Setosa')\r\ngreen_patch=mpatches.Patch(color='green',label='Versicolor')\r\nblue_patch=mpatches.Patch(color='blue',label='Virgicina')\r\n\r\npredictedY = np.choose(iris_k_mean_model.labels_, [1, 0, 2]).astype(np.int64)\r\nplt.subplot(1, 2, 1)\r\nplt.scatter(x['Petal Length'], x['Petal Width'], c=colors[y['Target']])\r\nplt.title('Before classification')\r\nplt.legend(handles=[red_patch,green_patch,blue_patch])\r\nplt.subplot(1, 2, 2)\r\nplt.scatter(x['Petal Length'], x['Petal Width'], c=colors[predictedY])\r\nplt.title(\"Model's classification\")\r\nplt.legend(handles=[red_patch,green_patch,blue_patch])",
"_____no_output_____"
],
[
"print(\"Accuracy= \",sm.accuracy_score(predictedY, y['Target']))",
"Accuracy= 0.44\n"
],
[
"print(\"Confusion Matrix:-\\n\")\r\nsm.confusion_matrix(predictedY, y['Target'])",
"Confusion Matrix:-\n\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7442693e3dfb59574c628363c33b635f86ce55e | 513,955 | ipynb | Jupyter Notebook | ReGex-Cookies.ipynb | miteshrj/projects | 56168b92c7bc610122963bf74360a2115834fcea | [
"MIT"
] | null | null | null | ReGex-Cookies.ipynb | miteshrj/projects | 56168b92c7bc610122963bf74360a2115834fcea | [
"MIT"
] | null | null | null | ReGex-Cookies.ipynb | miteshrj/projects | 56168b92c7bc610122963bf74360a2115834fcea | [
"MIT"
] | null | null | null | 114.390162 | 138,316 | 0.644333 | [
[
[
"# RegEx and Cookies using Python",
"_____no_output_____"
],
[
"### Importing libraries",
"_____no_output_____"
]
],
[
[
"import requests\nimport time\nfrom bs4 import BeautifulSoup\nimport re\nimport pandas as pd\nimport numpy as np\nimport collections as cl",
"_____no_output_____"
]
],
[
[
"### URL to be scraped",
"_____no_output_____"
]
],
[
[
"url = \"https://www.thyssenkrupp-elevator.com/kr/products/multi/\"",
"_____no_output_____"
]
],
[
[
"### Getting the source code",
"_____no_output_____"
]
],
[
[
"source_code = requests.get(url, headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36 Edg/79.0.309.71'})",
"_____no_output_____"
]
],
[
[
"### Creating a BeautifulSoup object",
"_____no_output_____"
]
],
[
[
"soup_object = BeautifulSoup(source_code.content, 'html.parser')",
"_____no_output_____"
]
],
[
[
"### Saving the source_code to htm file",
"_____no_output_____"
]
],
[
[
"file_to_Save = open(\"elevator.htm\", \"w\", encoding = 'utf-8')\nfile_to_Save.write(source_code.text)\nfile_to_Save.close()",
"_____no_output_____"
]
],
[
[
"### Opening and reading the \"elevator.htm\"",
"_____no_output_____"
]
],
[
[
"file_data = open(\"elevator.htm\", encoding = \"utf-8\").read()",
"_____no_output_____"
]
],
[
[
"### Stripping all `<tag>`s",
"_____no_output_____"
]
],
[
[
"print(re.sub(r'<.*?>', r'', file_data))",
"\n \n\t\n\thtml[lang=\"vi\"],html[lang=\"vi\"] body,a,h1,h2,h3,h4,div,li,ul {font-family: Arial, Verdana, sans-serif !important}\n\t\n\t\n\t\n\t\n\t\n\tMULTI: 승객 수송의 획기적인 변화 – 티센크루프엘리베이터\n\t\t\n\t\n\t\t\n\t\t\t\n\t\n \n \n \n \t\n \t \t\n \n \t\t \n\t\t\n \t\t\n \t\t\n \t\t\n \t\t\n \t\t\n\t\n \t\n \t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\n\t \n\t\n\t\n\t\n\twindow.addEventListener(\"load\", function(){\n\t\t(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':\n\t\tnew Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],\n\t\tj=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=\n\t\t'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);\n\t\t})(window,document,'script','dataLayer','GTM-N6L68KP');\n\t})\n\t\n\t\n\t\n\nSkip Navigation \n\n\t\n \n 이 페이지는 쿠키를 이용합니다. 사이트를 계속 탐색하면 쿠키 이용에 동의하게 됩니다. 자세한 내용은..\n 법률 자료\n \n \n \n 동의\n \n \n\n\t\n\t\t\t\t\t\n\t\t\t\n\t\t\t\tElevator Technology\n\t\t\t\tKorea\n\t\t\t\n\t\t\t\n\t\t\t\tthyssenkrupp\n\t\t\t\tengineering. tomorrow. 
together.\n\t\t\t\n\t\t\t\n\t \t더 읽기\n\t \n\t\t\t\t\t\t \n\t\t\t\t\t\t\t>로그인\n\t\t\t\t\t\t\n\t\t\t\t\t\t \n\t\t\t\t\t\t\t인재채용\n\t\t\t\t\t\t\n\t\t\t\t\t\t \n\t\t\t\t\t\t\t뉴스\n\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\t한국어\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\tEnglish\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\n \n\t\n \n\t\t\t\n\t\t\t\t메뉴\n\t\t\t\n \n \n\t\t\t\n\t\t\t\t\t\n제품\n\n\n뒤로\n닫기\n\n\n\n\n개요\n제품\n\n\n엘리베이터\n\n\n\n\n개요\n엘리베이터\n\n\nenta200\n\n\n\nmeta200\n\n\n\nzeta200\n\n\n\nTWIN\n\n\n\n\n\n에스컬레이터\n\n\n\n\n개요\n에스컬레이터\n\n\ntugela\n\n\n\nVelino\n\n\n\nVelino classic\n\n\n\nvictoria\n\n\n\n\n\n무빙워크\n\n\n\n\n개요\n무빙워크\n\n\niwalk\n\n\n\nOrinoco\n\n\n\n\n\nAGILE\n\n\n\nMULTI\n\n\n\nACCEL\n\n\n\nMAX\n\n\n\n홈 솔루션\n\n\n\n탑승교\n\n\n\n\n\n\n\n\n적용 범위\n\n\n뒤로\n닫기\n\n\n\n\n개요\n적용 범위\n\n\n교육\n\n\n\n엔터테인먼트\n\n\n\n병원\n\n\n\n호텔\n\n\n\n산업용\n\n\n\n교통시설\n\n\n\n오피스\n\n\n\n주거용\n\n\n\n상가 건물\n\n\n\n\n\n\n\n\n 서비스\n\n교체공사\n\n회사 소개\n\n\n뒤로\n닫기\n\n\n\n\n회사 개요\n\n\n\n연혁\n\n\n\n지속가능경영\n\n\n\n\n개요\n지속가능경영\n\n\n채용 정보\n\n\n\n환경\n\n\n\n협력사\n\n\n\n사회 공헌\n\n\n\n\n\n공정거래 자율준수\n\n\n\n\n\n\n\n\n연락처\n\n\n뒤로\n닫기\n\n\n\n\n개요\n연락처\n\n\n설치 및 서비스문의\n\n\n\n고객불만접수\n\n\n\n\n\n\n\n\t\t\t\n\t\t\t\n\t\t\n\t\n\t\n\t\n\t\t\n\t\t\tclose\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\tF\n\t\t\t\tsearch\n\t\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\n\t\n\n\n\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tMULTI\n\t\t\t\t\t\t\t\n\t\t\t\t\n\n\t\n\t\tHome\n 제품\n MULTI\n\t\n\n\n\n\n\n\n\t\n\t\t\t\t\n\t\t\n\t\n\t\t건물 내 이동의 획기적인 변화엘리베이터의 무게와 공간은 줄어들고 수송능력을 늘릴 수 있다고 상상해보십시오. 기존 엘리베이터가 가진 한계를 뛰어 넘어 자유로운 고층건물 설계가 가능해집니다.세계 최초의 로프 없는 엘리베이터 MULTI와 함께라면 모두 가능합니다. 하나의 승강로에 여러 대의 엘리베이터가 수직, 수평으로 움직이는 MULTI는 승강기의 새로운 가능성을 제시했습니다.MULTI의 특장점 수송능력의 획기적 증가엘리베이터 공간 감소엘리베이터 무게 및 부피 감소 MULTI는 자기부상열차에 사용되는 리니어 모터를 적용해 도시 이동성의 새로운 시대를 열어갑니다.더 많은 정보를 찾아보세요! 
엘리베이터 시스템의 수송능력과 유연성을 향상시키는 방법에 대해 알아보세요.\n\t\n\n\n \n\t\t\n\t \n\t \t\n\n\t\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t\t기술 혁명\n\t\t \t\t더 읽기\n\t \t\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t티센크루프는 엘리베이터 기술 및 서비스 혁신을 주도하고 있습니다. 우리는 엘리베이터가 발명된 이후 160년 동안 사용된 로프 대신 리니어 모터를 적용했고, 가능성은 확대되었습니다.\t\n\t\t\t\t\n\t\t\t접기\n\t\t\n\n\n\t\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t\t신축 테스타타워에서 MULTI 운행 예정\n\t\t \t\t더 읽기\n\t \t\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t2016년 완공된 독일 로트바일 테스트 타워는 엘리베이터 기술 혁신의 허브이며, 독일에서 가장 높은 전망대(232m)를 보유하고 있기도 합니다.\t\n\t\t\t\t\n\t\t\t접기\n\t\t\n\n\n\t\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t\t세계 최초의 로프 없는 엘리베이터 - MULTI \n\t\t \t\t더 읽기\n\t \t\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\tMULTI는 리니어 모터 기술을 적용해 단일화된 승강로에서 여러 대의 카를 수직 및 수평으로 이동시킵니다.\t\n\t\t\t\t\n\t\t\t접기\n\t\t\n\n\t\t\t\n\t \n\t \n\t \n\t \n\t \n\t \n\t \n\t \t \n\t \n\t\t \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t \n\t\t \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t \n\t\t \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\n\t \n\t Prev\n\t \tNext\n\t \n\t \n\t\n\n\t\n\t\n\t\t\n\n\nMULTI micro site\n\n\n\n\n\n\n\n\n\n세계 최초 로프 없는 엘리베이터 MULTI는 고층건물 건축에 혁명적인 변화를 가져올 것입니다.\n\n\n\n상세 정보\n\n\n\n\n\n동영상\n\n\n\nMULTI 제품 영상\n\n\n\n\n\n\n\nMULTI 3D\n\n\n\t\t\t\t\n\t\t\t\t\t도시와 함께 건물도 진화합니다.\n\t\t\t\t\n\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t Urban Hub 건축물에 혁명적인 변화를 가져올 미래형 엘리베이터 \n\n\t\t\t\t \t\n\t\t\t\n\n\t\n\n\n\t\n\n\n\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\tElevator Technology\n\t\t\t\t\t\tKorea\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t제품\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t교체 공사\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t서비스\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t유지보수 관련자료\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t보수부품 단가조회 
\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t#e\n\t\t\t\t\t\t\t\tFacebook\n\t\t\t\t\t\t\t\tTwitter\n\t\t\t\t\t\t\t\tYoutube\n\t\t\t\t\t\t\t\tLinkedIn\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\n\t\t\t\n\t\n\t\t티센크루프엘리베이터 © 2019\n\t\t\t\t\t\t\n\t\t\t\n\t\t\t\tSite map\n\t\t\t\tImprint\n\t\t\t\tLegal notes\n\t\t\t\t\n\t\t\t\t\tShares\n\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\tFacebook\n\t\t\t\t\t\t\tTwitter\n\t\t\t\t\t\t\tGplus\n\t\t\t\t\t\t\tMail\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\n\t\n\n\t\n\t \n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n \t\n\n"
]
],
[
[
"### Selecting Korean characters that come right before \".\"",
"_____no_output_____"
]
],
[
[
"korean_char_before_dot = (re.findall(r'([^a-zA-Z0-9\\s\\[\\]\\{},\\<\\\\>\\!\\@\\#\\$\\%\\^\\&\\*\\(\\)\\/\\\"\\'\\.\\|])\\.[^\\.]', source_code.text))",
"_____no_output_____"
],
[
"str(korean_char_before_dot)",
"_____no_output_____"
]
],
[
[
"### Saving the characters to the file \"Korean_char.txt\"",
"_____no_output_____"
]
],
[
[
"Korean_char_file = open(\"Korean_char.txt\", \"w\", encoding = 'utf-8')\nKorean_char_file.write(str(korean_char_before_dot))\nKorean_char_file.close()",
"_____no_output_____"
]
],
[
[
"### Printing the most common Korean character that occurs before the dot",
"_____no_output_____"
]
],
[
[
"cl.Counter(korean_char_before_dot).most_common(1)",
"_____no_output_____"
]
],
[
[
"### Selecting all Korean characters that occur on the page",
"_____no_output_____"
],
[
"\\[^ -~\\] deselects all ASCII characters",
"_____no_output_____"
],
[
"\\[^ \\s\\] deselcts all white-spaces including \\n, \\t, etc.",
"_____no_output_____"
]
],
[
[
"all_korean_characters = (re.findall(r'[^ -~\\s–]', source_code.text))",
"_____no_output_____"
]
],
[
[
"### Printing the most comon Korean character on the webpage",
"_____no_output_____"
]
],
[
[
"cl.Counter(all_korean_characters).most_common(1)",
"_____no_output_____"
]
],
[
[
"### Details of the email ID, password and username used to create an account on www.allrecipes.com \n\n__Email ID__ = [email protected] <br>\n__Password__ = 12345678aA <br>\n__Username__ = 'Test'",
"_____no_output_____"
],
[
"### Sign-in URL for www.allrecipes.com",
"_____no_output_____"
]
],
[
[
"signin_url = \"https://www.allrecipes.com/account/signin/\"",
"_____no_output_____"
]
],
[
[
"### Getting the code of the webpage",
"_____no_output_____"
]
],
[
[
"signin_code = requests.get(signin_url, headers = {'user-agent':'Mozilla/5.0'})",
"_____no_output_____"
]
],
[
[
"### Creating a BeautifulSoup object",
"_____no_output_____"
]
],
[
[
"signin_soup = BeautifulSoup(signin_code.content, 'html.parser')",
"_____no_output_____"
]
],
[
[
"### Printing the code of the webpage",
"_____no_output_____"
]
],
[
[
"print(signin_soup)",
"\n<!DOCTYPE html>\n\n<html lang=\"en-us\">\n<head>\n<title>Allrecipes - Signin</title>\n<script async=\"true\" src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/karma.bundled.js\"></script>\n<!--Make our website baseUrl available to the client-side code-->\n<script type=\"text/javascript\">\r\n var AR = AR || {};\r\n\r\n AR.segmentWriteKey = \"RnmsxUrjIjM7W62olfjKgJrcsVlxe68V\";\r\n AR.baseWebsiteUrl = 'https://www.allrecipes.com';\r\nwindow.dataLayer={\"version\":\"1.0\",\"pageInstanceId\":\"www.allrecipes.com/account/signin/\",\"externalLinkId\":\"\",\"page\":{\"pageInfo\":{\"pageId\":\"\",\"pageName\":\"/account/signin/\",\"destinationUrl\":\"https://www.allrecipes.com/account/signin/?testcookieredirect=1\",\"sysEnv\":\"RD0003FFB37290\",\"variant\":\"Control\",\"version\":\"\",\"issueDate\":\"01/24/2020 00:52:35\",\"effectiveDate\":\"01/24/2020 00:52:35\",\"domain\":\"www.allrecipes.com\",\"parameters\":{}},\"category\":{\"primaryCategory\":\"tools\",\"contentType\":\"sign in\",\"subContentType\":\"\",\"adZone\":\"\",\"contentSource\":\"\"},\"attributes\":{\"contentId\":\"\",\"title\":\"\",\"country\":\"USA\"}},\"event\":[{\"eventInfo\":{\"eventName\":\"Login Started\"},\"category\":{\"primaryCategory\":\"Internal Source\"},\"attributes\":{\"itemId\":\"anonymous\",\"sourceContentType\":\"\",\"clickId\":\"\"}}],\"user\":[{\"analyticsId\":\"\",\"segment\":{\"adStatus\":\"visitor\",\"visitorType\":\"anonymous\",\"loginStatus\":\"no\"},\"profile\":[{\"profileInfo\":{\"profileId\":\"0\",\"loginType\":\"None\"}}],\"magFollower\":false}],\"newsletter\":{\"mailingId\":\"\",\"mailingName\":\"\",\"mailingDate\":\"\",\"mailingLinkGroup\":\"\",\"mailingLinkName\":\"\"}}; </script>\n<script type=\"text/javascript\">\r\n //Remove Ref_Hub from session after first recipe visited\r\n var hubId = window.sessionStorage[\"Ref_Hub_Id\"];\r\n var count = window.sessionStorage[\"Ref_Hub_Recipe_Count\"];\r\n if (hubId && count) {\r\n if (count > 0) {\r\n 
window.sessionStorage.removeItem(\"Ref_Hub_Id\");\r\n window.sessionStorage.removeItem(\"Ref_Hub_Recipe_Count\");\r\n }\r\n }\r\n </script>\n<meta content=\"Allrecipes - Signin\" property=\"og:title\"/>\n<meta content=\"Allrecipes\" property=\"og:site_name\"/>\n<meta charset=\"utf-8\"/>\n<meta content=\"width=device-width, initial-scale=1.0\" name=\"viewport\"/>\n<meta content=\"noindex,nofollow\" name=\"robots\"/>\n<link href=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/main-css.bundled.Css\" rel=\"stylesheet\"/>\n<link href=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/account-css.bundled.Css\" rel=\"stylesheet\"/>\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/social-signin-js.bundled.js\"></script>\n<link href=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/croppie-css.bundled.Css\" rel=\"stylesheet\"/>\n<meta content=\"66102450266\" property=\"fb:app_id\"/>\n<meta content=\"71158748377\" property=\"fb:pages\"/>\n<script type=\"text/javascript\">window.NREUM||(NREUM={});NREUM.info = {\"beacon\":\"bam.nr-data.net\",\"errorBeacon\":\"bam.nr-data.net\",\"licenseKey\":\"55db0cb698\",\"applicationID\":\"161785443,90586318\",\"transactionName\":\"YwABYUUDXUIABRZbCVpKIllbEFZSCBYHQTFRBxBcQwcccAIFDUcIQCYMW0MQXF0NAxAcNV0CDXxZSho=\",\"queueTime\":0,\"applicationTime\":26,\"agent\":\"\",\"atts\":\"\"}</script><script type=\"text/javascript\">(window.NREUM||(NREUM={})).loader_config={licenseKey:\"55db0cb698\",applicationID:\"161785443\"};window.NREUM||(NREUM={}),__nr_require=function(n,e,t){function r(t){if(!e[t]){var i=e[t]={exports:{}};n[t][0].call(i.exports,function(e){var i=n[t][1][e];return r(i||e)},i,i.exports)}return e[t].exports}if(\"function\"==typeof __nr_require)return __nr_require;for(var i=0;i<t.length;i++)r(t[i]);return r}({1:[function(n,e,t){function r(){}function i(n,e,t){return function(){return 
o(n,[u.now()].concat(f(arguments)),e?null:this,t),e?void 0:this}}var o=n(\"handle\"),a=n(4),f=n(5),c=n(\"ee\").get(\"tracer\"),u=n(\"loader\"),s=NREUM;\"undefined\"==typeof window.newrelic&&(newrelic=s);var p=[\"setPageViewName\",\"setCustomAttribute\",\"setErrorHandler\",\"finished\",\"addToTrace\",\"inlineHit\",\"addRelease\"],d=\"api-\",l=d+\"ixn-\";a(p,function(n,e){s[e]=i(d+e,!0,\"api\")}),s.addPageAction=i(d+\"addPageAction\",!0),s.setCurrentRouteName=i(d+\"routeName\",!0),e.exports=newrelic,s.interaction=function(){return(new r).get()};var m=r.prototype={createTracer:function(n,e){var t={},r=this,i=\"function\"==typeof e;return o(l+\"tracer\",[u.now(),n,t],r),function(){if(c.emit((i?\"\":\"no-\")+\"fn-start\",[u.now(),r,i],t),i)try{return e.apply(this,arguments)}catch(n){throw c.emit(\"fn-err\",[arguments,this,n],t),n}finally{c.emit(\"fn-end\",[u.now()],t)}}}};a(\"actionText,setName,setAttribute,save,ignore,onEnd,getContext,end,get\".split(\",\"),function(n,e){m[e]=i(l+e)}),newrelic.noticeError=function(n,e){\"string\"==typeof n&&(n=new Error(n)),o(\"err\",[n,u.now(),!1,e])}},{}],2:[function(n,e,t){function r(n,e){var t=n.getEntries();t.forEach(function(n){\"first-paint\"===n.name?a(\"timing\",[\"fp\",Math.floor(n.startTime)]):\"first-contentful-paint\"===n.name&&a(\"timing\",[\"fcp\",Math.floor(n.startTime)])})}function i(n){if(n instanceof c&&!s){var e,t=Math.round(n.timeStamp);e=t>1e12?Date.now()-t:f.now()-t,s=!0,a(\"timing\",[\"fi\",t,{type:n.type,fid:e}])}}if(!(\"init\"in NREUM&&\"page_view_timing\"in NREUM.init&&\"enabled\"in NREUM.init.page_view_timing&&NREUM.init.page_view_timing.enabled===!1)){var o,a=n(\"handle\"),f=n(\"loader\"),c=NREUM.o.EV;if(\"PerformanceObserver\"in window&&\"function\"==typeof window.PerformanceObserver){o=new PerformanceObserver(r);try{o.observe({entryTypes:[\"paint\"]})}catch(u){}}if(\"addEventListener\"in document){var 
s=!1,p=[\"click\",\"keydown\",\"mousedown\",\"pointerdown\",\"touchstart\"];p.forEach(function(n){document.addEventListener(n,i,!1)})}}},{}],3:[function(n,e,t){function r(n,e){if(!i)return!1;if(n!==i)return!1;if(!e)return!0;if(!o)return!1;for(var t=o.split(\".\"),r=e.split(\".\"),a=0;a<r.length;a++)if(r[a]!==t[a])return!1;return!0}var i=null,o=null,a=/Version\\/(\\S+)\\s+Safari/;if(navigator.userAgent){var f=navigator.userAgent,c=f.match(a);c&&f.indexOf(\"Chrome\")===-1&&f.indexOf(\"Chromium\")===-1&&(i=\"Safari\",o=c[1])}e.exports={agent:i,version:o,match:r}},{}],4:[function(n,e,t){function r(n,e){var t=[],r=\"\",o=0;for(r in n)i.call(n,r)&&(t[o]=e(r,n[r]),o+=1);return t}var i=Object.prototype.hasOwnProperty;e.exports=r},{}],5:[function(n,e,t){function r(n,e,t){e||(e=0),\"undefined\"==typeof t&&(t=n?n.length:0);for(var r=-1,i=t-e||0,o=Array(i<0?0:i);++r<i;)o[r]=n[e+r];return o}e.exports=r},{}],6:[function(n,e,t){e.exports={exists:\"undefined\"!=typeof window.performance&&window.performance.timing&&\"undefined\"!=typeof window.performance.timing.navigationStart}},{}],ee:[function(n,e,t){function r(){}function i(n){function e(n){return n&&n instanceof r?n:n?c(n,f,o):o()}function t(t,r,i,o){if(!d.aborted||o){n&&n(t,r,i);for(var a=e(i),f=v(t),c=f.length,u=0;u<c;u++)f[u].apply(a,r);var p=s[y[t]];return p&&p.push([b,t,r,a]),a}}function l(n,e){h[n]=v(n).concat(e)}function m(n,e){var t=h[n];if(t)for(var r=0;r<t.length;r++)t[r]===e&&t.splice(r,1)}function v(n){return h[n]||[]}function g(n){return p[n]=p[n]||i(t)}function w(n,e){u(n,function(n,t){e=e||\"feature\",y[t]=e,e in s||(s[e]=[])})}var h={},y={},b={on:l,addEventListener:l,removeEventListener:m,emit:t,get:g,listeners:v,context:e,buffer:w,abort:a,aborted:!1};return b}function o(){return new r}function a(){(s.api||s.feature)&&(d.aborted=!0,s=d.backlog={})}var f=\"nr@context\",c=n(\"gos\"),u=n(4),s={},p={},d=e.exports=i();d.backlog=s},{}],gos:[function(n,e,t){function r(n,e,t){if(i.call(n,e))return n[e];var 
r=t();if(Object.defineProperty&&Object.keys)try{return Object.defineProperty(n,e,{value:r,writable:!0,enumerable:!1}),r}catch(o){}return n[e]=r,r}var i=Object.prototype.hasOwnProperty;e.exports=r},{}],handle:[function(n,e,t){function r(n,e,t,r){i.buffer([n],r),i.emit(n,e,t)}var i=n(\"ee\").get(\"handle\");e.exports=r,r.ee=i},{}],id:[function(n,e,t){function r(n){var e=typeof n;return!n||\"object\"!==e&&\"function\"!==e?-1:n===window?0:a(n,o,function(){return i++})}var i=1,o=\"nr@id\",a=n(\"gos\");e.exports=r},{}],loader:[function(n,e,t){function r(){if(!x++){var n=E.info=NREUM.info,e=l.getElementsByTagName(\"script\")[0];if(setTimeout(s.abort,3e4),!(n&&n.licenseKey&&n.applicationID&&e))return s.abort();u(y,function(e,t){n[e]||(n[e]=t)}),c(\"mark\",[\"onload\",a()+E.offset],null,\"api\");var t=l.createElement(\"script\");t.src=\"https://\"+n.agent,e.parentNode.insertBefore(t,e)}}function i(){\"complete\"===l.readyState&&o()}function o(){c(\"mark\",[\"domContent\",a()+E.offset],null,\"api\")}function a(){return O.exists&&performance.now?Math.round(performance.now()):(f=Math.max((new Date).getTime(),f))-E.offset}var f=(new Date).getTime(),c=n(\"handle\"),u=n(4),s=n(\"ee\"),p=n(3),d=window,l=d.document,m=\"addEventListener\",v=\"attachEvent\",g=d.XMLHttpRequest,w=g&&g.prototype;NREUM.o={ST:setTimeout,SI:d.setImmediate,CT:clearTimeout,XHR:g,REQ:d.Request,EV:d.Event,PR:d.Promise,MO:d.MutationObserver};var h=\"\"+location,y={beacon:\"bam.nr-data.net\",errorBeacon:\"bam.nr-data.net\",agent:\"js-agent.newrelic.com/nr-1158.min.js\"},b=g&&w&&w[m]&&!/CriOS/.test(navigator.userAgent),E=e.exports={offset:f,now:a,origin:h,features:{},xhrWrappable:b,userAgent:p};n(1),n(2),l[m]?(l[m](\"DOMContentLoaded\",o,!1),d[m](\"load\",r,!1)):(l[v](\"onreadystatechange\",i),d[v](\"onload\",r)),c(\"mark\",[\"firstbyte\",f],null,\"api\");var x=0,O=n(6)},{}],\"wrap-function\":[function(n,e,t){function r(n){return!(n&&n instanceof Function&&n.apply&&!n[a])}var 
i=n(\"ee\"),o=n(5),a=\"nr@original\",f=Object.prototype.hasOwnProperty,c=!1;e.exports=function(n,e){function t(n,e,t,i){function nrWrapper(){var r,a,f,c;try{a=this,r=o(arguments),f=\"function\"==typeof t?t(r,a):t||{}}catch(u){d([u,\"\",[r,a,i],f])}s(e+\"start\",[r,a,i],f);try{return c=n.apply(a,r)}catch(p){throw s(e+\"err\",[r,a,p],f),p}finally{s(e+\"end\",[r,a,c],f)}}return r(n)?n:(e||(e=\"\"),nrWrapper[a]=n,p(n,nrWrapper),nrWrapper)}function u(n,e,i,o){i||(i=\"\");var a,f,c,u=\"-\"===i.charAt(0);for(c=0;c<e.length;c++)f=e[c],a=n[f],r(a)||(n[f]=t(a,u?f+i:i,o,f))}function s(t,r,i){if(!c||e){var o=c;c=!0;try{n.emit(t,r,i,e)}catch(a){d([a,t,r,i])}c=o}}function p(n,e){if(Object.defineProperty&&Object.keys)try{var t=Object.keys(n);return t.forEach(function(t){Object.defineProperty(e,t,{get:function(){return n[t]},set:function(e){return n[t]=e,e}})}),e}catch(r){d([r])}for(var i in n)f.call(n,i)&&(e[i]=n[i]);return e}function d(e){try{n.emit(\"internal-error\",e)}catch(t){}}return n||(n=i),t.inPlace=u,t.flag=a,t}},{}]},{},[\"loader\"]);</script>\n<link href=\"/ar-favicon-192.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"192x192\"/>\n<link href=\"/ar-favicon-180.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"180x180\"/>\n<link href=\"/ar-favicon-152.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"152x152\"/>\n<link href=\"/ar-favicon-120.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"120x120\"/>\n<link href=\"/ar-favicon-114.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"114x114\"/>\n<link href=\"/ar-favicon-96.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"96x96\"/>\n<link href=\"/ar-favicon-76.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"76x76\"/>\n<link href=\"/ar-favicon-72.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"72x72\"/>\n<link href=\"/ar-favicon-57.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"57x57\"/>\n<link href=\"/ar-favicon-114.png\" rel=\"icon\" sizes=\"114x114\" type=\"image/png\"/>\n<link 
href=\"/ar-favicon-16.png\" rel=\"icon\" sizes=\"16x16\" type=\"image/png\"/>\n<link href=\"/ar-favicon-32.png\" rel=\"icon\" sizes=\"32x32\" type=\"image/png\"/>\n<meta content=\"#ffffff\" name=\"msapplication-TileColor\"/>\n<style>\n /*Critical Foft with DataUri*/\n @font-face {\n font-family: 'Source Sans Pro';\n font-style: normal;\n font-weight: 400;\n src: local('Source Sans Pro Regular'), local('SourceSansPro-Regular'), url(\"data:application/x-font-woff;charset=utf-8;base64,d09GMgABAAAAAD4kABEAAAAAmwQAAD3CAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGkYbsmAchiAGYACNFggqCYJzEQgKgcwQgbINC4QaAAE2AiQDiC4EIAWFMgeJGwxWG7iKF9g27YNhtwPg/rvfFGaDabcDJUr8QyMRehxgrSqz//+/JydjFGDHtmla38MUyOqcy5zIuDkDS9xYoi4IaZMtNh1MPC2t/apAKGMpT3108cseeN4t8voHIXYgRGUG5V9RClMhyEHdX5Vt9IlskYi9ZNm8MNs4tacyTIssTmEjaarGIxLh7jRSNPmH/bOT/3nH54vMNvvCG2UKq/R4eLRhtch0U9LrTV+nYYrCEVFp0frOgTl6BraN/ElOXvh/su3r3KrqRvSbN4yaZSuxikfOn3XJzKIJ1xR4f869LxkBpxkhpIjpB6F2U24SUIv9qLO+J7FlSUYZZEjsOICzk8zMzn4gqOiAKsQWsGivJSjvmu7qzwP8/vrOm5IXd2EmCMmIkD+E3ZBEDc//c4je9344YNsDmm0bky4q9lRwCtS1Yv0ExKVtCmDGz1M51rjUAtaIw6FQoqHnH/8O9AM0t25s5JEC67htbA0DlsVgTYdBqaBgFmaB/xhoPwYYFe/L65dV74e9NV2uwjO78iw3beW0jBnZMreE8y9RKrjg0IajnNqsFR/MWMEyGEoAC7ZUhMfnjqPv6usiG6hsgiIEJudyamreTU24qUkA5MeWU0i+TQEvdxtac2ahqCUIEULfBrWQReRN5KQA/nn8555diXtZ4B8Grgnu+ARL8D9/kO9vH6y7KMEeZhSIbks5qQYUALj8/9v0c3dD72oy+aNFTYh1cpwjhxdr/wBQR1y/ee/JM2/ejNg5ksYgy7s+trwg+CQ5IM/YP7IcYhB80F9gKIMVAHdQtNyUVDYLXcJ9uhRNkzIhjJ3wAYbh3F5HGAaGcf6Xqtny/U8I0jliYLnHwKkpSVPjorG7glx8cgdcYvYC7jLtXF2InROEXVIBvBBD5aJz5aIp7f//qZqf19udscQLmItfSwEtWE5pvq4JNMU/YLFodajJmJIXlDjiKq9004jx5/bVVxOPyli62dZxHXnrWFYheGvjPzbO5hbWJdvbq3tNRCRIkCFk6zE2qov+SjfKwkKQScrN39EIkAcAOHmwccicOciefcitW4hCgdgcSEICkpSG/fsqggBZZ7jMomnvDOrnbaEN3esmxYKRQyl3YUEmADMBxUA0EKOMd2JfGbpQ7ycAov/SkwkDgmJPkXd7FFIuExA06zVjDZIpR41Rr2CSJrJOmZhKoiKScbIl2DPCC0IGpoRkmWwZDv0JCvTQPZxWVdBmtYbxKtZdiiHRp+5bWMH2wK5IfYlmYBj2Baqdu5wB43Zbrh49UDCGwoILI7wksuDcy8nAL7TbiwN/p3/Brp5qNJIXZBjbfUPXZQztk8kQ5nf+4I/+5C9wMNFwMQ+X8vAi/uRP/cu8I9gjWIryJC/UinkpL5dxl
EKoZFQiNcdLNAptlnpztUFlJE0aS4Gd2kHtSueB3jXtzLrI3Yjd5uDO41JPHu9lby73W94Veg8S7/8RPvSR0sc+r/YFX9b4mq+rfdN39L7re+IX7Kr8wi/7G0B+nxjiXed6ETfw3ChwE8/NGbjFrcRtEreLuUPizoy5y72GPPvSxxXLBSg++L4x3kSYWY4TZwnc5uf+owsq6Tw0aQ/w/uQTI9A1xExp+8YgSYSM/8T64kWuS+K35LIXRLaAcshc0xJ50s1iKATO5y0pkFwTWWIOlRETk850stt5Ai49JbNizSD29RR9fmMhPMtM8bRGUomsRY4UBOZ3F3BMGTBYHApslzgnIkopRya6xp4mgKwHdOvzBlWqsvBqnB9lJlvOnRboVQXYq44wRyKikEPl8aFqrMU2PATMFM2caYqBnuzeCIS47VtHqPHm1BOwtiQDqSJczeJzFVNmlj6sosjRgPxFV81y9yxPXU92DPNH5fuTYBIbwuXwBEm07SfSo4cH0VLn0kW9ZD7qAsXGqQTIJrMlhEy9l3/SpPlauJqZfc0/ZMG6tjWnHYQKFKKudR0xSwdtmtF+e3DcQiVG19tsYNIMIjKFT8tVJQED1a6Nj3HeB0vAPNgFBznWGmt/uRa1galk/e33YYa5OXtWmAYe8rTMEX4CRZkE8tm56/1Zpibtj9e9a0TKpRIIFH7iL7uY/m4K9lkzWxK1qdlq2EJ2VGrOU12F7HiwZJgrVhgqGHvAj8ay9KqxqavGWJKnMy2mS4z3ae39P1+vMAZZ3iwenNPNSSqdJryDHY/hY7eCnncx/HRAXK5HW/eergoosPhDPZkRsR/ppeqqj0YCWe6OyehSIjnTc8/01Jho9o2wuWbLUDD7XCwaPQXfKj5Pa9NLEGYbZzEOC4nDlM308uQXFfdUvMTN09kt0MP6tEFqnkqbQqW5mEeKaoC0IxHZXRBdbfQ6S1Ha+/zeiNWR3+Bnik6Nshajj5bJKxNs4KfB/Midqett2NzPS9nqVvr7Iluy1qwrlgrSDNtxNJiyY/Th5VF8rJ4reJKJnJQlFedJehRrUt2Yp2NOjMXx1p+vMMvOOtXmD4WNbO7NGQOLnWJn1j0naRgJdhRZtlRAA1IvzXWZbBV7t9lcyA1vw6P3d9oGIOnL/7884ld5vbGnn608Zo/ap7XwEMXN3/hHpoIuj5kBPCNoDyNifr7RHl1p4w2zZh0c1PuupqjecbsXOVyN6LmEXXX0bVikI5AjITYzJXfoLCIoYlpwZd1nuTEzEkZzldr4pzswI2tasTN2Y086wff7txQpE1HMRKsAqOwa4dc559TO+1lxnY3D7OYldbEp0hoUQUnH3Qf6euhj2JOEH91oaHEoNz/FVujYE/bsb8Q8EoCDjgbzIcpf+cKl7Pt/T0eEYxBAgoaAqQRFpRYFRmvTsDwg2I8gGOVwUkgzrFhK05yCU/E6M4OFpbUtgBwBwAhKBgAgkshkcrleqdbqzY0mKxto1cdRXDZ3rrkrcLPXX3Ma3oMhGMlg9Ul16mMCPaxCAXJCS0t93v8wfp8dxjpwzDaifyUCpFuBqWgA8+mmrBmAvQkAwLaw5zwTQ5I83rZTWjJ8/3cisHwkpw+bAQDQIeE82GPbPV8FSFAC2K4FoBBMlbX7yj0O18RABQ6PFlQMnTgEdkXJBkADCUD7fwpgokzg3LsA7p8RypGArRHIhAd0vOlc+3SQ6YVNz+38PBJNPqum+vu/vP/4UsAKndIMxAfWGlMgZfgEcW0v73FjDFkchyhCfDRNuIj4l/n/cYT78BgkPnP3wUH4UkermrW1zE8+7Ks7Mo209bGND2YVNwFb+vN+I+nulU+LBuoqfLeerP91v8/exr++LQbMwnbTCXjfJ4Ds2g9SLmuZ7Yi51FV4SQjx9L1SPDirXR5Wuz26fevUkF+o4Sh6jW+LNgNJNiTgMJxs0g9iD2C1FKpYriacKhSqVKpKpWq1mvEq0ai5VhS4bWSvWzO1J
VUiG1s9CYaNTO9AKnNUeyfVu1K5azVwVkMXNXWjGrfbn7nzuNITwr10laQgHam4fuYrvqoX7EXrQCM1onklLbWiYymWrBRJVoq++sbf/A2FACj/CG37tCIdpKeWvcMkf16rOnvoiBSpjxsWiHkFkrOMZFOh7yW7afTtZC+EKwkl0bAPOOMbDqWcdueMlil039yckSv1Zgr49Xg6wODv+p/tdgfDiUf5t80qgUv3c5rt4caj+PvjcIV7+whAOMAjPAEigsQZI1KU0WWZaU2IygYDRBoRcx0YYmT1J+/YKJZ3nXXOeRdcdMllV1x1w03XXI+C6s9zzUGHDBh02BFHHXPcKaedcFIIjrPNAPCgHtEppXnq48LOSE+eqdzgiFYw+gEIiHGId10FIBxSpnvg2qCj5vTsH/wmFo97eY3BGA2kCWWVk1JrjjNRuGkHRewOUNP4n2wZUhs4FQJjhFAxVghTwBAV8290tPhELcmO3AU0nfAck1REv8aHpoR3WV0vFNyVDQLYkB1GHSYODcDlIu+Ddy4AY/wpD8Cye78DGuTzjPF60wH/ZRISgN0HAB3rEgAJOBAAMHJ2AP0EjsLLwcmqCH34sT4fGYJeAgoaeUq063TUVQ+98R3Wlqdner8PTpOwiA6WsIYehiji5ODpumm1wyKQIUtO39TR1cu7H/+hiZt3ncSkO+m/afN+yR1E/DzxcogvJ76MeHv+p+6vaXh4UaHI6h890a/91JOERLqiBTAB2BgBcJPmDX8APnxQ/6e2kK3abYp5bI0FJk1ZBmw0pNa8eg2g197absO0RTM0p5lmLZijhwLbUO645a50K1xrvh2GDVvniXin0C6LYdWYwLYfR/CeemaHTCZmFlY2dk8NksUlWw43rxFaValWo1ades/MNdIojZo0G62F03ALderRpVuvDIiyvTcDZiBAgAigDtSBragLdWEX6kE9yEN9qA+30QAawGM0hIawBo2gEexEY2gCZ9AUmkIGmkEzeI7m0Bw2ogW0gKNoCS1hA1pBK0hBa+gCa9EVusEmdIfu8AY9oAe8Rk/oCW/RC3rBdvRGb+SiDyYgBjARE5GASZiENEzGZCRiCqbgPaZiKk5jGqbhM6ZjNWrAGqxBEtZiLVKxDuuQjPVYjw84hVNEkTiN09iGMziD+ziLs7iDBCTgFhKRiLtIQhLSkYxkZCIFKfiOVKQiG2lIw1WkIx2HkYEMfEQmMnEMWchCDrKRjR/IQQ5uIhe5eIc85KMQBSjAFxSiEJ9QhCJkoRjFOIkSlOAXSlGKApShDNdQjnIcQQWq8ATVqMYOnMMFdISLuIiRuITrGIgbuIGXuImbGIRbuIchuI/7GIUHeID5eIgPGI2P+Ihl+ITvmIMf+IFX+ImfmItf+IcF+I//WI444lhKJCISYTaRm8iNwUQeIg/mETtD7Ay+ErtP7D5+EntA7AG+EdoQ2uE4oRuhB3YTehF6YT+hH2EA9hIGEQbhAKGKUIVThNEIo2ELYR7CfDhIWElYiQeEkwkno5jwNOFp3KNuK+q2wgvqdqBuBzwUo6CkYYz2kjeJaB0T7WLybjHR7lzR7nrR7n3RNcDEByFy69tHsGvfRd+n1cBw9CUSf+RLXYBNTDDahKySCyFkYg3Y+EpdgE9MYvaM1f1pmVAnet1cJzGukzkHhtNetlnofU1kPG8bWd37pngpmX9rFkReXUBISAEbMQ3hli1ttZL/d6RDJ26gA+mZW6e9KZ8fmrw0JFMpjPj17Cf0ekYRGaJhKrU48tzk9UJ4PGx8aUUIGDu8sbTcldw/Vwa5suLz/aSpR5B/QcKx+W0VIGuSglilZ6wSTXytNybCNB1eMcfIPP0SFdaFyIkbBtMoNJWiOgDIviuA8gCuG4CfAS3HAHQeAoRvAX4nYPQPQgGHRzg0wwXO1aR/C1MvfaIvlV18d7XHDC6yZnk147YLdxtfWa/H+kIMaCeuYlhJ6qUuKk3Ou0sLwm0TdHulFKEZLuofh
m1c/MjWMF/ICc7yALvnsxqAEMfeXz4l8yAYw0CrjaaJ4X2e51GQL+LgRno7+li1uoxHF3VBcR+njupYm0dsqCoTF6Vn78qzsFM89mGYBZFJgnMkaKPUNhf3kyuT7eHp7clanZr+JlTrwUYbG9tBp1xz3c4teJoKQSnNKRWkPN/QzRpRfE9NnveM04P1UtRSrYNmH/JFq6NRG93+s5wPcj2vMeanY38nZtsENVopTVXF+eNStmXC6KSK45AZ6r1zzJQSuSKlUISVPGlwm5N8gibhCwgfpoOY2QR5yYWfGwAsgNdwxdOG0CIXMwchtvn0UY0QYA7WjOnAmsQz17vSADptnJi9tgwk2E0vIzdKXhN8EIhJscjJvKKwoA5dJvLQd6GEJyVW7TENakPD57mVtwl9vjtdylYY5OkF9dZaOk08ghaHuWn+ro3cgGPpGY+3WYbTF+MojJZEJQF1FATdRUxQS+HOXFNWv3pDenMQjQPPXxXzEfSv8q0P0WPsb6nj/Qwhr+RLc9wbo/EN6fZ15OETo4Epsnb6k+7liVX7K/BqZypRnCYfatUnzOG/mrBW+qkbIOOFqeJBWmG3gqUCmQkY9kE9szKspOTmNTehvs6HKPIYziUez7fpwLXQO3AivePJdc1JdQCtm2gcPoAxYbItElfIkXRPj1YUjqC8yo3CG2YEJ6b2vwFnEo3HdwbSeR8z4YwJQtvaSTlPmSJq4J9X2uxPG8pOQgVIt17Ogr53qTHlUY2CuIXTBLzk8C3zlpLJXuEI4inckLZlUjcbduVDa/NgNtbWIQ1IRm04CX8WmZzN/rKOcyfg32n4ZogPYb3nxoBlOgi1Us6RgUz1GuXEqXMelmIMpEgoXzLj/nL2tabt6QzhQziE4lAawydYwJT7UEvToj9AJfDfhPOpz3Ll5K0WHeoGPMafR7qSEs2cDPcFhsUIAqQR4DXfHHTD8VAXKzyLBnkOaY05OrkqUIQxPf/CZz/FMy+1VY1/309h71NI4l3CazUu3Vd9MXxi9IMMEpTF5LZjp4+O/qMfXZfW3eR1INAc9PzrCS9NpKAZeOhllwmwQ5crlkChyjAV0/XwBJbSZhPraPR11uzzi1+owRyH55atj8PEsKXqL/Fi2sOsSjWp2kcEoKybAX7jz6c+SCuk83GHtxNvLHhJRSGr7nXJ/R4+8b7QOsyg6/F69flto8QyIEseFoev1d9XT4NbV9LPBVCIKAj9NUwdTq4fP946KF1Bo2sOXGfK74jZhPxMwiac5mRg34nzOrghn5Af1ed8lcp82/1MreRpVxpW64w8/UNJ7Iiw7DbqxHczMA1kwjlGXiHYC9pMGco7iob4Sn4lMJujWtsCc+OqtC4xNvsPbxhnlnyPbIVmYu4RJmZ90sTJ1VffRpE4oYg6JsDs2zqqB3uWqQRXJ/cv5XFbnCO7GmpLoOP5j8MVmD1Lc5/Ot/Q87IXnJBLirI+nIBmnih+KdZ6uZRmQCwDKepmM/zlSfu6RAr+FS/7ubcwfy/p+fvUz76JVfJb6sm19AQbUTP4d8uN3w6qoUsqMwlYexF8JePINmQZ6UExesQMjbfFSHFf02ojrZ+4hYwsfhjDrEd6IoKmFZDZkLMYo49dd+XBi9GPKmoQEWMu1YO/rjRsj9Y4oXLZWC+GScY74rOqT/zFcmcWTDx74NYV0Kn+BlpXBoV525CM9m2UOhdqJyLpCVbQu7wCyUIAWg2RMukKPEBUlJUFsN2u+xNNpioCUoV8JBv2rasy5+7HnUUwEQkcfiivr4+kwVx1ARkRMza6rtM78HAWHtkAyLP7UZzIfO90WCJBjPjJHk1U3QuKi4OfqoPiX2SbgzkdybGD8cI06UiY4KciDZCdWdmgBzOjvp22bKv3xvsnkzpOeONOZ9hkGyZpqiFRsCDxQ0xCc+dRnJf3hZuOo1PuwXdkPi/KRPV15bz+as4sJVUnlIhGY5/PBQXVQYezqKEwLbSWG7ddnAkPMKvnjoguRg
mR1b09ZkniYt79NZVNEHLq1grrJP63VHHrlxL8qy4785cv13PooSyMDG3Pj6PElR3hlumx9vpUpMFW5LSVxbt+qxNtn5h141Or+Y88q+Sy6oP+Y89/rIAAlmVrKhVqm5HIBilEAE3VktZZgtJmMeFXxNCBieaELcTRJtbXQcoTYBpuk3O4HQtAyDASCZUQHjX4RumPO+61bzLlLvEDmyslrO/GpticCvJIAO8xMgQe/MsTrgkYlXbp9ip/OvriWBCWOqDhi/E6PduZG0ELsMni88XTQXmLPUPxNf4hf6Zejf9GftkGeA+E0sZcpOV4oEmosF0uk3Wu+7w88opljfRkI3iciGs3aLV0F68PAl2aWYV601SXO4beghfmuzpIrj9L1Ml5qs9D5lKHstVFpXrYMrCilyBfs34Yemhkr7BadBNYqijwPV/0ggKl1LIQTUwTKNILR7K820JXxdapx+FNEf786MjbqOz8jvbUtwurc8rl9lpoPqat/8oByaZOUZ39tRXwrQAUVt1+6iAv5wnZDRQXpxoS/cg23sshqMOCcrX/z/zqCZq32/EyFzK17IEVJ1odCLKSBMeV5C4j8K2LIdDWNpEsK+ThQ2YDskKBa0x6XvslV0wtbV7z1MRmfOuOiNCR5beFJopyfN6bIf8ele3gKnY3jJpSBMaGlMczxQwzzydLL/qU3sdgQKLgHr51/J5M0aJOgFPnROt4BU7l66K6HlmKhtWmzL4iUwar8rSdwskouytffieamkuuXCrCzViw7Bmfd8gZvwoxEv6/zI8Y9sjkKpGmzKIfVVerFRDJl1iNPthwO4k8yyT1l4NM2+h3JqlzEGIy15rygIA8cnEtqVXQTMqMKcYU0i606r1apxNhCSK4WtYLeUSt5gV3kFytkLJHUiZLdES5T1MUyNl0Q7MV3BGd65nhfaGRiDbGfMwga+3OlNQGkj0mdHKh5ldm0qtK5UPo6ksbwllvJ+8Ek7PQvet6Rx+YJkPe0iGgzCcf9GzEvmwSbNAl1tnrQn+c7tl4Jdw0I+KITAb0nBQLw9FWBs99A/sTbRvipW7mwfraZqduEz/t4iBnSe5Gr3XxLCnjrwZrAwgx2pq2HeVXAr84Ss75M//VGHqrGvMvvSnkZrBzwegGfLwALBGB+P6wUmAZ4wc4Hc6OeGCh4q2cfOuRJxg7j4KT9NyBK3qIcFg2LCSn2e3yhn95XpArklGiLAqAv7cjgf0bxWypbt1b04U2re3ZqtLEFpy/dSTKu2NB3QPcdctCmHlFN3nfAuOFWESwGOHSsjWAxryYz9JFn3AS6/rTfsp4scxBQzdmGtqLNU3Ev8exAVX5IHgC9Px18yN3CA5m75MGSkQqYZYqkCiGrkp1CCAu1o1+ceo9t6vyr80ng8pWOJ40XRhDLTxdFSFLD/4inPPXzD0MLinARlas8+0Z11pvXdVLsTvghmisSZr1GHLI6DUKaWSvIR6V2jC31WhVihUPgrIxu8hWqdMr8tEBTIAdUVfMdfqgz28XtcAZqU9SZecKrp1drKZDATpcZ8KVKFb5MYrQxBEIz8/3NhlKRIq9r04q1vWuzWO1ObyPP5tXbm5tmdXQ0TWm2y/K62k2sKx/NH1+LzM6DU+zXT6ZPTH0+qG3gO/3czmwXr8MVGJGipc577Pj19iwtTSx20VMNxP/SW27MZshof4q2zTHeTXpbDTmbdp5EUaK3yysTyax00LsYN8FxipqD/iaaTKKxn/VehZJsvQYSV99bKzzeUnTp48ulhSe9o6ZeqyzHSWZpUdFRuBtp2I3YnfSm57jCyq6sWl54LRchWTLOk+pxn0Zww5JYbeGuE9P/crVpBoISVif1uLr8r9aYpiNEYVXSQL4dVFUJrEF2p8vFHm/1V/FVEpJi87PHZ6U7bUH9QCgtPBNDbMKVqZS4UrHeROfBoEd8iylVKdawfjLI8roGUvMSv9CscinV/i6IljcgLGV+t8Zk9sv9RXp72o3vqxlXo
TbUDjtMX9MPQ3wH8vy56/6ATBWgjLYcxb9PmcktmX6fPK3eA8mRiJvfn1X2vK7LYu+wixSjxOnK18f/SChamzQmOXeEM0el0eZInCMK4kBjq6QPQy/iSbTmdCEm5FxzXSG79nNgNJ3xZoQNw88o59FOP7CQU1J1Yj6a/L87tYA1QEwz0VlE/aIcvPV32xnTI9MZm0QHiv14lY5W75rkmZKBci1oIVNa1qNcGVOyJ7lo9SqdDy+5cX1Bbk0pFlvOQWBw5VE1pb6rCzTdsxf4DRXTEqunIX5A1WzKq3DkVexF1f6EmJZQs8RQkT1jQUXZ7GmxllZEa7yltWT26Lgj7t2ZBcHeM3UvEQ9s/f7e4B7NkayC4l1nu78ihhbtL9qVf+rbqYW53gN/E4wIBIJi/MNzIHBq4TcNXxxEpZu6RbmoQrPxOKjMNuuy53lXx6Pm5FQQISvp5P/JpeyMXFdDPmSohfNc7GQt7Zpgxll3Mr8KBSU2skGqhfwFXqo75z4BC0Q7mRyBmSrUoPPSVJRShd5OTTUQIU2Smc+O0b6OpGaYtN8/g/YEUOihZGqoRZlKRpFa66UIhHrKsqW6JwRKeyPSAEHJ+pJ2CnVuuTzaRVb8BxY0TGnPoBYptHYaBA19hLsHPOcMNfCLNjbaQN11/duL6UQClJ2sh4LG0nYkKPJSVRpaYWYmvShT66RCKRYSiPFIYyLQFoTTn2eibdpw+m8n4QIDYVM5FNg//DNySnp0Ki3CaTf4rKiZYcRteEORs1pf3eP/5yHfGq+9SiYFwhDY6QQSgYdoZup47Kt4HYqRxLy1RV0BlJNYRz5pUIxhwfAL2CSUUlzk88qVVPzXPGzarmTX5U84+SEu95Ac9+myizyGNrHdk0jFCL2xIR8WtSWzbPwZ7nPuEyHlZHuKxGkqLpK6OcZBIXOpoSTofux+zLKiY1DoaDQ6Go2KkZHuoVJPxUxuiaE38glugi0jANPTWn2o8y7DfS7nd/iV7xO7Txyg5s17llc7sXIzKBgjr3I3JD2MRISXbm/2cN2PDWFG+HtbCi/t0Ytch5hvt6QspKTqbCV7n8Ca1esZUh2qWubHgvxsilJDL8pUhphSezP5IdL6lZlI04OnJ6VrEvUQlGjQppOgahMNB1+vSZflXNprqIaDWSyuwEwVaNB5ChWlBICTzwKi6ggYmHfT6FC8fNWk39TfQdQ57dJ2eOCSjQkB4QP8Eh3Sodt7JRQBZueqP60C8HkmGUSojTPvlcSRdKJ5adZroLSQwZv+UUfiCT+TEzP+nJC6Qj5xNYlQtukWK8VB6i+fHCCnbMEnoG+MMK80du2kEsvP6pDW5yDLHPkLPcaisiMiZxhnTtFPieUDUMzdfV/lz10PwSzPIA/qyy6Dcme+QLPDUTaBZ2DHX778xB6kuRvRnWzuyU/M2faXZv30v7xmdFOMthTRGKebZENn29GT47WNiMYYbZMJnTXprwojYnKsfiqiJc6wSA/Pj133ddGTXE1dz9iTiJ5gT3qdf9HbtR9yTaNOr76FONXerx1lzb2WsG9fnKZRGgnZXvaceOwri96JNm1EbEg0r/dHZ/fHnPw62GYm8b6P/lPvWM7YCL2RMiaztc+BkNa5fg8sWG1CXnlqfvqabG45eqXsyPgssMakUbtvGvNeUlpUdb01ZpUy+4Yh+JnSZJH49Xz3C+Pd30ktDIcyP+a4TfOPoDDe9Z3YiupotLfwcrxQe9ba3t6V62f5bnbrhRHCMW1TOjrbZvrIGm4WnJxAX2az1v3l66n3YKlgaW5el2fEaHlu4iuqRS6lOt4FUfLRy+43+N1qs9kj8zUozoEq+ULLQ8aVeols81REcjFb3p7i8jdXs+VivoH0AuWV4ikZofPGBUi00Rvu/W0niJU58r34Cj5KS3Ux4U1LQMzybHXrCH/78F97uitNYnGvJuKGGbGgaxfokGkW4L1y1Jcxzehz+MzbQ3458nsNwvTDb4eY00i27GTDgqfrP
pYk6Tn6zlvwqmnhIC+LkiF8B4tWlKF1UXhphcrVB4IaOd9I+ozeFlIrc6phGXKuS1GoPpA2doyeiBVyKS3CEm+W6P3/SULs+6FPdqUrTWJ1104GIT3mceRSOgFaTyacHmAfvUSj7gQPk5kTIXm6I+uDG15UOaqIdWa5gcjcxz2ARV89wNl7mk32Lf+xNhqEnKRBml8uIbnrbCQe00haOCTP9821jO3UpB71SIzcT/MsYydqB+29YzKJZWmmLGaKnopPjLfwYsFoUEugUlIzICqzoA5VmHNpqfsE4P5op7OFFqqI4I8wTYuLBPZVTOjoudjjluGvi8ZhCzjQz1AxD2j0JqHqoiwvOVmv17cncoZh5lT91Fh+PDEtFGlftR097SVGFz72/Mvdc7HnZs6elW6Olq0Rxv/74UOMwwMrg18MJonjo0O3b/oxCZqrCVln4LEJ76LpYSQSvvgJyLdgXXgdm2JUPp6YhnTQKYG2MVSl3pxOXrrO0vPeX/j4XkrxU2eiJrW1HPmwN1nPsdPH5RSwK2JwkUNF8x8lIQkXKIyfJB+ov5z7GCxjcF8ycZmCR3P/B0vrt23cWN9bWhK0eMG6rViLv0LWJFSYTAnlsZfx+KHYhHLTXEOnzGVYdKdFtl+DXK9On2rQs0Vm88hmexnLzUrNMCrZet7R1+Nypercyiofn5lnKCmqmb2wGXPQrH0BMl+nLXi39LKDr0i3quT4rHfOBIVaYkjYicMIfpCwdbRLe3VBngIyJrMHWOlqqwRxBhs0UiCyfr4d7RHuIsnjlxfMDegCu5LfnTuIDTFhEP2r7+Q+c38psua/bR10cNMIdJ+PorNaVZQ/TgwkidXm/+IH64cIHDMqm2iTSmmufgfeCYICFyPdQWlylrCm671+plBqY/bdkNcRKQMPcVWA/ZYLYnnKSstTCDaZnsDUoJtvhZDVCnMmycx8EF5oKtHvhueilkLTdR0pWpV5tSTNbJrCmTlsBbwg9SPId2BGfGeSDFEb6hFsmUnCGPikRbEgLfrlfAYmZsab5wh752MKmT5UaMcA1V8ENKFCI+MvFxDGLrDEyNIMSjFp4O1zQq7YwMPZNQmlz2VkE+tleKG5WImacgtOUivMBK3EwVhzU15PpA7wNbhqwHHbxWV7SterMAyrCbIZ6Q6ydT/7+vEghoojUDFoKgFHReu2qLu/hZPD0Tw8ToUh6/laYzG6+XRUG6zQAyuMasNdlxqZxVftr5qKjgybi2lJrYIjnFLrT/tjbw3HD8exI4SUHXXP//ampDryvHZKq2AEDC2mXMcMwxGGYfqPQndOevQuYzV9O/QVjsCy/xNR1tZ9eu8VQKmia0BlO+J+DMkPnvB6ooNjthFjeJmygHJ0MAiRw5IiMdgHMb4dlUELlxya/BGNHUIRiLxMuT9mn3bGKhXWVLaubl0LBrXcBl9i/JnPHUH2ffVjdQz7V8V8BaErRcvno0a9wU1V+NeNKOSk1/jrkXozEQQ7Uh+kxtzeqSw3Dk2danygrGApitPuNzePOD4vGNwZGCwd57T8iT4EFi4v2Cto/dYqQBGXC1q+tQj2FKD+WJ6x3Ikj8/mYuHYGASPFEOjjYiURgVTny6PLF+A4W0QpoR6JaM//iP/3CiWhnhTRFg5OwCUuF2AdQviFPVfrFkkQJxFWwCMOXr7BSo0ISAiqeCkmRtYeh+HzyTjWjcuDRG7k+vi9VZmhBp3/8Wn6UxAdShnn59SL6RcV+Gvp18BZPePTCktYryrREfWiiLoq1KsSVmphg8mX1JJPab9qwv92+S8KddwuE+63q/+NPiQPMB7hZ6xms88S8NMum/Hn56bwvnvfhGOxKeUtTKGDcqalOZcCGbC4KUMmwv25KcLnvmv/YomNPZtQ41yPvI+2iuQFaJ2UkaPIoGVrxIFkKW8HozlK25oS8EvanQ5JW8DXytdqW/hBn6TN4ZS0B/0tKRqOopxjc3DrdFqE2OzlbIUCR0x+re5McpRzN
IVMLXYYziLgEo0UHYYObRjDlEzFpupsGTqNQ07+3/3z/muGGyqWHk8jmHk8gpnWj7FYhh3G+XAFqaqtjk6ra68mkQN1dTR6Xa1//EUH+QF9mCqU706hGjmmKiyXGpeAwgcWrbJN4BflyacFKnQ9Tc3LVF7HWIHXz2+z6ehlCp2JQuVMM4bM/ZciMZiNbKxbpDNxeNjR90J6U5lnCS1e9IsKlEOUSa0yWqs5Bn2HvLQ8fV7Qymt0ORvYmpQcdGK2V9wOJl9ch19350YS8Q0MxAvbyvPQF7MVgYTFvvQCZL/sdOVpqH5ZIie9HFMUbOC72MWpS5qEmDoe6Kmr94OG7PqK8uw6gz6nrrwip14nZgqyppzAwWQFLf65m0Xvk/qg0D0q6XSylHHOqLndiWSOo2G6wt2lOg47w6RW4T0V5jc/pPL2gkIrVLIBSySRb1Kp7xUcSTURX0pGtsNL15zgYC5j5Rha0VA4mYXLKxJkPqjZQ+n5YnmbnpJes3npSBO8ASHKtvWDUgtUsgH6RqVdZbKH6CjK18cJA7fTNkQV3f4/AfuIIiQRc3Q04UnQmFNdmIrJuf0znbobXPNzTBJPaULYelMzVIY0vJr2xwlyA42Wz0A2VVs/CqdQ43w4F+S4dz41gZ7J8+C80UEq+yeQFK9MRTLOHEN0/U9CMaKOYGRmuTBhG0q7DBFfQkNbnPFhz7OlSdhCHFaFJiaAZ8h38MPCdytTi0XYlYa+oyjhrgzSeBqtPvoi9TjYpwGZ5c2TiX/jTEfZW7giq483wefldfadziOP1+lDUT7oy/orP8b0Ovx1kvfXXF6n18edAAbDk1YmI1cmJc1CJs/CCps/3EfhyaEJ+5KTDyRi9tH5Ij13xmS+mysrkldHxa2gYSsy6+YbsKxMuVF9SlyBEGGPsXp9jvAzeOxd2U+TSVWrOtjsztWVJFLl6k42u2NV1e8QXyOVP4SgN3Jpigb1Qx/mOFlFIKooxzGY4xQVkaAiH59HWUtjrKVQ1jJoa1HGqkG1JoxDo0Fhau2g3vhBTVhUqjcOarRh6TSaIkyjGdQbyhYRNH+NQKK7/yYTieS/u9HoPf4v1z3soeHlH3KhL8HPDmZKWrWAPfWOhfLLfJE+K6iNrMws/6Dj9O065OSkeHj0eTcs5F/ZBrnFnW+OrJwiF3PlRYrYdH6FVAcftj6aDGH1cXoMJLdB678xv1LlaRop+fmC9WmLCFCcjGT8FYEx519d1y3s1nYL5nWr/c6tj/C694gQvO6Rc6u3Rz1f0KPtEfasq1eBfAf5KDtvtobbEzEsMiI+PIP3YTanQKVxU0R8J68r1hpP1rlKo/3fRCFhsRFRiRH3hbejvY5iFaN8EQh6hI3zZXplYK6rQR0YvaLmlJVux3tkUmJOvpW+tE5avftdZbPo9MLgu3SR5LyZUlad+iNY25tW24u+W32yX9V/Uus3QaBAqu2qmMKOwVXrVq0tlZXrlLWrnJXrwfqp2xdQX9HyPlGoVyj0K2T1QRHlyuS+Xh1CzDec00QIvD5PMmUycfNGm+S7nbv7Vf1HtEeWlrv/7VRtV6Vl4JZPsp6Oh0WzUzgVIZqq2Yl1AcRnC3KTkQVx13ZqOWSHrh8JNndu9cl61XZYTEzmmhXL5v6Ijj+wvOnxvAnoxljh+bLKsLrix+hVgcY57tOVBa2jECHQ4ls1o/xXVgwvQOw9ZNbgcr+fT1D5w8qijzcbsE5JkYHy6GP7KLSdwrdGnR6WuFDQzJZp+XRw2eHaZo5MI5waVEQpwGIbcjA5eRCZ/FvaJpIrU7iqC8kDV+PQqXIrMnnbAeS2nXEIaw6wIUkvF+qVNAcK4vbS0QABD6DRAJ4AJA5AJRwA1IpTsjjpBRb7Ign5Cx7+778k+n8b8+cdZFLyD9w8GV7HKibxvBCq5ken1WASAqT4X3EQcIPMljggFkSW1h4NE3fx4AZZoOBHVfmfKdwgCxQ0TqOs1vjEroO4QWZLCIoUcHXgBlmgo
FjF8lrwn/8pDFynpi3YIxJillJZyeJuJLhOzUDeVH7F/HfWolwvNXvyytEmIxVK6MY5rlPTFvAqFHDjwXVqBvK+tEKd9z35eAzTR+pEYVzqXdZm9OUIgHSTBnipXQf8fAGy6+yShbhBFpkbE8Cl3kDjDViZCTe7aoMsv91mXbysSG637hZzF2usYVJ66DHSDE+vijiKl/CyqZkshnEUL+FlvIJX8Rpexxt4E2/hbXnnFvO5yvkXu+YPe8ZoGGSMh2kEcKvlfFNxcJ9J/rCXs1ZM2dEcLAYAOjF23veg3Z+oF7a0l3V15pNp7FFvNAMBD4eoZh61A+AXF+D/Xuvfz7/xD8CPfzcADIHbF1+SfgDAgxvw5ZBNslXm9xpFusTi3Bp5AH1vdYNsKuXbFwDDGjiNLPz7xujKKftCUV8P8gBukreS7odWFm6QTYNQ+QQgxQWyaUW+fUffcV9Bdk8MtuqfI3t7PbR3YvtvpCwxO/Me4wVcmBhs0z9Ld+5LAM9qYoUVtWCPjM5cyCoAFWZxA/RFr8cab+GSbdM/5evqTnZ+BXOxTf/ECEabIGcptEdDS20cyC8E63eGjCI6iNOsSXZ2Jtg9FOlLfQ9QF+nGDqD/jnLceP40bp9PlzHpJ4eJJy3HL4lTL9rAqWUSPYDnu08gc7ycBNiZE80QoGVC1AWHEMiehBLf5OwaC4rytwtFYdAmUCNI0VYKro/A5yYC8SCBxmbR9rUGnOkQFvS15yaES1OKYpmuYvuMTAnkbiGirF1s1n7w2a1R0PyDRJpt+go+Ag2+HHSq47maW9zrLEqPTaxS5onhiwLWnfcnN01b+6eyajPY6zpqz8buNLu5WdJFNhZSR9/oX2LKwakBVr0JsCl9X2C0XNLWBwQAfsGznqLQPxIA+uxXL4eVQCfT0TcMY5sizIJ83zneMhuFnSElSEY3yJLtd2ksES7HYWTEbzKdvwUyPJ0gsHvKz9HEnaMc+ltxsIUvAHQVk6R6q8FGfp+xHsTN7A41egDQN5JKV9DBT0LbclRG3ALAxQ8MzW3nYWa696sjhZGn2YMTqlwdKmhIfgbrl+f/kTUy+7rVDv2dRKyJjdMfKWgHAelfUoEJAjUPLIDMw0nHBSNdKNKMvA/Ck7YpePeuNQS2oO3wtpR0W6eMFtjI7OzuRi54uUD9R2sQARPEt6scTovOBdVU/8vjASxHH2VwCNAnLKDU7XvGEVYULRI5IkkgkRIvFe4iCUVpt8buivwi3jVGUCeNAPhlf2a4fact72MivclBEAd0K/ieiC/Cl52Yrs98yNxA4SJhkAI+t7HkFnaoo2pzNXn2/mjMqZo7dSAGEAauCZX3DBS341UB5zlfHG1WT1IdS7SBsk6H/ieghlZyKgU+QHVwFQOU3ksqOABO+wBHGoXQMsS7yobfJRWAtN1AzANCZWYNOOaV2ch0GhJ3F7LFEwrAiGgbxogfDFiKSxgeoLAQexunyLNoEHdD1IlIWjlIb99jvWlIAsSIv0wASHZIuELINWTv1j3LakaVWcxygYx7qK2DMUooqdnXthmBwLIVIhcQrSQ36S/dlqoeFYn0CkB19Cp6J9dKAlfeOZ6VyoJ8leH7JHZvpUD0E9qGgW47WPEzgO5XSHQKQmtDsQDQ+8SlAtCmLMvwAU2BPPIPaaBajrp/ViuUoI/w8QOKYTpX8NvD/EsHIJdyNGfvzzvYe81C4gUIIxgaoN2mXBG9gB97FRlEpIqzYcGVFcgFuYjzVcic0NCV8BKZnWVSZnwEeYNNnzlcB67mHomxWulUSFJClBDCNFvuPxzM1CBfeKmiGjPNjIKHMfZBUSs2GAwypR1g7IC5srEskdnQMJRm1myNjVCN2p3DfABxjTqaQ59u1OOfMW3rwQIfoubBUXDVCCxzLrAFoKGeHGsLlKjHFCBhXVCjAETXGMVfWiDml//4db0zAMrd8yoi/KprYtWwxPk2ZPBxWYEJZECs5JSA+M5xBQnpRbXmSb0Pr
ZwX6FKf/jk8785svh6FqJ6bYUKceE3uM0ylnwhcPyVg6e1CnIX7gOV0n03ATQrpBsxMj0wuxoy4dSdbEkaIdqVTBhEdIvSKfnZeG2M5sLSuPG6CK55WrPhkqEYqLuFxZ12SeT7ZbCcWTuHXmrRGEPtkN4lHGBF3gPXCUaJunGOQ8y7qNTlrIUHUv4CgnmfmWpu17jxUhwsOB1Elq3NVDo7Ply9buLTO3ma05lGDuX0ujV/ri6UpESL1+OgdpEcABsflrgkG0KkTc9wB22Om6PcVx4wCc3EYLbgJ0bxyYmH7yYoCruFt0alBOTUt2kt2xk/lF33zs+6g8V5HmcgfV0agCsFYjBjKZlrR/75PDgDGXsWlqcxCRe9isDFMd8vxxsmmVerrpzvUw1zTQsKFCq4cZZg7A9AfXAXBnUOjviPi96Ce5xDSnZooUeEgZMigoxQysoIT1c2bWDZhQj7cMKDEMkdUrhpjg1wv4FtFEDCsoISkctBREmlRTF9uuxUAFldcJN+g8h5L2XLk7+hljGsFXnJB883zGzR1TnSenK3AxPc7HzXgCTwJBFyxLC8v3Bnsur7DOj8rk7094587p/krFHpFwgrmR1vInAtoOQSCLubQvbxO+aJ36v1IBW148x86e+TJc/X8Gc+l5nqcjJ8z1jcnJ3udtgaP+sAT0zzdUv94Ue/IalFtKqDuM4CwxrYKsTi7pgqjeHM8nLp2x80lVQsJxDW1Dw33PoiywKWhFU6PDiffkmwl0vrTXXOU7BIE0UJLhGPy1yTXC04VE8j83RiQvVm2GRaD3TrTR9LUJQWfDaLGtvZxk6bhdOQeLmoHpiwZhGrWRtndM1FUTJ+SgeNPnTZRI954Ku09ZYPMnrbUfUfLziXZibULwB2WzGED59jFlfkwTmwxfd8zAD7LmHOud2JqJ+2mkLB6U3sYJ6AR48AEx4ARLSnJIMakfDr8P/pKBwXPOhCRn5eogBLg/1GFwdn7lv/iBjtkwPo1cBeRu7p4eAVvmgYWRj2SFBm2bjQq4z5gYaZuqTZZuKO6+ClA/g93U+LEsxWg5BgHXPxyXo0/k4xz0Q09KwDn5WZF377z9BcbPJ2NP+022CX2t1fkDPnuAT2Uyp6iW7uzcYke3k4eeOgio3af219kPbMJzzDgJdv41bzZQOHuEJo5/9SUGoun+uKFxrxUqkqc5Ffz1aV4bdPrK77tonrDLQrrMwRxFqhB64FkBlmG/Q8RGBJQZECBQw2KFmzSx5Sr8Myc0dnwui1r7yXkyR9pW4rJs79NtRTUQvqhqQcCbGB++xFzdGvJgmWycT1YFH2+JLV02cZ0ksioK8tzmvXtKeNRDokIcD6tty5niw4LJFPiTj2qHI/u0tqdsUyOIpugQr7aPC+8Ha7AtjAHh0bmI1uezwG7svCqLki3UKswj2Me3SjkUwAGAbv96BMB+GwAlBo0zjU9ezCjZJhDwdefM8chbjBiJlUhDpU+5DY7dhEiNJV86iZ2UJpCnHXPB8klqtwJZupopM8023FTIO4D2aMn/BO/2jYEhwDsNV+yZM/ehFWMYOeqY8XEBaBI4BmFMzoYDH8OBjIUBwgU4ChB4IHRzJqHKZNWciBnAotE5WJBJ5tnK72wA8MegKvs0JEyBYEUGm524gVSdEauYWoJQ6iimTLQCjVwpPNqYTBV8PJJAYvr+jFRLm6BoDi7kt0B2hb1tPRcjZxlYZwxVZNRqR2YVnoREPPZ9YjcSLP0Mg56t4wUZXX3BdnG9CXRFiUaukraR38h6nutLXMvf1m2t7yIxbDXUHRbTUlf6CCbXK1da6J+gUWin8+E4HV9JCgAZQap181IUKPkqcLmqaDOCGc9AoBfJS6W3L1lvjKAP10A/o/Vx4AAwGdXBZ+m97fpkXMOQAYHgMB/+V8HSN+eQfOfH4yLg6ycu0aucwCXAHcb5TUDaatjMI79ec1rH9Fcf8KnHyFVD/5Y43cDzOArcrbcgeKsQgIVDb7TB
AoH3wqqMEr6dv9rws//pOXgzjkp5SLIWymhQ3QBfsk9/IvefxbJG27fnTJKf2mCGCvc8Ua+q0oPfNzQ6GzPTdHuU+gcEVvk9QSSFc/rCT+bCHtSnI4rJ9y+KGzy25voC1seKIjHRWptDijKXyg1y7PnSflJIN3yM8hb5P1rCBh+Y5BSZ9cTypjz1XDbngMCejc4FWR3U7UWsW7EGRm5+LG0FDnv8AK4gA5YSACMLWwGzjTbR7SICLJZs09RKPigbsHkQsezCbK5goLqbKGw1QWGGsekzOxsd7DdSYUD9EgZMpFtGyHsfBCtKbgBTCgCkqC6/kWi+JUCKfbK3dKzaX6FcPqUKL1Q4yJJTdrdN56RAg7YQLMh5+mAb4sxzfKK1gVZduM+dSic1XMxMhyRRuw4dzFIQWaj3HWV/JvbZtKgTQxLh11k8NByNhUgvllUyAS6Q634aQIT4S60QBl08NmAjcF+gDVCG5yDC1AJDeABF4yDEVADi2C9tPG8vADgMgAgUJ7rRHOqUARAFhw6CRwBaY4DRyED/Ago+sSgR32OIs0+z9JE9wUmx9EXcXLa+2LG53VlkZlFox+CBNqOSNgZNDqu1QhqGDWIgSN8zFcvk3lECvqfSotw7kj3VttcEk/IXwjWPCNr1SvNuVY5QyBIXo4RTEgQ1oxD0Jesf5KB06dPKCI/Ekq44AaPW97qAOPo+C4EFZzyxEISJcAACalOzuwQ8aNUeII/LU3MxoRGENSIYbOWrVlxaJatL9iiyRAWkqZZ61JcGHEihoXFrYmNI+gVlaJch8NJcTzB0vA0MWWCKx2MRemerSJsBpHLx/GRNGHUGGrChtuDhA0gceMxyEUIN3pBHtrvM43g0IY9GADY884uhN61D7VOXbr16NWn34BBQwjhKUZVwvhXP2nKtBmz5lCTkoFxaJ07d3Ce1h5ePn4BQbny5CtQaLvzztqhTLkuFS6qdM4FV4e+sqsW/yraqdpCu9xxy201XnjmlTq16g3XYIQNRmo0SpNmrVqMNsZdY7VrM06H8TbabZYJOk00yUuHPPc6IfIKisjQJkkyJBQ0DCwcvARxDCDaBsFDqLpFEfGC/fb5ySmnHXHUFlvFJ4xn7HXSDDogeRbTKFJimGJqT8VwqZnmmI0mTCmllVFWOeVVUFEllZWgYUpUkpKFFEpoYYQVTngRRBRJZFFEFU10McQUi7XWhS0OUxwWziWWBWKqwXDFI4I/+cC/xPKztxxARkKxmFapTeGjN889P5vvviG/JkUCCSWSWBJJJZNcqUqTQunKUKaUUkntq/BfrAaFZ2k8K+cxwqCS5/TLQyh81pC1SWlpsnJPIxTS3OpHufcx3lzkj16ELs28SY3RGZmSGTmpG+lN9wQUDNQzsM1QbSLm8rfaFP3RZ9TRKZzR7eWPta5VPIlSMm4ReiiVRENYBSMnYdzByXNejKkip3cjoHsCCgbqKeiGgoKAbhioZ6CgoBsdrLYOLAyDFgx0k8xObjcr+zY1M0cbTO72TiPi61vV3MAinZz6jt0Xqedk9w2+2OFit9OXSv2qfD3wWj4PPMuXgRf7Btvch/XN9rM04Fnl/JCRDryVFEaNO4Ue6/MUHssOOr7Z+1AI3of2+N/r3+8NcmjIITf+PNDk1X5VYpljFvDCaJvgTfRYPqIIsb8b4qXG/ufztau3DOiSvWc+AAA=\") format('woff2');\n unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;\n }\n</style>\n<script>\n (function () {\n // FontFaceObserver https://github.com/bramstein/fontfaceobserver\n (function () { function e(e, t) { 
document.addEventListener ? e.addEventListener(\"scroll\", t, !1) : e.attachEvent(\"scroll\", t) } function t(e) { document.body ? e() : document.addEventListener ? document.addEventListener(\"DOMContentLoaded\", function t() { document.removeEventListener(\"DOMContentLoaded\", t), e() }) : document.attachEvent(\"onreadystatechange\", function n() { if (\"interactive\" == document.readyState || \"complete\" == document.readyState) document.detachEvent(\"onreadystatechange\", n), e() }) } function n(e) { this.a = document.createElement(\"div\"), this.a.setAttribute(\"aria-hidden\", \"true\"), this.a.appendChild(document.createTextNode(e)), this.b = document.createElement(\"span\"), this.c = document.createElement(\"span\"), this.h = document.createElement(\"span\"), this.f = document.createElement(\"span\"), this.g = -1, this.b.style.cssText = \"max-width:none;display:inline-block;position:absolute;height:100%;width:100%;overflow:scroll;font-size:16px;\", this.c.style.cssText = \"max-width:none;display:inline-block;position:absolute;height:100%;width:100%;overflow:scroll;font-size:16px;\", this.f.style.cssText = \"max-width:none;display:inline-block;position:absolute;height:100%;width:100%;overflow:scroll;font-size:16px;\", this.h.style.cssText = \"display:inline-block;width:200%;height:200%;font-size:16px;max-width:none;\", this.b.appendChild(this.h), this.c.appendChild(this.f), this.a.appendChild(this.b), this.a.appendChild(this.c) } function r(e, t) { e.a.style.cssText = \"max-width:none;min-width:20px;min-height:20px;display:inline-block;overflow:hidden;position:absolute;width:auto;margin:0;padding:0;top:-999px;left:-999px;white-space:nowrap;font:\" + t + \";\" } function i(e) { var t = e.a.offsetWidth, n = t + 100; return e.f.style.width = n + \"px\", e.c.scrollLeft = n, e.b.scrollLeft = e.b.scrollWidth + 100, e.g !== t ? 
(e.g = t, !0) : !1 } function s(t, n) { function r() { var e = s; i(e) && null !== e.a.parentNode && n(e.g) } var s = t; e(t.b, r), e(t.c, r), i(t) } function o(e, t) { var n = t || {}; this.family = e, this.style = n.style || \"normal\", this.weight = n.weight || \"normal\", this.stretch = n.stretch || \"normal\" } function l() { if (null === a) { var e = document.createElement(\"div\"); try { e.style.font = \"condensed 100px sans-serif\" } catch (t) { } a = \"\" !== e.style.font } return a } function c(e, t) { return [e.style, e.weight, l() ? e.stretch : \"\", \"100px\", t].join(\" \") } var u = null, a = null, f = null; o.prototype.load = function (e, i) { var o = this, a = e || \"BESbswy\", l = i || 3e3, h = (new Date).getTime(); return new Promise(function (e, i) { null === f && (f = !!window.FontFace); if (f) { var p = new Promise(function (e, t) { function n() { (new Date).getTime() - h >= l ? t() : document.fonts.load(c(o, o.family), a).then(function (t) { 1 <= t.length ? e() : setTimeout(n, 25) }, function () { t() }) } n() }), d = new Promise(function (e, t) { setTimeout(t, l) }); Promise.race([d, p]).then(function () { e(o) }, function () { i(o) }) } else t(function () { function t() { var t; if (t = -1 != m && -1 != g || -1 != m && -1 != S || -1 != g && -1 != S) (t = m != g && m != S && g != S) || (null === u && (t = /AppleWebKit\\/([0-9]+)(?:\\.([0-9]+))/.exec(window.navigator.userAgent), u = !!t && (536 > parseInt(t[1], 10) || 536 === parseInt(t[1], 10) && 11 >= parseInt(t[2], 10))), t = u && (m == x && g == x && S == x || m == T && g == T && S == T || m == N && g == N && S == N)), t = !t; t && (null !== C.parentNode && C.parentNode.removeChild(C), clearTimeout(L), e(o)) } function f() { if ((new Date).getTime() - h >= l) null !== C.parentNode && C.parentNode.removeChild(C), i(o); else { var e = document.hidden; if (!0 === e || void 0 === e) m = p.a.offsetWidth, g = d.a.offsetWidth, S = v.a.offsetWidth, t(); L = setTimeout(f, 50) } } var p = new n(a), 
d = new n(a), v = new n(a), m = -1, g = -1, S = -1, x = -1, T = -1, N = -1, C = document.createElement(\"div\"), L = 0; C.dir = \"ltr\", r(p, c(o, \"sans-serif\")), r(d, c(o, \"serif\")), r(v, c(o, \"monospace\")), C.appendChild(p.a), C.appendChild(d.a), C.appendChild(v.a), document.body.appendChild(C), x = p.a.offsetWidth, T = d.a.offsetWidth, N = v.a.offsetWidth, f(), s(p, function (e) { m = e, t() }), r(p, c(o, '\"' + o.family + '\",sans-serif')), s(d, function (e) { g = e, t() }), r(d, c(o, '\"' + o.family + '\",serif')), s(v, function (e) { S = e, t() }), r(v, c(o, '\"' + o.family + '\",monospace')) }) }) }, \"undefined\" != typeof module ? module.exports = o : (window.FontFaceObserver = o, window.FontFaceObserver.prototype.load = o.prototype.load) })();\n var fontASubset = new FontFaceObserver('Source Sans Pro');\n Promise.all([fontASubset.load()]).then(function () {});\n })();\n</script>\n<meta content=\"2f37cb8a-ecf8-49e3-9e7c-2b383f0639e0\" name=\"correlationId\">\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/segment-analytics.bundled.js\"></script>\n<script type=\"text/javascript\">\r\n window.segment = {};\r\n var segmentShim = new SegmentShim(dataLayer, window.segment);\r\n window.segmentAnalytics.page(window.segment);\r\n </script>\n<meta content=\"profile email\" name=\"google-signin-scope\"/>\n<meta content=\"363274647518-g085sh00jkmgpbfphoj1rlim2btbn07s.apps.googleusercontent.com\" name=\"google-signin-client_id\"/>\n<!-- Using api:client.js for custom styling of Google sign-in button. 
https://developers.google.com/identity/sign-in/web/build-button -->\n<script src=\"https://apis.google.com/js/api:client.js\"></script>\n</meta></head>\n<!-- ARLOG SERVER: RD0003FFB37290 LOCAL_IP: 10.255.21.146 MERCH_KEY: -->\n<body data-scoby-impression='{\"id\": \"\", \"eventType\": \"Allrecipes.SignIn.PageView\", \"eventCategory\": \"Page.View\", \"value\": {\"user\": {\"loginStatus\":\"no\",\"visitorType\":\"anonymous\"}}}' ng-app=\"allrecipes\">\n<script>\r\n var Pubsub = function () {\r\n \"use strict\";\r\n var cache = {};\r\n var instance = this;\r\n this.isListening = function (topicName, subscriberName) {\r\n var ret = false;\r\n if (cache[topicName]) {\r\n ret = cache[topicName].filter(function (item) {\r\n return item.name == subscriberName;\r\n }).length > 0;\r\n }\r\n return ret;\r\n };\r\n\r\n this.listen = function (topicName, subscriberName, subscribingFunction) {\r\n if (cache[topicName] == undefined) {\r\n cache[topicName] = [];\r\n }\r\n\r\n cache[topicName].push({ name: subscriberName, func: subscribingFunction });\r\n console.log(\"pub sub is listening to \" + topicName + \" for \" + subscriberName);\r\n };\r\n\r\n this.broadcast = function (topicName, args) {\r\n\r\n if (!cache[topicName] || cache[topicName].length < 1) {\r\n return;\r\n }\r\n var i = 0;\r\n do {\r\n console.log(\"listening function \" + cache[topicName][i].name + \" firing for broadcast \" + topicName);\r\n cache[topicName][i].func.apply(null, args || []);\r\n i++;\r\n } while (i < cache[topicName].length);\r\n }\r\n };\r\n </script>\n<a id=\"top\"></a>\n<a class=\"skip-to-content\" href=\"#main-content\">Skip to main content</a>\n<a class=\"newThisMonth\" href=\"/new-this-month/\" rel=\"nofollow\">New<> this month</a>\n<!-- Begin comScore Tag - Part 2 -->\n<noscript>\n<img src=\"https://sb.scorecardresearch.com/p?c1=2&c2=6036305&cv=2.0&cj=1&cs_ucfr=1\"/>\n</noscript>\n<!-- End comScore Tag - Part 2 -->\n<div class=\"slider-container\" global-ui-events=\"\">\n<div 
class=\"leaderboard-wrapper\" data-ad-container-autocollapse=\"\" id=\"docking-leaderboard-container\">\n<div class=\"docking-leaderboard-container\">\n<div class=\"docking-leaderboard\" data-tier=\"1\" id=\"div-gpt-leaderboard-flex-1\"></div>\n</div>\n</div>\n<div class=\"site-content\">\n<header class=\"header new-nav\">\n<div class=\"branch-journeys-top\"></div>\n<section class=\"magazine-bar\">\n<ul class=\"magazine-bar__social\">\n<li>Follow us on:</li>\n<li><a aria-label=\"Pinterest\" class=\"pinterest\" data-header-link-tracking='{\"label\": \"Social > Pinterest\"}' href=\"http://pinterest.com/allrecipes/\" target=\"_blank\" title=\"Pinterest\"><span class=\"svg-icon--social--pinterest svg-icon--social--pinterest-dims\"></span></a></li>\n<li><a aria-label=\"Facebook\" class=\"facebook\" data-header-link-tracking='{\"label\": \"Social > Facebook\"}' href=\"https://www.facebook.com/allrecipes\" target=\"_blank\" title=\"Facebook\"><span class=\"svg-icon--social--facebook svg-icon--social--facebook-dims\"></span></a></li>\n<li><a aria-label=\"Instagram\" class=\"instagram\" data-header-link-tracking='{\"label\": \"Social > Instagram\"}' href=\"http://instagram.com/allrecipes\" target=\"_blank\" title=\"Instagram\"><span class=\"svg-icon--social--instagram svg-icon--social--instagram-dims\"></span></a></li>\n<li><a aria-label=\"Twitter\" class=\"twitter\" data-header-link-tracking='{\"label\": \"Social > Twitter\"}' href=\"https://twitter.com/Allrecipes\" target=\"_blank\" title=\"Twitter\"><span class=\"svg-icon--social--twitter svg-icon--social--twitter-dims\"></span></a></li>\n</ul>\n<a class=\"magazine-bar__link\" data-header-link-tracking='{\"label\": \"Magazine\"}' href=\"http://armagazine.com/upper-nav\" target=\"_blank\">Get the Allrecipes magazine</a>\n</section>\n<section ng-controller=\"ar_controllers_top_nav\" ng-init=\"init()\">\n<ul class=\"ar-nav-section\">\n<li class=\"ar-logo-tab\">\n<a aria-label=\"Allrecipes home page\" 
data-header-link-tracking='{\"label\": \"Brand Logo\"}' href=\"https://www.allrecipes.com\">\n<div class=\"ar-logo\" ng-click=\"setAnalyticsCookie('ARlogo')\">\n<img alt=\"Allrecipes\" height=\"27\" src=\"https://secureimages.allrecipes.com/ar-images/ARlogoNew.svg\" width=\"110\"/> </div>\n</a>\n</li>\n<li class=\"browse-recipes\">\n<a class=\"recipes-txt {active:topBrowseRecipePanel_showing}\" data-header-link-tracking='{\"label\": \"Browse\"}' href=\"\" id=\"navmenu_recipes\" popup-trigger=\"topBrowseRecipePanel\"><span>BROWSE</span><span class=\"icon--chevron-down\"></span></a>\n</li>\n<li class=\"search-tab\" ng-controller=\"ar_controllers_search\">\n<div class=\"nav-search\">\n<input id=\"searchText\" name=\"searchText\" ng-keypress=\"isEnterKey($event) && performSearch()\" ng-model=\"search.keywords\" placeholder=\"Find a recipe\" type=\"text\"/>\n<button aria-label=\"Search\" class=\"btn-basic--small search-button\" ng-click=\"performSearch()\">\n<span class=\"svg-icon--top-nav-bar--search-magnify svg-icon--top-nav-bar--search-magnify-dims\"></span>\n</button>\n<div ar-event-focus=\"click\" ar-event-focus-id=\"setFocus-keywordSearch\" class=\"ingredient-searchtxt\" id=\"ingredientSearch\" popup-trigger=\"topNavSearchMenu\">Ingredient Search</div>\n</div>\n</li>\n<li class=\"social-notification\" ng-class=\"{active: notifications_showing}\" popup-trigger=\"notifications\">\n<a class=\"socialNotification\" href=\"\" ng-click=\"setNotificationsViewed()\" ng-cloak=\"\" ng-controller=\"ar_controllers_notifications\" title=\"Notifications\">\n<span aria-label=\"notifications\" class=\"svg-icon--top-nav-bar--nav-bell svg-icon--top-nav-bar--nav-bell-dims\"></span>\n<span class=\"notification-count\" ng-bind=\"notificationCount\" ng-show=\"displayCount\"></span>\n</a>\n</li>\n<li class=\"nav-favorites\" ng-click=\"setAnalyticsCookie('favorites')\">\n<a aria-label=\"My Favorites\" data-header-link-tracking='{\"label\": \"Favorites\"}' 
href=\"https://www.allrecipes.com/cook/my/favorites/\" title=\"My Favorites\">\n<span class=\"svg-icon--top-nav-bar--grey-heart svg-icon--top-nav-bar--grey-heart-dims\"></span>\n</a>\n</li>\n<li class=\"nav-profile anonymous-user\">\n<a data-header-link-tracking='{\"label\": \"Create A Profile\"}' href=\"https://www.allrecipes.com/account/authenticationwelcome/\">\n<div class=\"login-state\">\n<div class=\"img-profile svg-icon--top-nav-bar--userhead svg-icon--top-nav-bar--userhead-dims\" ng-click=\"setAnalyticsCookie('profile|profile')\"></div>\n<span class=\"username icon-user--default\" id=\"offCanvasDisplayName\" ng-click=\"setAnalyticsCookie('create profile|sign up')\">Create a profile</span>\n</div>\n</a>\n</li>\n<li class=\"small-screen search-phone--landscape\" ng-class=\"{active:topNavSearchMenu_showing}\">\n<a ar-event-focus=\"click\" ar-event-focus-id=\"setFocus-keywordSearch\" href=\"\" popup-trigger=\"topNavSearchMenu\">\n<div class=\"nav-search\">\n<span class=\"svg-icon--top-nav-bar--search-magnify-gray svg-icon--top-nav-bar--search-magnify-gray-dims\"></span>\n</div>\n</a>\n</li>\n<li class=\"small-screen profile-phone--landscape\" ng-class=\"{active:topNavProfileMenu_showing}\" popup-trigger=\"topNavProfileMenu\">\n<a aria-label=\"Open Profile\" data-link-tracking='{\"label\": \"Open Profile\"}' href=\"\">\n<div class=\"login-state\">\n<div class=\"img-profile svg-icon--top-nav-bar--userhead svg-icon--top-nav-bar--userhead-dims\"></div>\n</div>\n</a>\n</li>\n<li class=\"hamburger-tab\" ng-class=\"{active: topNavHamburgerMenu_showing}\" popup-trigger=\"topNavHamburgerMenu\">\n<a aria-label=\"secondary\" data-link-tracking='{\"label\": \"Open Hamburger Menu\"}' href=\"\" ng-click=\"trackHamburgerMenuToggle(this)\" ng-switch=\"\" role=\"navigation\" title=\"More menu\">\n<div class=\"hamburger-nav\">\n<span class=\"browse-recipes-iconbar\"></span>\n<span class=\"browse-recipes-iconbar\"></span>\n<span 
class=\"browse-recipes-iconbar\"></span>\n</div>\n</a>\n</li>\n</ul>\n<social-notification ng-cloak=\"\" popup-panel=\"notifications\"></social-notification>\n<div class=\"nav-tab nav-tab__search ng-hide\" ng-cloak=\"\" popup-panel=\"topNavSearchMenu\">\n<form>\n<div data-ng-controller=\"ar_controllers_search\">\n<span class=\"icon--close\" hidewhenclicked=\"\" title=\"Close Ingredient Search\"></span>\n<div class=\"input-wrap--home\">\n<span class=\"svg-icon--top-nav-bar--search-magnify-gray svg-icon--top-nav-bar--search-magnify-gray-dims\"></span>\n<input class=\"setFocus-keywordSearch\" id=\"searchText\" ng-model=\"search.keywords\" placeholder=\"Keywords\" type=\"text\"/>\n</div>\n<div class=\"input-wrap--home ingredients\">\n<div class=\"ingredient-clipping-frame\">\n<ul class=\"ingredient-scroller\" onselectstart=\"return false;\" unselectable=\"on\">\n<li ng-repeat=\"ingredient in search.ingredientsInclude\">\n<span>\n<span ng-bind=\"::ingredient\"></span>\n<span class=\"icon--x\" ng-click=\"removeIngredientInclude(ingredient, $event)\" unsubscribe-global-click-handler=\"\">✕</span>\n</span>\n</li>\n</ul>\n<div class=\"ingredient-add-exclude\">\n<input class=\"setFocus-includeIng\" id=\"includeIngText\" name=\"txtIncludeIng\" ng-attr-placeholder=\"{{includeIngPlaceholderText}}\" ng-keydown=\"(isBackspaceKey($event) && removeLastIngredientInclude($event)) || (isTabKey($event) && addIngredientInclude($event))\" ng-model=\"includeIngredient\" type=\"text\"/>\n</div>\n</div>\n<a ar-event-focus=\"click\" ar-event-focus-id=\"setFocus-includeIng\" class=\"btn-basic--small include\" ng-class=\"{ 'grayed-out': includeIngHitMax }\" ng-click=\"addIngredientInclude($event)\"><span>+</span></a>\n</div>\n<div class=\"input-wrap--home ingredients\">\n<div class=\"ingredient-clipping-frame\">\n<ul class=\"ingredient-scroller\" onselectstart=\"return false;\" unselectable=\"on\">\n<li ng-repeat=\"ingredient in search.ingredientsExclude\">\n<span 
class=\"exclude-item\">\n<span ng-bind=\"::ingredient\"></span>\n<span class=\"icon--x\" ng-click=\"removeIngredientExclude(ingredient, $event)\" unsubscribe-global-click-handler=\"\">✕</span>\n</span>\n</li>\n</ul>\n<div class=\"ingredient-add-exclude\">\n<input class=\"setFocus-excludeIng\" id=\"excludeIngText\" name=\"txtExcludeIng\" ng-attr-placeholder=\"{{excludeIngPlaceholderText}}\" ng-keydown=\"(isBackspaceKey($event) && removeLastIngredientExclude($event)) || (isTabKey($event) && addIngredientExclude($event))\" ng-model=\"excludeIngredient\" type=\"text\"/>\n</div>\n</div>\n<a ar-event-focus=\"click\" ar-event-focus-id=\"setFocus-excludeIng\" class=\"btn-basic--small exclude\" ng-class=\"{ 'grayed-out': excludeIngHitMax }\" ng-click=\"addIngredientExclude($event)\"><span>—</span></a>\n</div>\n<div class=\"nav-tab__buttons\">\n<button class=\"btn-basic--small btn-search\" ng-click=\"performSearch()\" ng-cloak=\"\">Go</button>\n</div>\n</div>\n<ar-notification></ar-notification>\n</form>\n</div>\n<div class=\"browse-recipe-tab social ng-hide\" id=\"topBrowseRecipePanel\" ng-cloak=\"\" popup-panel=\"topBrowseRecipePanel\">\n<section class=\"hero-link nav-tab__options recipe-nav-tab__options\">\n<div class=\"grid underline_hero_link\">\n<ul class=\"browse-hubs\">\n<li class=\"browse-hubs__categories\">\n<h3>\n Meal Type\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Appetizers & Snacks\"}' href=\"https://www.allrecipes.com/recipes/76/appetizers-and-snacks/\" ng-click=\"setAnalyticsCookie('browse|appetizers \\u0026 snacks')\" title=\"Appetizers & Snacks Recipes\">\n Appetizers & Snacks\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Breakfast & Brunch\"}' href=\"https://www.allrecipes.com/recipes/78/breakfast-and-brunch/\" ng-click=\"setAnalyticsCookie('browse|breakfast \\u0026 brunch')\" title=\"Breakfast & Brunch 
Recipes\">\n Breakfast & Brunch\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Desserts\"}' href=\"https://www.allrecipes.com/recipes/79/desserts/\" ng-click=\"setAnalyticsCookie('browse|desserts')\" title=\"Desserts Recipes\">\n Desserts\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Dinner\"}' href=\"https://www.allrecipes.com/recipes/17562/dinner/\" ng-click=\"setAnalyticsCookie('browse|dinner')\" title=\"Dinner Recipes\">\n Dinner\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Drinks\"}' href=\"https://www.allrecipes.com/recipes/77/drinks/\" ng-click=\"setAnalyticsCookie('browse|drinks')\" title=\"Drinks Recipes\">\n Drinks\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Ingredient\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Beef\"}' href=\"https://www.allrecipes.com/recipes/200/meat-and-poultry/beef/\" ng-click=\"setAnalyticsCookie('browse|beef')\" title=\"Beef Recipes\">\n Beef\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Chicken\"}' href=\"https://www.allrecipes.com/recipes/201/meat-and-poultry/chicken/\" ng-click=\"setAnalyticsCookie('browse|chicken')\" title=\"Chicken Recipes\">\n Chicken\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Pasta\"}' href=\"https://www.allrecipes.com/recipes/95/pasta-and-noodles/\" ng-click=\"setAnalyticsCookie('browse|pasta')\" title=\"Pasta Recipes\">\n Pasta\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Pork\"}' href=\"https://www.allrecipes.com/recipes/205/meat-and-poultry/pork/\" ng-click=\"setAnalyticsCookie('browse|pork')\" title=\"Pork Recipes\">\n Pork\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Salmon\"}' 
href=\"https://www.allrecipes.com/recipes/416/seafood/fish/salmon/\" ng-click=\"setAnalyticsCookie('browse|salmon')\" title=\"Salmon Recipes\">\n Salmon\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Diet & Health\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Diabetic\"}' href=\"https://www.allrecipes.com/recipes/739/healthy-recipes/diabetic/\" ng-click=\"setAnalyticsCookie('browse|diabetic')\" title=\"Diabetic Recipes\">\n Diabetic\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Gluten Free\"}' href=\"https://www.allrecipes.com/recipes/741/healthy-recipes/gluten-free/\" ng-click=\"setAnalyticsCookie('browse|gluten free')\" title=\"Gluten Free Recipes\">\n Gluten Free\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Healthy\"}' href=\"https://www.allrecipes.com/recipes/84/healthy-recipes/\" ng-click=\"setAnalyticsCookie('browse|healthy')\" title=\"Healthy Recipes\">\n Healthy\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Low Calorie\"}' href=\"https://www.allrecipes.com/recipes/1232/healthy-recipes/low-calorie/\" ng-click=\"setAnalyticsCookie('browse|low calorie')\" title=\"Low Calorie Recipes\">\n Low Calorie\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Low Fat\"}' href=\"https://www.allrecipes.com/recipes/1231/healthy-recipes/low-fat/\" ng-click=\"setAnalyticsCookie('browse|low fat')\" title=\"Low Fat Recipes\">\n Low Fat\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Seasonal\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Lunar New Year\"}' href=\"https://www.allrecipes.com/recipes/17668/holidays-and-events/lunar-new-year/\" 
ng-click=\"setAnalyticsCookie('browse|lunar new year')\" title=\"Lunar New Year Recipes\">\n Lunar New Year\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Recipes for the Super Bowl®\"}' href=\"https://www.allrecipes.com/recipes/1419/holidays-and-events/big-game/\" ng-click=\"setAnalyticsCookie('browse|recipes for the super bowl®')\" title=\"Recipes for the Super Bowl®\">\n Recipes for the Super Bowl®\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Valentines Day\"}' href=\"https://www.allrecipes.com/recipes/199/holidays-and-events/valentines-day/\" ng-click=\"setAnalyticsCookie('browse|valentine\\u0027s day')\" title=\"Valentine's Day Recipes\">\n Valentine's Day\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Mardi Gras\"}' href=\"https://www.allrecipes.com/recipes/192/holidays-and-events/mardi-gras/\" ng-click=\"setAnalyticsCookie('browse|mardi gras')\" title=\"Mardi Gras Recipes\">\n Mardi Gras\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > More Holidays and Events\"}' href=\"https://www.allrecipes.com/recipes/85/holidays-and-events/\" ng-click=\"setAnalyticsCookie('browse|more holidays and events')\" title=\"More Holidays and Events Recipes\">\n More Holidays and Events\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Dish Type\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Breads\"}' href=\"https://www.allrecipes.com/recipes/156/bread/\" ng-click=\"setAnalyticsCookie('browse|breads')\" title=\"Breads Recipes\">\n Breads\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Cakes\"}' href=\"https://www.allrecipes.com/recipes/276/desserts/cakes/\" ng-click=\"setAnalyticsCookie('browse|cakes')\" title=\"Cakes Recipes\">\n Cakes\n </a>\n</li>\n<li>\n<a 
data-header-link-tracking='{\"label\": \"Browse Recipes > Salads\"}' href=\"https://www.allrecipes.com/recipes/96/salad/\" ng-click=\"setAnalyticsCookie('browse|salads')\" title=\"Salads Recipes\">\n Salads\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Smoothies\"}' href=\"https://www.allrecipes.com/recipes/138/drinks/smoothies/\" ng-click=\"setAnalyticsCookie('browse|smoothies')\" title=\"Smoothies Recipes\">\n Smoothies\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Soups, Stews & Chili\"}' href=\"https://www.allrecipes.com/recipes/94/soups-stews-and-chili/\" ng-click=\"setAnalyticsCookie('browse|soups, stews \\u0026 chili')\" title=\"Soups, Stews & Chili Recipes\">\n Soups, Stews & Chili\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Cooking Style\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > BBQ & Grilling\"}' href=\"https://www.allrecipes.com/recipes/88/bbq-grilling/\" ng-click=\"setAnalyticsCookie('browse|bbq \\u0026 grilling')\" title=\"BBQ & Grilling Recipes\">\n BBQ & Grilling\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Quick & Easy\"}' href=\"https://www.allrecipes.com/recipes/1947/everyday-cooking/quick-and-easy/\" ng-click=\"setAnalyticsCookie('browse|quick \\u0026 easy')\" title=\"Quick & Easy Recipes\">\n Quick & Easy\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Slow Cooker\"}' href=\"https://www.allrecipes.com/recipes/253/everyday-cooking/slow-cooker/\" ng-click=\"setAnalyticsCookie('browse|slow cooker')\" title=\"Slow Cooker Recipes\">\n Slow Cooker\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Vegan\"}' href=\"https://www.allrecipes.com/recipes/1227/everyday-cooking/vegan/\" ng-click=\"setAnalyticsCookie('browse|vegan')\" 
title=\"Vegan Recipes\">\n Vegan\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Vegetarian\"}' href=\"https://www.allrecipes.com/recipes/87/everyday-cooking/vegetarian/\" ng-click=\"setAnalyticsCookie('browse|vegetarian')\" title=\"Vegetarian Recipes\">\n Vegetarian\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n World Cuisine\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Asian\"}' href=\"https://www.allrecipes.com/recipes/227/world-cuisine/asian/\" ng-click=\"setAnalyticsCookie('browse|asian')\" title=\"Asian Recipes\">\n Asian\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Indian\"}' href=\"https://www.allrecipes.com/recipes/233/world-cuisine/asian/indian/\" ng-click=\"setAnalyticsCookie('browse|indian')\" title=\"Indian Recipes\">\n Indian\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Italian\"}' href=\"https://www.allrecipes.com/recipes/723/world-cuisine/european/italian/\" ng-click=\"setAnalyticsCookie('browse|italian')\" title=\"Italian Recipes\">\n Italian\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Mexican\"}' href=\"https://www.allrecipes.com/recipes/728/world-cuisine/latin-american/mexican/\" ng-click=\"setAnalyticsCookie('browse|mexican')\" title=\"Mexican Recipes\">\n Mexican\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Southern\"}' href=\"https://www.allrecipes.com/recipes/15876/us-recipes/southern/\" ng-click=\"setAnalyticsCookie('browse|southern')\" title=\"Southern Recipes\">\n Southern\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Special Collections\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse 
Recipes > Food Wishes with Chef John\"}' href=\"https://www.allrecipes.com/recipes/16791/everyday-cooking/special-collections/web-show-recipes/food-wishes/\" ng-click=\"setAnalyticsCookie('browse|food wishes with chef john')\" title=\"Food Wishes with Chef John Recipes\">\n Food Wishes with Chef John\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Allrecipes Magazine Recipes\"}' href=\"https://www.allrecipes.com/recipes/17235/everyday-cooking/allrecipes-magazine-recipes/\" ng-click=\"setAnalyticsCookie('browse|allrecipes magazine recipes')\" title=\"Allrecipes Magazine Recipes\">\n Allrecipes Magazine Recipes\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Our Newest Recipes\"}' href=\"https://www.allrecipes.com/recipes/22908/everyday-cooking/special-collections/new/\" ng-click=\"setAnalyticsCookie('browse|our newest recipes')\" title=\"Our Newest Recipes\">\n Our Newest Recipes\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Trusted Brands\"}' href=\"http://dish.allrecipes.com/trusted-brand-pages/\" ng-click=\"setAnalyticsCookie('browse|trusted brands')\" title=\"Trusted Brands Recipes\">\n Trusted Brands\n </a>\n</li>\n</ul>\n</li>\n</ul>\n</div>\n<a class=\"recipe-hero-link__item__text\" href=\"https://www.allrecipes.com/recipes/\" ng-click=\"setAnalyticsCookie('browse|all categories')\">All Categories</a>\n</section>\n</div>\n<!-- user sign in area -->\n<div class=\"nav-tab social profile-nav ng-hide\" ng-cloak=\"\" popup-panel=\"topNavProfileMenu\">\n<ul class=\"nav-tab__options\">\n<li ng-click=\"setAnalyticsCookie('profile|feed', 'menu')\">\n<a href=\"https://www.allrecipes.com/account/authenticationwelcome/?loginReferrerUrl=/home\" id=\"navmenu_myFeed\">\n<span class=\"nav-icon svg-icon--top-nav-bar--home svg-icon--top-nav-bar--home-dims\"></span>\n<span class=\"itemText\">Feed</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|profile', 
'menu')\">\n<a href=\"https://www.allrecipes.com/cook/my/\" id=\"navmenu_myprofile\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-profile svg-icon--top-nav-bar--nav-profile-dims\"></span>\n<span class=\"itemText\">Profile</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|favorites', 'menu')\">\n<a href=\"https://www.allrecipes.com/cook/my/favorites/\" id=\"navmenu_recipebox\" rel=\"nofollow\">\n<span class=\"nav-icon svg-icon--top-nav-bar--grey-heart svg-icon--top-nav-bar--grey-heart-dims\"></span>\n<span class=\"itemText\">Favorites</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|friends', 'menu')\">\n<a href=\"https://www.allrecipes.com/cook/my/findfriends/\" id=\"navmenu_findfriends\" rel=\"nofollow\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-friends svg-icon--top-nav-bar--nav-friends-dims\"></span>\n<span class=\"itemText\">Friends</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|shopping list', 'menu')\">\n<a href=\"https://www.allrecipes.com/my/shopping-lists/\" id=\"navmenu_shoppinglist\" rel=\"nofollow\">\n<span class=\"nav-icon svg-icon--top-nav-bar--grey-shopping svg-icon--top-nav-bar--grey-shopping-dims\"></span>\n<span class=\"itemText\">Shopping List</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|settings', 'menu')\">\n<a href=\"https://www.allrecipes.com/cook/my/account-settings/\" id=\"navmenu_settings\" rel=\"nofollow\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-settings svg-icon--top-nav-bar--nav-settings-dims\"></span>\n<span class=\"itemText\">Settings</span>\n</a>\n</li>\n</ul>\n<div class=\"signin\" ng-click=\"setAnalyticsCookie('profile|sign in ', 'menu')\">\n<button class=\"btn-basic--large btn-gold\" id=\"navmenu_signin_signup\" onclick=\"location.href='https://www.allrecipes.com/account/authenticationwelcome/?actionsource=' +(typeof dataLayer !=='undefined' ? 
dataLayer.page.category.contentType : '' ) \">Sign In <em>or</em> Sign Up</button>\n</div>\n</div>\n<!-- hub links, etc. -->\n<div class=\"nav-tab last ng-hide\" ng-cloak=\"\" popup-panel=\"topNavHamburgerMenu\">\n<ul class=\"nav-tab__options\">\n<li class=\"underline_link\">\n<a href=\"\" id=\"navmenu_recipes\" ng-click=\"browseNav()\" popup-trigger=\"browseRecipePanel\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-browse-orange svg-icon--top-nav-bar--nav-browse-orange-dims\" ng-class=\"{'active': isActive}\"></span>\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-browse svg-icon--top-nav-bar--nav-browse-dims\" ng-class=\"{'hidden': isActive}\"></span>\n<span class=\"nav-link-text\">Browse Recipes</span>\n<span class=\"icon-chevron\" ng-class=\"{'active': isActive}\"></span>\n</a>\n</li>\n<li class=\"browse-div-option ng-hide\" id=\"mobile-nav-container\" popup-panel=\"browseRecipePanel\">\n<ul class=\"nav-tab__mobile-browse\">\n<li>\n<input id=\"Meal Type\" type=\"checkbox\"/><label for=\"Meal Type\">Meal Type<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Appetizers & Snacks\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/76/appetizers-and-snacks/\" ng-click=\"setAnalyticsCookie('browse|appetizers \\u0026 snacks')\" title=\"Appetizers & Snacks Recipes\">Appetizers & Snacks</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Breakfast & Brunch\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/78/breakfast-and-brunch/\" ng-click=\"setAnalyticsCookie('browse|breakfast \\u0026 brunch')\" title=\"Breakfast & Brunch Recipes\">Breakfast & Brunch</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Desserts\", \"eventName\": \"Hamburger Nav Action Taken\"}' 
href=\"https://www.allrecipes.com/recipes/79/desserts/\" ng-click=\"setAnalyticsCookie('browse|desserts')\" title=\"Desserts Recipes\">Desserts</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Dinner\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/17562/dinner/\" ng-click=\"setAnalyticsCookie('browse|dinner')\" title=\"Dinner Recipes\">Dinner</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Drinks\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/77/drinks/\" ng-click=\"setAnalyticsCookie('browse|drinks')\" title=\"Drinks Recipes\">Drinks</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Ingredient\" type=\"checkbox\"/><label for=\"Ingredient\">Ingredient<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Beef\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/200/meat-and-poultry/beef/\" ng-click=\"setAnalyticsCookie('browse|beef')\" title=\"Beef Recipes\">Beef</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Chicken\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/201/meat-and-poultry/chicken/\" ng-click=\"setAnalyticsCookie('browse|chicken')\" title=\"Chicken Recipes\">Chicken</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Pasta\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/95/pasta-and-noodles/\" ng-click=\"setAnalyticsCookie('browse|pasta')\" title=\"Pasta Recipes\">Pasta</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Pork\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/205/meat-and-poultry/pork/\" 
ng-click=\"setAnalyticsCookie('browse|pork')\" title=\"Pork Recipes\">Pork</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Salmon\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/416/seafood/fish/salmon/\" ng-click=\"setAnalyticsCookie('browse|salmon')\" title=\"Salmon Recipes\">Salmon</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Diet & Health\" type=\"checkbox\"/><label for=\"Diet & Health\">Diet & Health<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Diabetic\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/739/healthy-recipes/diabetic/\" ng-click=\"setAnalyticsCookie('browse|diabetic')\" title=\"Diabetic Recipes\">Diabetic</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Gluten Free\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/741/healthy-recipes/gluten-free/\" ng-click=\"setAnalyticsCookie('browse|gluten free')\" title=\"Gluten Free Recipes\">Gluten Free</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Healthy\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/84/healthy-recipes/\" ng-click=\"setAnalyticsCookie('browse|healthy')\" title=\"Healthy Recipes\">Healthy</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Low Calorie\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1232/healthy-recipes/low-calorie/\" ng-click=\"setAnalyticsCookie('browse|low calorie')\" title=\"Low Calorie Recipes\">Low Calorie</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Low Fat\", \"eventName\": \"Hamburger Nav Action Taken\"}' 
href=\"https://www.allrecipes.com/recipes/1231/healthy-recipes/low-fat/\" ng-click=\"setAnalyticsCookie('browse|low fat')\" title=\"Low Fat Recipes\">Low Fat</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Seasonal\" type=\"checkbox\"/><label for=\"Seasonal\">Seasonal<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Lunar New Year\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/17668/holidays-and-events/lunar-new-year/\" ng-click=\"setAnalyticsCookie('browse|lunar new year')\" title=\"Lunar New Year Recipes\">Lunar New Year</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Recipes for the Super Bowl®\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1419/holidays-and-events/big-game/\" ng-click=\"setAnalyticsCookie('browse|recipes for the super bowl®')\" title=\"Recipes for the Super Bowl®\">Recipes for the Super Bowl®</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Valentines Day\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/199/holidays-and-events/valentines-day/\" ng-click=\"setAnalyticsCookie('browse|valentine\\u0027s day')\" title=\"Valentine's Day Recipes\">Valentine's Day</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Mardi Gras\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/192/holidays-and-events/mardi-gras/\" ng-click=\"setAnalyticsCookie('browse|mardi gras')\" title=\"Mardi Gras Recipes\">Mardi Gras</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > More Holidays and Events\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/85/holidays-and-events/\" ng-click=\"setAnalyticsCookie('browse|more 
holidays and events')\" title=\"More Holidays and Events Recipes\">More Holidays and Events</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Dish Type\" type=\"checkbox\"/><label for=\"Dish Type\">Dish Type<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Breads\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/156/bread/\" ng-click=\"setAnalyticsCookie('browse|breads')\" title=\"Breads Recipes\">Breads</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Cakes\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/276/desserts/cakes/\" ng-click=\"setAnalyticsCookie('browse|cakes')\" title=\"Cakes Recipes\">Cakes</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Salads\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/96/salad/\" ng-click=\"setAnalyticsCookie('browse|salads')\" title=\"Salads Recipes\">Salads</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Smoothies\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/138/drinks/smoothies/\" ng-click=\"setAnalyticsCookie('browse|smoothies')\" title=\"Smoothies Recipes\">Smoothies</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Soups, Stews & Chili\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/94/soups-stews-and-chili/\" ng-click=\"setAnalyticsCookie('browse|soups, stews \\u0026 chili')\" title=\"Soups, Stews & Chili Recipes\">Soups, Stews & Chili</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Cooking Style\" type=\"checkbox\"/><label for=\"Cooking Style\">Cooking Style<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a 
data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > BBQ & Grilling\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/88/bbq-grilling/\" ng-click=\"setAnalyticsCookie('browse|bbq \\u0026 grilling')\" title=\"BBQ & Grilling Recipes\">BBQ & Grilling</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Quick & Easy\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1947/everyday-cooking/quick-and-easy/\" ng-click=\"setAnalyticsCookie('browse|quick \\u0026 easy')\" title=\"Quick & Easy Recipes\">Quick & Easy</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Slow Cooker\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/253/everyday-cooking/slow-cooker/\" ng-click=\"setAnalyticsCookie('browse|slow cooker')\" title=\"Slow Cooker Recipes\">Slow Cooker</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Vegan\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1227/everyday-cooking/vegan/\" ng-click=\"setAnalyticsCookie('browse|vegan')\" title=\"Vegan Recipes\">Vegan</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Vegetarian\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/87/everyday-cooking/vegetarian/\" ng-click=\"setAnalyticsCookie('browse|vegetarian')\" title=\"Vegetarian Recipes\">Vegetarian</a></li>\n</ul>\n</li>\n<li>\n<input id=\"World Cuisine\" type=\"checkbox\"/><label for=\"World Cuisine\">World Cuisine<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Asian\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/227/world-cuisine/asian/\" 
ng-click=\"setAnalyticsCookie('browse|asian')\" title=\"Asian Recipes\">Asian</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Indian\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/233/world-cuisine/asian/indian/\" ng-click=\"setAnalyticsCookie('browse|indian')\" title=\"Indian Recipes\">Indian</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Italian\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/723/world-cuisine/european/italian/\" ng-click=\"setAnalyticsCookie('browse|italian')\" title=\"Italian Recipes\">Italian</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Mexican\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/728/world-cuisine/latin-american/mexican/\" ng-click=\"setAnalyticsCookie('browse|mexican')\" title=\"Mexican Recipes\">Mexican</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Southern\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/15876/us-recipes/southern/\" ng-click=\"setAnalyticsCookie('browse|southern')\" title=\"Southern Recipes\">Southern</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Special Collections\" type=\"checkbox\"/><label for=\"Special Collections\">Special Collections<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Food Wishes with Chef John\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/16791/everyday-cooking/special-collections/web-show-recipes/food-wishes/\" ng-click=\"setAnalyticsCookie('browse|food wishes with chef john')\" title=\"Food Wishes with Chef John Recipes\">Food Wishes with Chef John</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu 
> Browse Recipes > Allrecipes Magazine Recipes\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/17235/everyday-cooking/allrecipes-magazine-recipes/\" ng-click=\"setAnalyticsCookie('browse|allrecipes magazine recipes')\" title=\"Allrecipes Magazine Recipes\">Allrecipes Magazine Recipes</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Our Newest Recipes\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/22908/everyday-cooking/special-collections/new/\" ng-click=\"setAnalyticsCookie('browse|our newest recipes')\" title=\"Our Newest Recipes\">Our Newest Recipes</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Trusted Brands\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"http://dish.allrecipes.com/trusted-brand-pages/\" ng-click=\"setAnalyticsCookie('browse|trusted brands')\" title=\"Trusted Brands Recipes\">Trusted Brands</a></li>\n</ul>\n</li>\n</ul>\n<div class=\"see-all\"><a href=\"https://www.allrecipes.com/recipes/\" target=\"_self\">See all categories</a></div>\n</li>\n<li ng-click=\"setAnalyticsData('allrecipes magazine')\">\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Magazine\", \"eventName\": \"Header Action Taken\"}' href=\"https://www.magazine.store/allrecipes-magazine/\" id=\"navmenu_magazine\" rel=\"noopener\" target=\"_blank\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-magazine svg-icon--top-nav-bar--nav-magazine-dims\"></span>\n<span>Allrecipes Magazine</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsData('dinner spinner tv')\">\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Dinner Spinner TV\", \"eventName\": \"Header Action Taken\"}' href=\"http://dish.allrecipes.com/dinner-spinner-tv-show/\" id=\"navmenu_tv\" rel=\"noopener\">\n<span class=\"nav-icon svg-icon--top-nav-bar--tv_icon svg-icon--top-nav-bar--tv_icon-dims\"></span>\n<span>Dinner Spinner 
TV</span>\n</a>\n</li>\n<li class=\"underline_link\" ng-click=\"setAnalyticsData('cooking school')\">\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Cooking School\", \"eventName\": \"Header Action Taken\"}' href=\"http://cookingschool.allrecipes.com/\" id=\"navmenu_cooking_school\" rel=\"noopener\" target=\"_blank\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-cookingschool svg-icon--top-nav-bar--nav-cookingschool-dims\"></span>\n<span>Cooking School</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('newsletters')\">\n<a data-link-tracking='{\"label\": \"Hambuger Menu > Newsletters\", \"eventName\": \"Header Action Taken\"}' href=\"https://www.allrecipes.com/cook/my/account-settings/#NewslettersSubscription\" id=\"navmenu_social_gallery\" rel=\"noopener\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-newsletters svg-icon--top-nav-bar--nav-newsletters-dims\"></span>\n<span>Newsletters</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsData('ask the community')\">\n<a data-link-tracking='{\"label\": \"Hambuger Menu > Ask The Community\", \"eventName\": \"Header Action Taken\"}' href=\"http://dish.allrecipes.com/ask-the-community/\" id=\"navmenu_dish\" rel=\"noopener\" target=\"_self\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-community svg-icon--top-nav-bar--nav-community-dims\"></span>\n<span>Ask the Community</span>\n</a>\n</li>\n<li class=\"underline_link\" ng-click=\"setAnalyticsData('help')\">\n<a data-link-tracking='{\"label\": \"Hambuger Menu > Help\", \"eventName\": \"Header Action Taken\"}' href=\"http://dish.allrecipes.com/customer-service/\" id=\"navmenu_help\" rel=\"noopener\" target=\"_self\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-help svg-icon--top-nav-bar--nav-help-dims\"></span>\n<span>Help</span>\n</a>\n</li>\n<li>\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Jobs\", \"eventName\": \"Header Action Taken\"}' href=\"http://dish.allrecipes.com/allrecipes-jobs-2/\" 
ng-click=\"setAnalyticsData('jobs')\" rel=\"noopener\" target=\"_self\">Jobs</a>\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Newsroom\", \"eventName\": \"Header Action Taken\"}' href=\"http://press.allrecipes.com/\" ng-click=\"setAnalyticsData('newsroom')\" rel=\"noopener\">Newsroom</a>\n</li>\n</ul>\n</div>\n</section>\n</header>\n<div class=\"container-content body-content\" id=\"main-content\">\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/account-js.bundled.js\"></script>\n<script>var adManager = {};</script>\n<div data-role=\"page\" id=\"account_login\">\n<div class=\"authentication suppress-foresee\">\n<section>\n<section class=\"subheader\">\n<script>\n // Load the SDK asynchronously\n (function (d) {\n if (d.getElementById('facebook-jssdk')) {\n return;\n }\n var js = d.createElement('script');\n js.type = 'text/javascript';\n js.async = true;\n js.src = '//connect.facebook.net/en_US/sdk.js';\n js.id = 'facebook-jssdk';\n var s = d.getElementsByTagName('script')[0];\n s.parentNode.insertBefore(js, s);\n })(document);\n\n var initializeGoogeAuth = function () {\n gapi.load('auth2', function () {\n // Retrieve the singleton for the GoogleAuth library and set up the client.\n auth2 = gapi.auth2.init({\n client_id: '363274647518-g085sh00jkmgpbfphoj1rlim2btbn07s.apps.googleusercontent.com',\n cookiepolicy: 'single_host_origin',\n redirect_uri: 'postmessage'\n });\n attachSigninClickEventHandler(document.getElementById('google-sign-in-button'));\n });\n\n function attachSigninClickEventHandler(element) {\n auth2.attachClickHandler(element, {},\n function (googleUser) {\n var userAuthResponse = googleUser.getAuthResponse();\n var accessToken = userAuthResponse.access_token;\n googleSignInCallback(accessToken);\n }, function (error) {\n googleSignInCallback();\n }\n );\n }\n }();\n</script>\n<div id=\"fb-root\"></div>\n<h1 class=\"sign--social\" 
data-state=\"5496194823785841786597583543653447895202431228279444578006387344706326\" data-targetorigin=\"https://www.allrecipes.com/\">Sign in with social.</h1>\n<h4 class=\"signin-subhead\">New and existing Allrecipes users.</h4>\n<div class=\"mobile-auth-error\" id=\"authenticationError\"></div>\n<div class=\"login-facebook\">\n<a href=\"#\" onclick=\"facebookLogin();\"><span></span></a>\n</div>\n<div id=\"gSignInWrapper\">\n<div class=\"customGPlusSignIn\" id=\"google-sign-in-button\">\n<span class=\"google-signin-icon\"></span>\n<span class=\"google-signin-text\">Google</span>\n</div>\n</div>\n<script>\n function facebookLogin() {\n var trackPropertiesSerialized = localStorage.getItem('Allrecipes.trackProperties');\n var trackProperties = {};\n if (trackPropertiesSerialized !== undefined &&\n trackPropertiesSerialized !== null &&\n trackPropertiesSerialized !== \"\") {\n trackProperties = JSON.parse(trackPropertiesSerialized);\n }\n\n var layout = $('#AuthLayoutMode').val();\n\n if (window.location.href.indexOf(\"amp_savebar\") >= 0) {\n window.segmentAnalytics.track('Sign In and Registration',\n {\n category: trackProperties.category,\n label: \"Facebook Sign In or Registration\",\n registrationId: \"3638\",\n regSource: \"ALR_SitePlacement_amp save recipe\"\n });\n } else {\n window.segmentAnalytics.track('Sign In and Registration',\n {\n category: trackProperties.category,\n label: \"Facebook Sign In or Registration\",\n registrationId: trackProperties.registrationId,\n regSource: trackProperties.regSource\n });\n }\n\n if (typeof layout != 'undefined' && layout != 'Compact') {\n FB.login(function(response) {\n if (response.status === 'connected') {\n if (response && response.authResponse && response.authResponse.accessToken) {\n\n if (window.location.href.indexOf(\"amp_savebar\") >= 0) {\n window.segmentAnalytics.track('Sign In and Registration',\n {\n category: trackProperties.category,\n label: \"Facebook Sign In or Registration Success\",\n 
registrationId: \"3638\",\n regSource: \"ALR_SitePlacement_amp save recipe\"\n });\n } else {\n window.segmentAnalytics.track('Sign In and Registration',\n {\n category: trackProperties.category,\n label: \"Facebook Sign In or Registration Success\",\n registrationId: trackProperties.registrationId,\n regSource: trackProperties.regSource\n });\n }\n\n var socialCsrfToken = document.getElementById(\"SocialCsrfToken\").value;\n\n var data = {\n socialType: 1,\n socialAuthCode: null,\n socialAccessToken: response.authResponse.accessToken,\n socialCsrfToken: socialCsrfToken,\n rememberMe: true,\n layoutMode: layout\n };\n\n if (AR && AR.Account && AR.Account.SocialSignInApi) {\n AR.Account.SocialSignInApi(data, 'https%3a%2f%2fwww.allrecipes.com%2f');\n }\n } else {\n\n if (window.location.href.indexOf(\"amp_savebar\") >= 0) {\n window.segmentAnalytics.track('Sign In and Registration',\n {\n category: trackProperties.category,\n label: \"Facebook Sign In or Registration Failure\",\n registrationId: \"3638\",\n regSource: \"ALR_SitePlacement_amp save recipe\"\n });\n } else {\n window.segmentAnalytics.track('Sign In and Registration',\n {\n category: trackProperties.category,\n label: \"Facebook Sign In or Registration Failure\",\n registrationId: trackProperties.registrationId,\n regSource: trackProperties.regSource\n });\n }\n\n AR.Account.HandleUnknownSignInError(layout, true);\n }\n }\n },\n { scope: 'email,user_friends' });\n } else {\n var permissionUrl = \"https://m.facebook.com/dialog/oauth?client_id=\" +\n \"66102450266\" +\n \"&scope=email,user_likes\" +\n \"&response_type=code&redirect_uri=\" +\n encodeURIComponent(window.location.protocol +\n \"//\" +\n window.location.hostname +\n \"/account/socialsignin/facebook/?loginReferringUrl=\" +\n encodeURIComponent('https://www.allrecipes.com/'));\n\n window.location = permissionUrl;\n return;\n }\n }\n</script>\n</section>\n<div id=\"authenticationError\"></div>\n<section class=\"uiForm login\">\n<h2>Sign in with 
email.</h2>\n<h4 class=\"signin-subhead\">Existing Allrecipes users.</h4>\n<!-- For Login Errors-->\n<form action=\"/account/signin/\" data-ajax=\"false\" method=\"post\" name=\"signinForm\" novalidate=\"\">\n<input id=\"ReferringType\" name=\"ReferringType\" type=\"hidden\" value=\"\">\n<input id=\"ReferringUrl\" name=\"ReferringUrl\" type=\"hidden\" value=\"https://www.allrecipes.com/\">\n<input id=\"ReferringAction\" name=\"ReferringAction\" type=\"hidden\" value=\"\">\n<input id=\"ReferringParams\" name=\"ReferringParams\" type=\"hidden\" value=\"\">\n<input data-val=\"true\" data-val-required=\"The AuthLayoutMode field is required.\" id=\"AuthLayoutMode\" name=\"AuthLayoutMode\" type=\"hidden\" value=\"Standard\">\n<input id=\"SocialCsrfToken\" name=\"SocialCsrfToken\" type=\"hidden\" value=\"5496194823785841786597583543653447895202431228279444578006387344706326\">\n<input data-role=\"none\" id=\"txtUserNameOrEmail\" maxlength=\"500\" name=\"txtUserNameOrEmail\" placeholder=\"Email\" type=\"email\" value=\"\">\n<div class=\"loginPswrdCntnr\">\n<input data-role=\"none\" id=\"password\" maxlength=\"500\" name=\"password\" placeholder=\"Password\" type=\"password\" value=\"\">\n</input></div>\n<aside class=\"aside-left\">\n<label class=\"checkList__item secure\">\n<input checked=\"checked\" data-role=\"none\" id=\"rememberMe\" name=\"rememberMe\" type=\"checkbox\"><span class=\"span-signIn\">Remember me</span>\n</input></label>\n</aside>\n<aside class=\"aside-right\">\n<a class=\"btn-right\" href=\"/account/forgotpassword/?layout=Standard&loginReferrerUrl=https%3A%2F%2Fwww.allrecipes.com%2F\" id=\"lnkForgotPassword\" tabindex=\"500\">Forgot password?</a>\n</aside>\n<input class=\"btnSubmit\" data-action=\"submit\" data-role=\"none\" id=\"submitLogInForm\" type=\"submit\" value=\"Sign In\">\n</input></input></input></input></input></input></input></input></form>\n<a class=\"join-for-free\" onclick=\"AR.Account.lnkJoinForFree();\"><span>New to Allrecipes?</span> 
Join for free!</a>\n<p class=\"sub-text\">\r\n By signing in, you are agreeing to our <a href=\"http://www.meredith.com/legal/terms\">Terms of <br/>\r\n Service</a> and our <a href=\"http://www.meredith.com/privacy.html\" target=\"_blank\">Privacy Policy—Your California Rights</a>.\r\n </p>\n</section>\n</section>\n</div>\n</div>\n</div>\n<footer class=\"full-page\" id=\"pageFooter\">\n<section class=\"grid grid-fixed\">\n<article class=\"grid-col grid-col--tiles footer-share\">\n<ul class=\"social-sharing__icons\">\n<li><a class=\"svg-icon--social--facebook svg-icon--social--facebook-dims\" data-footer-link-tracking='{\"label\": \"footer > social > facebook\"}' href=\"https://www.facebook.com/allrecipes\" id=\"footer_facebook\" rel=\"noopener\" target=\"_blank\" title=\"Facebook\"></a></li>\n<li><a class=\"svg-icon--social--pinterest svg-icon--social--pinterest-dims\" data-footer-link-tracking='{\"label\": \"footer > social > pinterest\"}' href=\"http://pinterest.com/allrecipes/\" id=\"footer_pinterest\" rel=\"noopener\" target=\"_blank\" title=\"Pinterest\"></a></li>\n<li><a class=\"svg-icon--social--twitter svg-icon--social--twitter-dims\" data-footer-link-tracking='{\"label\": \"footer > social > twitter\"}' href=\"https://twitter.com/Allrecipes\" id=\"footer_twitter\" rel=\"noopener\" target=\"_blank\" title=\"Twitter\"></a></li>\n<li><a class=\"svg-icon--social--instagram svg-icon--social--instagram-dims\" data-footer-link-tracking='{\"label\": \"footer > social > instagram\"}' href=\"http://instagram.com/allrecipes\" id=\"footer_instagram\" rel=\"noopener\" target=\"_blank\" title=\"Instagram\"></a></li>\n<li><a class=\"svg-icon--social--tumblr_wh svg-icon--social--tumblr_wh-dims\" data-footer-link-tracking='{\"label\": \"footer > social > tumblr\"}' href=\"http://allrecipes.tumblr.com/\" id=\"footer_tumblr\" rel=\"noopener\" target=\"_blank\" title=\"Tumblr\"></a></li>\n<li><a class=\"svg-icon--social--stumbleupon svg-icon--social--stumbleupon-dims\" 
data-footer-link-tracking='{\"label\": \"footer > social > stumbleupon\"}' href=\"http://www.stumbleupon.com/stumbler/Allrecipes\" id=\"footer_stumbleupon\" rel=\"noopener\" target=\"_blank\" title=\"StumbleUpon\"></a></li>\n<li><a class=\"svg-icon--social--youtube_wh svg-icon--social--youtube_wh-dims\" data-footer-link-tracking='{\"label\": \"footer > social > youtube\"}' href=\"https://www.youtube.com/user/allrecipes\" id=\"footer_youtube\" rel=\"noopener\" target=\"_blank\" title=\"YouTube\"></a></li>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles\">\n<ul>\n<li>About Us</li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > newsroom\"}' href=\"http://press.allrecipes.com/\" id=\"footer_newsroom\" rel=\"noopener\">Newsroom</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > jobs\"}' href=\"http://dish.allrecipes.com/allrecipes-jobs/\" id=\"footer_jobs\" rel=\"noopener\">Jobs at Allrecipes</a></li>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles footer_advertising\">\n<ul>\n<li>Advertising</li>\n<li><a class=\"\" data-footer-link-tracking='{\"label\": \"footer > advertise\"}' href=\"http://www.meredith.com/national-media/digital\" id=\"footer_advertisewithus\" rel=\"noopener\">Advertise with Us</a></li>\n<li><a class=\"\" data-footer-link-tracking='{\"label\": \"footer > meredith womans network\"}' href=\"http://www.meredith.com/marketing_solutions/interactive_media.html\" id=\"footer_womensnetwork\" rel=\"noopener\">Meredith Women's Network</a></li>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles\">\n<ul>\n<li>Support</li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > site map\"}' href=\"http://dish.allrecipes.com/faq-sitemap/\" id=\"footer_sitemap\" rel=\"noopener\">Site Map</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > contact us\"}' href=\"http://dish.allrecipes.com/customer-service/contact-us-2/\" id=\"footer_contactus\" rel=\"noopener\">Contact 
Us</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > customer support\"}' href=\"http://dish.allrecipes.com/customer-service/\" id=\"footer_customersupport\" rel=\"noopener\">Customer Support</a></li>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles\" data-siteurl=\"https://www.allrecipes.com\" ng-controller=\"ar_controllers_footerLinks\">\n<ul>\n<li>Global Community</li>\n<li>\n<select aria-label=\"Change Country\" id=\"country-selector\" onchange=\"changesite(this.value);\">\n<option value=\"\">Select location</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > austrailia & new zealand\"}' rel=\"noopener\" value=\"http://allrecipes.com.au\">Australia & New Zealand</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > canada\"}' rel=\"noopener\" value=\"https://www.allrecipes.com?country=CA\">Canada</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > quebec\"}' rel=\"noopener\" value=\"http://qc.allrecipes.ca\">Quebec</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > united kingdom & ireland\"}' rel=\"noopener\" value=\"http://allrecipes.co.uk\">United Kingdom & Ireland</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > united states\"}' rel=\"noopener\" value=\"https://www.allrecipes.com/?country=US\">United States</option>\n</select>\n</li>\n<li>© 2020 Allrecipes.com <br/>All Rights Reserved </li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > california rights\"}' href=\"http://www.meredith.com/legal/privacy\" id=\"footer_privacypolicy\" rel=\"noopener\" target=\"_blank\">Privacy Policy Your California Rights</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > eu privacy policy\"}' href=\"http://www.meredith.com/legal/eu-privacy\" id=\"footer_eu_privacypolicy\" rel=\"noopener\" target=\"_blank\">EU Privacy Policy</a></li>\n<li><a 
data-footer-link-tracking='{\"label\": \"footer > terms\"}' href=\"http://www.meredith.com/legal/terms\" id=\"footer_terms\" rel=\"noopener\" target=\"_blank\">Terms of Service</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > data policy\"}' href=\"http://www.meredith.com/datapolicy.html\" id=\"footer_datapolicy\" rel=\"noopener\" target=\"_blank\">Data Policy</a></li>\n<li>\n<a class=\"privacy-notification-dsar\" data-footer-link-tracking='{\"label\": \"footer > eu data requests\"}' href=\"\" id=\"footer_dsar\" rel=\"noopener\" target=\"_blank\">EU Data Subject Requests</a>\n<!-- EU DSAR link removal start -->\n<script type=\"text/javascript\">\r\n // Find the EU DSAR link by link text.\r\n function getEUDSARlink() {\r\n let elem;\r\n const links = document.getElementsByTagName('a');\r\n for (let link of links) {\r\n if (link.innerHTML == 'EU Data Subject Requests') {\r\n elem = link;\r\n break;\r\n }\r\n }\r\n return elem;\r\n };\r\n // Pass EU DSAR link element to the link-hiding function.\r\n (function (elem) {\r\n if (elem && typeof elem !== 'undefined') {\r\n const d = window.document;\r\n const now = Math.floor(Date.now() / 1000);\r\n const newYear2020 = 1577836800;\r\n if (now >= newYear2020 || d.location.hash == '#ccpa') {\r\n elem.style.display = 'none';\r\n }\r\n }\r\n }(getEUDSARlink()));\r\n </script>\n<!-- EU DSAR link removal end -->\n</li>\n<li>\n<!-- Do Not Sell button start -->\n<!-- NOTE: OneTrust settings might override the button text. -->\n<!-- If text link is preferred, change to an <a> tag with the same class. 
-->\n<button class=\"ot-sdk-show-settings\">California Do Not Sell</button>\n<script type=\"text/javascript\">\r\n (function () {\r\n const d = window.document;\r\n const now = Math.floor(Date.now() / 1000);\r\n const newYear2020 = 1577836800;\r\n const otLinkClass = '.ot-sdk-show-settings';\r\n if (now < newYear2020 && d.location.hash != '#ccpa') {\r\n const otLink = d.querySelector(otLinkClass);\r\n otLink.style.display = 'none';\r\n }\r\n }());\r\n </script>\n<!-- Do Not Sell button end -->\n</li>\n<li>\n<!-- Ghostery Inc tag script_ghostery cid: 1333 pid: 282-->\n<a data-footer-link-tracking='{\"label\": \"footer > adchoices\"}' href=\"#\" id=\"_bapw-link\" rel=\"noopener\" target=\"_blank\"><span id=\"footer_adchoices\" style=\"vertical-align:middle !important;padding-right:5px\">AdChoices</span><img height=\"11\" id=\"_bapw-icon\" role=\"presentation\" style=\"border:0 !important;display:inline !important;vertical-align:middle !important;padding-right:5px !important;\"/></a>\n<a class=\"btns-one-small\" data-show-on-scroll=\"700\" id=\"footer_top_button\" ng-cloak=\"\" ng-show=\"yTrigger == true\" onclick=\"AnchorScroll('top')\">Top</a>\n</li>\n<script>(function () { var g = 282, i = 1333, a = false, h = document, j = h.getElementById(\"_bapw-link\"), e = (h.location.protocol == \"https:\"), f = (e ? \"https\" : \"http\") + \"://\", c = f + (e ? 
\"a248.e.akamai.net/betterad.download.akamai.com/91609\" : \"cdn.betrad.com\") + \"/pub/\"; function b(k) { var d = new Image(); d.src = f + \"l.betrad.com/pub/p.gif?pid=\" + g + \"&ocid=\" + i + \"&i\" + k + \"=1&r=\" + Math.random() } h.getElementById(\"_bapw-icon\").src = c + \"icon1.png\"; j.onmouseover = function () { if (/#$/.test(j.href)) { j.href = \"http://info.evidon.com/pub_info/\" + g + \"?v=1\" } }; j.onclick = function () { var k = window._bap_p_overrides; function d(n, q) { var o = h.getElementsByTagName(\"head\")[0] || h.documentElement, m = a, l = h.createElement(\"script\"); function p() { l.onload = l.onreadystatechange = null; o.removeChild(l); q() } l.src = n; l.onreadystatechange = function () { if (!m && (this.readyState == \"loaded\" || this.readyState == \"complete\")) { m = true; p() } }; l.onload = p; o.insertBefore(l, o.firstChild) } if (k && k.hasOwnProperty(g)) { if (k[g].new_window) { b(\"c\"); return true } } this.onclick = \"return \" + a; d(f + \"ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js\", function () { d(c + \"pub2.js\", function () { BAPW.i(j, { pid: g, ocid: i }) }) }); return a }; b(\"i\") }()); var _bap_p_overrides = _bap_p_overrides || {}; _bap_p_overrides[282] = { new_window: true };</script>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles\">\n<ul>\n<li>More Allrecipes</li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > ar magazine subscribe\"}' href=\"https://www.magazine.store/allrecipes-magazine/\" id=\"footer_magazine\" rel=\"noopener\">Allrecipes Magazine <span><span>–</span> Subscribe</span></a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > apps\"}' href=\"http://dish.allrecipes.com/mobile-apps\" id=\"footer_apps\" rel=\"noopener\">Allrecipes Apps</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > food wishes videos\"}' href=\"http://youtube.com/foodwishes\" id=\"footer_foodwishes\" rel=\"noopener\">Food Wishes Videos</a></li>\n<li><a 
data-footer-link-tracking='{\"label\": \"footer > ar blog\"}' href=\"http://press.allrecipes.com/blog/\" id=\"footer_blog\" rel=\"noopener\">The Allrecipes Blog</a></li>\n</ul>\n</article>\n</section>\n<script type=\"text/javascript\">\r\n function changesite(value) {\r\n window.location = value;\r\n }\r\n\r\n \r\n function readCookieValue(cookieName) {\r\n var cookieValueRegex = new RegExp('(?:(?:^|.*;\\\\s*)' + cookieName + '\\\\s*\\\\=\\\\s*([^;]*).*$)|^.*$');\r\n var cookieValue = document.cookie.replace(cookieValueRegex, \"$1\");\r\n return cookieValue; // empty string, if cookie not found\r\n }\r\n\r\n \r\n var dsarUrl = \"https://app-de.onetrust.com/app/#/webform/0c410d51-8e85-4308-9bb9-37c24a461ccb?\";\r\n\r\n var dsarUserId = readCookieValue(\"euConsentId\");\r\n if (!dsarUserId) {\r\n dsarUserId = readCookieValue(\"globalTI_SID\");\r\n }\r\n if (dsarUserId) {\r\n dsarUrl += 'uid=' + dsarUserId + '&';\r\n }\r\n\r\n var siteDomain = document.domain.replace(/^.*\\.([^.]+\\.\\w+)$/i, \"$1\");\r\n dsarUrl += 'domain=' + siteDomain;\r\n\r\n document.getElementById(\"footer_dsar\").href = dsarUrl;\r\n\r\n \r\n var notificationDsar = document.getElementById(\"privacy-notification_dsar\");\r\n if (notificationDsar != null) {\r\n notificationDsar.href = dsarUrl;\r\n }\r\n\r\n window.addEventListener(\"load\",\r\n function() {\r\n window.segmentAnalytics.identify(dsarUserId, window.dataLayer);\r\n });\r\n</script>\n<div data-ng-controller=\"ar_controllers_deferredAction\" data-ng-init=\"wireupAdIntegrationListeners();executePostLoginEvents();\">\n<ar-notification></ar-notification>\n<div data-loading-indicator=\"\"></div>\n</div>\n</footer>\n</div>\n</div>\n<div class=\"ad-footer--fixed\" id=\"ad-footer\">\n<div data-tier=\"1\" id=\"div-gpt-mob-adhesive-banner-fixed\"></div>\n</div>\n<script>\r\n 
window.Toggles={\"AdTest\":false,\"RecipePreferences\":true,\"AzureRelatedcontentRecipes\":true,\"RdpRightRailRecommendations\":true,\"RecipePagePerf\":true,\"StreamsTest\":true,\"TastePrefOverlays\":true,\"RdpTasteCarousel\":true,\"MonetizedIngredients\":false,\"FixedGrid\":true,\"VideoWatchIcon\":true,\"reCaptcha\":false,\"Optimizely\":false,\"WatchVideoRDP\":false,\"SearchAB\":false,\"EUCheck\":true,\"ShowTasteSuperCard\":true,\"Pushly\":true};\r\n\r\n\r\n var enviromentOmnitureId = 'rdirdallrecipes';\r\n var pubsub = new Pubsub();\r\n </script>\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/analytics.bundled.js\"></script>\n<!-- script_analyticstag -->\n<script>\r\n\r\n\r\n\r\n function completed(whenReady) {\r\n document.removeEventListener(\"DOMContentLoaded\", completed);\r\n window.removeEventListener(\"load\", completed);\r\n if (whenReady) {\r\n whenReady();\r\n }\r\n }\r\n\r\n function googAnalytics() {\r\n pubsub.broadcast(\"GoogleAnalytics\");\r\n }\r\n\r\n document.addEventListener(\"DOMContentLoaded\", completed(googAnalytics)); // use the handy event callback\r\n window.addEventListener(\"load\", completed(googAnalytics)); // a fallback to window.onload, that will always work\r\n\r\n if (typeof (window.dataLayer) !== \"undefined\" && dataLayer) {\r\n var clientAnalytics = new ClientAnalytics(window.dataLayer);\r\n var comscoreShim = new ComscoreShim(window.dataLayer, pubsub);\r\n var omniShim = new OmnitureShim(window.dataLayer, s, pubsub);\r\n var kruxShim = new KruxShim(window.dataLayer, pubsub);\r\n\r\n }\r\n </script>\n<script type=\"text/javascript\">\r\n 
window.Toggles={\"AdTest\":false,\"RecipePreferences\":true,\"AzureRelatedcontentRecipes\":true,\"RdpRightRailRecommendations\":true,\"RecipePagePerf\":true,\"StreamsTest\":true,\"TastePrefOverlays\":true,\"RdpTasteCarousel\":true,\"MonetizedIngredients\":false,\"FixedGrid\":true,\"VideoWatchIcon\":true,\"reCaptcha\":false,\"Optimizely\":false,\"WatchVideoRDP\":false,\"SearchAB\":false,\"EUCheck\":true,\"ShowTasteSuperCard\":true,\"Pushly\":true};\r\n\r\n var enviromentOmnitureId = 'rdirdallrecipes';\r\n\r\n try {\r\n var thirtyMinutesInMilliseconds = 1800000;\r\n window.localStorage.setItem(\"CurrentUserStateModel\", ''); //primarily used by private profile SPA, but pertains to current user in general\r\n window.localStorage.setItem(\"PublicProfileStateModel\", ''); //used by public profile SPA\r\n window.localStorage.setItem(\"CurrentUserStateModelExpirationDate\", Date.now() + thirtyMinutesInMilliseconds); //primarily used by private profile SPA, but pertains to current user in general\r\n window.localStorage.setItem(\"PublicProfileStateModelExpirationDate\", Date.now() + thirtyMinutesInMilliseconds); //used by public profile SPA\r\n } catch(err) {\r\n var CurrentUserStateCookie ='';\r\n var PublicProfileStateCookie = '';\r\n document.cookie = \"CurrentUserStateModel=\" + CurrentUserStateCookie;\r\n document.cookie = \"PublicProfileStateModel=\" + PublicProfileStateCookie;\r\n }\r\n\r\n </script>\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/main-bottom.bundled.js\"></script>\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/main-bottom-templates.bundled.js\"></script>\n<script>\r\n angular.module('allrecipes')\r\n .constant('Constant', {\r\n 'version': '1.185.0.5222'\r\n });\r\n </script>\n<!-- Begin comScore Tag - Part 1 -->\n<script id=\"script_comscore\">\r\n var _comscore = _comscore || [];\r\n _comscore.push({ c1: \"2\", c2: \"6036305\", cs_ucfr: \"1\" });\r\n (function () {\r\n var 
s = document.createElement(\"script\"), el = document.getElementsByTagName(\"script\")[0];\r\n s.async = true;\r\n s.src = (document.location.protocol == \"https:\" ? \"https://sb\" : \"http://b\") + \".scorecardresearch.com/beacon.js\";\r\n el.parentNode.insertBefore(s, el);\r\n })();\r\n </script>\n<!-- End comScore Tag Part 1-->\n<script>\r\n (function (d) {\r\n var e = d.createElement('script');\r\n e.src = d.location.protocol + '//tag.bounceexchange.com/2602/i.js';\r\n e.async = true;\r\n d.getElementsByTagName(\"head\")[0].appendChild(e);\r\n }(document));\r\n </script>\n<!-- script_facebookpixel -->\n<script>\r\n AR.FacebookPixel.init();\r\n </script>\n<!-- OneTrust Cookies Consent Notice start -->\n<script type=\"text/javascript\">\r\n // Arguments passed to this function:\r\n // OneTrust ID (required): The OneTrust-assigned ID for this site.\r\n // Environment (optional): Test environment is assumed. Specify 'prod' for production.\r\n (function (otid, env) {\r\n const d = window.document;\r\n const otscript = d.createElement('script');\r\n const firstscr = d.getElementsByTagName('script')[0];\r\n otscript.src = 'https://cdn.cookielaw.org/scripttemplates/otSDKStub.js';\r\n otscript.type = 'text/javascript';\r\n otscript.charset = 'UTF-8';\r\n otscript.async = true;\r\n otscript.setAttribute('data-domain-script', otid + (env != 'prod' ? 
'-test' : ''));\r\n firstscr.parentNode.insertBefore(otscript, firstscr);\r\n }('63a0b6bc-e912-4c8d-adfd-3b8a4b698c6c', 'prod'));\r\n </script>\n<script type=\"text/javascript\">\r\n function OptanonWrapper() { }\r\n </script>\n<!-- OneTrust Cookies Consent Notice end -->\n<!-- Scoby Telemetry snippet script_scobytelemetry -->\n<script src=\"https://moprd-cdnservice-uw1.azureedge.net/telemetryapi/1/telemetry.js\"></script>\n<!-- End Scoby Telemetry snippet -->\n<!-- script_adobetagmanager-->\n<script src=\"//assets.adobedtm.com/1c2ad567a53f27e563c4dc2c278a904b84dc5fde/satelliteLib-a07d47e4668bf3c3fa98aff5b2fc6d3f1d0981a3-staging.js\"></script>\n<script type=\"text/javascript\">_satellite.pageBottom(); // Initialize Adobe DTM</script>\n<div id=\"dsapp-is-tablet\"></div>\n<script type=\"text/javascript\">\r\n var testStringVersion = 'True';\r\n </script>\n<script type=\"text/javascript\">\r\n (function(b,r,a,n,c,h,_,s,d,k){if(!b[n]||!b[n]._q){for(;s<_.length;)c(h,_[s++]);d=r.createElement(a);d.async=1;d.src=\"https://cdn.branch.io/branch-latest.min.js\";k=r.getElementsByTagName(a)[0];k.parentNode.insertBefore(d,k);b[n]=h}})(window,document,\"script\",\"branch\",function(b,r){b[r]=function(){b._q.push([r,arguments])}},{_q:[],_v:1},\"addListener applyCode banner closeBanner creditHistory credits data deepview deepviewCta first getCode init link logout redeem referrals removeListener sendSMS setBranchViewData setIdentity track validateCode\".split(\" \"), 0);\r\n branch.init('key_live_dcvcpHkps9BjZy4HCivJjpdewCg0PjvK');\r\n branch.setBranchViewData({\r\n data: {\r\n '$deeplink_path': '/account/signin/'\r\n }});\r\n\r\n branch.addListener('didShowJourney', function(event) {\r\n var journeysBanner = document.getElementById('branch-banner-iframe');\r\n if (!journeysBanner || !journeysBanner.style) { // don't run if the journey doesn't exist\r\n return;\r\n }\r\n var topPosition = journeysBanner.style.top;\r\n var position = 
window.getComputedStyle(journeysBanner).getPropertyValue('position');\r\n var bannerHeight = window.getComputedStyle(journeysBanner).getPropertyValue('height');\r\n if (topPosition === '0px' && position !== 'fixed') { // if its a top, inline journey\r\n journeysBanner.style.top = '-' + bannerHeight; // shift the banner upward by the height\r\n }\r\n }); // fires as soon as a journey is being shown\r\n </script>\n</body>\n</html>\n\n"
]
],
[
[
"### Retrieving the form tag that contains the sign in inputs",
"_____no_output_____"
]
],
[
[
"signin_form = signin_soup.find('form', attrs = {'name':'signinForm'})",
"_____no_output_____"
]
],
[
[
"### Getting the input tag containing the 'token'",
"_____no_output_____"
]
],
[
[
"input_token = signin_form.find('input', attrs = {'name':'SocialCsrfToken'})",
"_____no_output_____"
]
],
[
[
"### Storing the value of the token",
"_____no_output_____"
]
],
[
[
"token_to_use = input_token.get('value')",
"_____no_output_____"
]
],
[
[
"### Pause between 2 requests",
"_____no_output_____"
]
],
[
[
"time.sleep(1)",
"_____no_output_____"
]
],
[
[
"### Creating a session to persist cookies across requests",
"_____no_output_____"
]
],
[
[
"session_request = requests.session()",
"_____no_output_____"
]
],
[
[
"### Email used to sign in",
"_____no_output_____"
]
],
[
[
"email_used = '[email protected]'",
"_____no_output_____"
]
],
[
[
"### Password used to sign in",
"_____no_output_____"
]
],
[
[
"password_used = '12345678aA'",
"_____no_output_____"
]
],
[
[
"### Storing the form data to be posted in order to stay logged in",
"_____no_output_____"
]
],
[
[
"form_data = {'ReferringType':'',\n 'ReferringUrl': 'https://www.allrecipes.com/',\n 'ReferringAction': '',\n 'ReferringParams': '',\n 'AuthLayoutMode': 'Standard',\n 'SocialCsrfToken': token_to_use,\n 'txtUserNameOrEmail': email_used,\n 'password': password_used,\n 'remeberMe': 'on'} ",
"_____no_output_____"
]
],
[
[
"### Logging into the website and timing out after 15 seconds",
"_____no_output_____"
]
],
[
[
"loged_in = session_request.post(signin_url,\n data = form_data,\n headers = dict(ReferringUrl= 'https://www.allrecipes.com/'),\n timeout = 15\n )",
"_____no_output_____"
]
],
[
[
"### Getting the code of the webpage",
"_____no_output_____"
]
],
[
[
"logged_in_page = session_request.get(signin_url,\n headers = dict(ReferringUrl= signin_url))",
"_____no_output_____"
]
],
[
[
"### Storing the code in a BeautifulSoup object",
"_____no_output_____"
]
],
[
[
"logged_in_soup = BeautifulSoup(logged_in_page.content, 'html.parser')",
"_____no_output_____"
]
],
[
[
"### Printing the BeautifulSoup object",
"_____no_output_____"
]
],
[
[
"print(logged_in_soup)",
"\n<!DOCTYPE html>\n\n<html lang=\"en-us\">\n<head>\n<title>Allrecipes | Food, friends, and recipe inspiration</title>\n<script async=\"true\" src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/karma.bundled.js\"></script>\n<!--Make our website baseUrl available to the client-side code-->\n<script type=\"text/javascript\">\r\n var AR = AR || {};\r\n\r\n AR.segmentWriteKey = \"RnmsxUrjIjM7W62olfjKgJrcsVlxe68V\";\r\n AR.baseWebsiteUrl = 'https://www.allrecipes.com';\r\nwindow.dataLayer={\"version\":\"1.0\",\"pageInstanceId\":\"www.allrecipes.com/\",\"externalLinkId\":\"\",\"page\":{\"pageInfo\":{\"pageId\":\"\",\"pageName\":\"/\",\"destinationUrl\":\"https://www.allrecipes.com/\",\"sysEnv\":\"RD0003FFA88238\",\"variant\":\"Control\",\"version\":\"\",\"issueDate\":\"01/24/2020 00:52:37\",\"effectiveDate\":\"01/24/2020 00:52:37\",\"domain\":\"www.allrecipes.com\",\"parameters\":{}},\"category\":{\"primaryCategory\":\"home page\",\"contentType\":\"home page\",\"subContentType\":\"\",\"adZone\":\"\",\"adKeys\":\"status=freeloggedin;oid=;fit=0;id=1\",\"contentSource\":\"\"},\"attributes\":{\"contentId\":\"\",\"title\":\"\",\"country\":\"USA\"}},\"event\":[],\"user\":[{\"analyticsId\":\"\",\"segment\":{\"isLoggedIn\":true,\"adStatus\":\"freeloggedin\",\"visitorType\":\"free\",\"loginStatus\":\"yes\"},\"profile\":[{\"profileInfo\":{\"profileId\":\"26921828\",\"userName\":\"Test\",\"loginType\":\"Allrecipes\"}}],\"magFollower\":false}],\"newsletter\":{\"mailingId\":\"\",\"mailingName\":\"\",\"mailingDate\":\"\",\"mailingLinkGroup\":\"\",\"mailingLinkName\":\"\"},\"pageImpressionTraceList\":[\"rc-10813\",\"rc-213268\",\"rc-214614\",\"rc-23600\",\"rc-236867\",\"rc-237411\",\"rc-255253\",\"rc-25690\",\"rc-260852\",\"rc-269574\",\"rc-269592\",\"rc-270200\",\"rc-274966\",\"rc-275055\",\"rc-275451\",\"rc-277292\",\"rc-6903\",\"rc-70163\",\"rc-71722\",\"rc-82954\"]}; </script>\n<script type=\"text/javascript\">\r\n //Remove Ref_Hub from session after first 
recipe visited\r\n var hubId = window.sessionStorage[\"Ref_Hub_Id\"];\r\n var count = window.sessionStorage[\"Ref_Hub_Recipe_Count\"];\r\n if (hubId && count) {\r\n if (count > 0) {\r\n window.sessionStorage.removeItem(\"Ref_Hub_Id\");\r\n window.sessionStorage.removeItem(\"Ref_Hub_Recipe_Count\");\r\n }\r\n }\r\n </script>\n<meta content=\"Allrecipes | Food, friends, and recipe inspiration\" property=\"og:title\"/>\n<meta content=\"Allrecipes\" property=\"og:site_name\"/>\n<meta charset=\"utf-8\"/>\n<meta content=\"width=device-width, initial-scale=1.0\" name=\"viewport\"/>\n<meta content=\"Find and share everyday cooking inspiration on Allrecipes. Discover recipes, cooks, videos, and how-tos based on the food you love and the friends you follow.\" id=\"metaDescription\" name=\"description\"/>\n<meta content=\"Find and share everyday cooking inspiration on Allrecipes. Discover recipes, cooks, videos, and how-tos based on the food you love and the friends you follow.\" property=\"og:description\">\n<meta content=\"noodp,noydir\" name=\"robots\"/>\n<link href=\"https://www.allrecipes.com/\" id=\"canonicalUrl\" rel=\"canonical\"/>\n<meta content=\"https://www.allrecipes.com/\" property=\"og:url\"/>\n<link href=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/main-css.bundled.Css\" rel=\"stylesheet\"/>\n<link href=\"https://www.allrecipes.com/?page=2\" rel=\"next\"/>\n<meta content=\"66102450266\" property=\"fb:app_id\"/>\n<meta content=\"71158748377\" property=\"fb:pages\"/>\n<script type=\"text/javascript\">window.NREUM||(NREUM={});NREUM.info = {\"beacon\":\"bam.nr-data.net\",\"errorBeacon\":\"bam.nr-data.net\",\"licenseKey\":\"55db0cb698\",\"applicationID\":\"117742538,90586318\",\"transactionName\":\"YwABYUUDXUIABRZbCVpKIllbEFZSCBYHQTFRBxBcQwcceQ4LB3EJWhERWlsOVkNPLwxWA0xNSg==\",\"queueTime\":0,\"applicationTime\":136,\"agent\":\"\",\"atts\":\"\"}</script><script 
type=\"text/javascript\">(window.NREUM||(NREUM={})).init={distributed_tracing:{enabled:true}};(window.NREUM||(NREUM={})).loader_config={agentID:\"117745958\",accountID:\"989419\",trustKey:\"659849\",xpid:\"XQ4OVVdaGwIBU1ZWBQQEXg==\",licenseKey:\"55db0cb698\",applicationID:\"117742538\"};window.NREUM||(NREUM={}),__nr_require=function(t,e,n){function r(n){if(!e[n]){var o=e[n]={exports:{}};t[n][0].call(o.exports,function(e){var o=t[n][1][e];return r(o||e)},o,o.exports)}return e[n].exports}if(\"function\"==typeof __nr_require)return __nr_require;for(var o=0;o<n.length;o++)r(n[o]);return r}({1:[function(t,e,n){function r(t){try{s.console&&console.log(t)}catch(e){}}var o,i=t(\"ee\"),a=t(24),s={};try{o=localStorage.getItem(\"__nr_flags\").split(\",\"),console&&\"function\"==typeof console.log&&(s.console=!0,o.indexOf(\"dev\")!==-1&&(s.dev=!0),o.indexOf(\"nr_dev\")!==-1&&(s.nrDev=!0))}catch(c){}s.nrDev&&i.on(\"internal-error\",function(t){r(t.stack)}),s.dev&&i.on(\"fn-err\",function(t,e,n){r(n.stack)}),s.dev&&(r(\"NR AGENT IN DEVELOPMENT MODE\"),r(\"flags: \"+a(s,function(t,e){return t}).join(\", \")))},{}],2:[function(t,e,n){function r(t,e,n,r,s){try{l?l-=1:o(s||new UncaughtException(t,e,n),!0)}catch(f){try{i(\"ierr\",[f,c.now(),!0])}catch(d){}}return\"function\"==typeof u&&u.apply(this,a(arguments))}function UncaughtException(t,e,n){this.message=t||\"Uncaught error with no additional information\",this.sourceURL=e,this.line=n}function o(t,e){var n=e?null:c.now();i(\"err\",[t,n])}var i=t(\"handle\"),a=t(25),s=t(\"ee\"),c=t(\"loader\"),f=t(\"gos\"),u=window.onerror,d=!1,p=\"nr@seenError\",l=0;c.features.err=!0,t(1),window.onerror=r;try{throw new Error}catch(h){\"stack\"in h&&(t(13),t(12),\"addEventListener\"in 
window&&t(6),c.xhrWrappable&&t(14),d=!0)}s.on(\"fn-start\",function(t,e,n){d&&(l+=1)}),s.on(\"fn-err\",function(t,e,n){d&&!n[p]&&(f(n,p,function(){return!0}),this.thrown=!0,o(n))}),s.on(\"fn-end\",function(){d&&!this.thrown&&l>0&&(l-=1)}),s.on(\"internal-error\",function(t){i(\"ierr\",[t,c.now(),!0])})},{}],3:[function(t,e,n){t(\"loader\").features.ins=!0},{}],4:[function(t,e,n){function r(){L++,C=g.hash,this[u]=y.now()}function o(){L--,g.hash!==C&&i(0,!0);var t=y.now();this[h]=~~this[h]+t-this[u],this[d]=t}function i(t,e){E.emit(\"newURL\",[\"\"+g,e])}function a(t,e){t.on(e,function(){this[e]=y.now()})}var s=\"-start\",c=\"-end\",f=\"-body\",u=\"fn\"+s,d=\"fn\"+c,p=\"cb\"+s,l=\"cb\"+c,h=\"jsTime\",m=\"fetch\",v=\"addEventListener\",w=window,g=w.location,y=t(\"loader\");if(w[v]&&y.xhrWrappable){var x=t(10),b=t(11),E=t(8),O=t(6),R=t(13),P=t(7),N=t(14),T=t(9),M=t(\"ee\"),S=M.get(\"tracer\");t(16),y.features.spa=!0;var C,L=0;M.on(u,r),M.on(p,r),M.on(d,o),M.on(l,o),M.buffer([u,d,\"xhr-done\",\"xhr-resolved\"]),O.buffer([u]),R.buffer([\"setTimeout\"+c,\"clearTimeout\"+s,u]),N.buffer([u,\"new-xhr\",\"send-xhr\"+s]),P.buffer([m+s,m+\"-done\",m+f+s,m+f+c]),E.buffer([\"newURL\"]),x.buffer([u]),b.buffer([\"propagate\",p,l,\"executor-err\",\"resolve\"+s]),S.buffer([u,\"no-\"+u]),T.buffer([\"new-jsonp\",\"cb-start\",\"jsonp-error\",\"jsonp-end\"]),a(N,\"send-xhr\"+s),a(M,\"xhr-resolved\"),a(M,\"xhr-done\"),a(P,m+s),a(P,m+\"-done\"),a(T,\"new-jsonp\"),a(T,\"jsonp-end\"),a(T,\"cb-start\"),E.on(\"pushState-end\",i),E.on(\"replaceState-end\",i),w[v](\"hashchange\",i,!0),w[v](\"load\",i,!0),w[v](\"popstate\",function(){i(0,L>1)},!0)}},{}],5:[function(t,e,n){function r(t){}if(window.performance&&window.performance.timing&&window.performance.getEntriesByType){var 
o=t(\"ee\"),i=t(\"handle\"),a=t(13),s=t(12),c=\"learResourceTimings\",f=\"addEventListener\",u=\"resourcetimingbufferfull\",d=\"bstResource\",p=\"resource\",l=\"-start\",h=\"-end\",m=\"fn\"+l,v=\"fn\"+h,w=\"bstTimer\",g=\"pushState\",y=t(\"loader\");y.features.stn=!0,t(8),\"addEventListener\"in window&&t(6);var x=NREUM.o.EV;o.on(m,function(t,e){var n=t[0];n instanceof x&&(this.bstStart=y.now())}),o.on(v,function(t,e){var n=t[0];n instanceof x&&i(\"bst\",[n,e,this.bstStart,y.now()])}),a.on(m,function(t,e,n){this.bstStart=y.now(),this.bstType=n}),a.on(v,function(t,e){i(w,[e,this.bstStart,y.now(),this.bstType])}),s.on(m,function(){this.bstStart=y.now()}),s.on(v,function(t,e){i(w,[e,this.bstStart,y.now(),\"requestAnimationFrame\"])}),o.on(g+l,function(t){this.time=y.now(),this.startPath=location.pathname+location.hash}),o.on(g+h,function(t){i(\"bstHist\",[location.pathname+location.hash,this.startPath,this.time])}),f in window.performance&&(window.performance[\"c\"+c]?window.performance[f](u,function(t){i(d,[window.performance.getEntriesByType(p)]),window.performance[\"c\"+c]()},!1):window.performance[f](\"webkit\"+u,function(t){i(d,[window.performance.getEntriesByType(p)]),window.performance[\"webkitC\"+c]()},!1)),document[f](\"scroll\",r,{passive:!0}),document[f](\"keypress\",r,!1),document[f](\"click\",r,!1)}},{}],6:[function(t,e,n){function r(t){for(var e=t;e&&!e.hasOwnProperty(u);)e=Object.getPrototypeOf(e);e&&o(e)}function o(t){s.inPlace(t,[u,d],\"-\",i)}function i(t,e){return t[1]}var a=t(\"ee\").get(\"events\"),s=t(\"wrap-function\")(a,!0),c=t(\"gos\"),f=XMLHttpRequest,u=\"addEventListener\",d=\"removeEventListener\";e.exports=a,\"getPrototypeOf\"in Object?(r(document),r(window),r(f.prototype)):f.prototype.hasOwnProperty(u)&&(o(window),o(f.prototype)),a.on(u+\"-start\",function(t,e){var n=t[1],r=c(n,\"nr@wrapped\",function(){function t(){if(\"function\"==typeof n.handleEvent)return n.handleEvent.apply(n,arguments)}var e={object:t,\"function\":n}[typeof 
n];return e?s(e,\"fn-\",null,e.name||\"anonymous\"):n});this.wrapped=t[1]=r}),a.on(d+\"-start\",function(t){t[1]=this.wrapped||t[1]})},{}],7:[function(t,e,n){function r(t,e,n){var r=t[e];\"function\"==typeof r&&(t[e]=function(){var t=i(arguments),e={};o.emit(n+\"before-start\",[t],e);var a;e[m]&&e[m].dt&&(a=e[m].dt);var s=r.apply(this,t);return o.emit(n+\"start\",[t,a],s),s.then(function(t){return o.emit(n+\"end\",[null,t],s),t},function(t){throw o.emit(n+\"end\",[t],s),t})})}var o=t(\"ee\").get(\"fetch\"),i=t(25),a=t(24);e.exports=o;var s=window,c=\"fetch-\",f=c+\"body-\",u=[\"arrayBuffer\",\"blob\",\"json\",\"text\",\"formData\"],d=s.Request,p=s.Response,l=s.fetch,h=\"prototype\",m=\"nr@context\";d&&p&&l&&(a(u,function(t,e){r(d[h],e,f),r(p[h],e,f)}),r(s,\"fetch\",c),o.on(c+\"end\",function(t,e){var n=this;if(e){var r=e.headers.get(\"content-length\");null!==r&&(n.rxSize=r),o.emit(c+\"done\",[null,e],n)}else o.emit(c+\"done\",[t],n)}))},{}],8:[function(t,e,n){var r=t(\"ee\").get(\"history\"),o=t(\"wrap-function\")(r);e.exports=r;var i=window.history&&window.history.constructor&&window.history.constructor.prototype,a=window.history;i&&i.pushState&&i.replaceState&&(a=i),o.inPlace(a,[\"pushState\",\"replaceState\"],\"-\")},{}],9:[function(t,e,n){function r(t){function e(){c.emit(\"jsonp-end\",[],p),t.removeEventListener(\"load\",e,!1),t.removeEventListener(\"error\",n,!1)}function n(){c.emit(\"jsonp-error\",[],p),c.emit(\"jsonp-end\",[],p),t.removeEventListener(\"load\",e,!1),t.removeEventListener(\"error\",n,!1)}var r=t&&\"string\"==typeof t.nodeName&&\"script\"===t.nodeName.toLowerCase();if(r){var o=\"function\"==typeof t.addEventListener;if(o){var a=i(t.src);if(a){var u=s(a),d=\"function\"==typeof u.parent[u.key];if(d){var p={};f.inPlace(u.parent,[u.key],\"cb-\",p),t.addEventListener(\"load\",e,!1),t.addEventListener(\"error\",n,!1),c.emit(\"new-jsonp\",[t.src],p)}}}}}function o(){return\"addEventListener\"in window}function i(t){var e=t.match(u);return 
e?e[1]:null}function a(t,e){var n=t.match(p),r=n[1],o=n[3];return o?a(o,e[r]):e[r]}function s(t){var e=t.match(d);return e&&e.length>=3?{key:e[2],parent:a(e[1],window)}:{key:t,parent:window}}var c=t(\"ee\").get(\"jsonp\"),f=t(\"wrap-function\")(c);if(e.exports=c,o()){var u=/[?&](?:callback|cb)=([^&#]+)/,d=/(.*)\\.([^.]+)/,p=/^(\\w+)(\\.|$)(.*)$/,l=[\"appendChild\",\"insertBefore\",\"replaceChild\"];Node&&Node.prototype&&Node.prototype.appendChild?f.inPlace(Node.prototype,l,\"dom-\"):(f.inPlace(HTMLElement.prototype,l,\"dom-\"),f.inPlace(HTMLHeadElement.prototype,l,\"dom-\"),f.inPlace(HTMLBodyElement.prototype,l,\"dom-\")),c.on(\"dom-start\",function(t){r(t[0])})}},{}],10:[function(t,e,n){var r=t(\"ee\").get(\"mutation\"),o=t(\"wrap-function\")(r),i=NREUM.o.MO;e.exports=r,i&&(window.MutationObserver=function(t){return this instanceof i?new i(o(t,\"fn-\")):i.apply(this,arguments)},MutationObserver.prototype=i.prototype)},{}],11:[function(t,e,n){function r(t){var e=a.context(),n=s(t,\"executor-\",e),r=new f(n);return a.context(r).getCtx=function(){return e},a.emit(\"new-promise\",[r,e],e),r}function o(t,e){return e}var i=t(\"wrap-function\"),a=t(\"ee\").get(\"promise\"),s=i(a),c=t(24),f=NREUM.o.PR;e.exports=a,f&&(window.Promise=r,[\"all\",\"race\"].forEach(function(t){var e=f[t];f[t]=function(n){function r(t){return function(){a.emit(\"propagate\",[null,!o],i),o=o||!t}}var o=!1;c(n,function(e,n){Promise.resolve(n).then(r(\"all\"===t),r(!1))});var i=e.apply(f,arguments),s=f.resolve(i);return s}}),[\"resolve\",\"reject\"].forEach(function(t){var e=f[t];f[t]=function(t){var n=e.apply(f,arguments);return t!==n&&a.emit(\"propagate\",[t,!0],n),n}}),f.prototype[\"catch\"]=function(t){return 
this.then(null,t)},f.prototype=Object.create(f.prototype,{constructor:{value:r}}),c(Object.getOwnPropertyNames(f),function(t,e){try{r[e]=f[e]}catch(n){}}),a.on(\"executor-start\",function(t){t[0]=s(t[0],\"resolve-\",this),t[1]=s(t[1],\"resolve-\",this)}),a.on(\"executor-err\",function(t,e,n){t[1](n)}),s.inPlace(f.prototype,[\"then\"],\"then-\",o),a.on(\"then-start\",function(t,e){this.promise=e,t[0]=s(t[0],\"cb-\",this),t[1]=s(t[1],\"cb-\",this)}),a.on(\"then-end\",function(t,e,n){this.nextPromise=n;var r=this.promise;a.emit(\"propagate\",[r,!0],n)}),a.on(\"cb-end\",function(t,e,n){a.emit(\"propagate\",[n,!0],this.nextPromise)}),a.on(\"propagate\",function(t,e,n){this.getCtx&&!e||(this.getCtx=function(){if(t instanceof Promise)var e=a.context(t);return e&&e.getCtx?e.getCtx():this})}),r.toString=function(){return\"\"+f})},{}],12:[function(t,e,n){var r=t(\"ee\").get(\"raf\"),o=t(\"wrap-function\")(r),i=\"equestAnimationFrame\";e.exports=r,o.inPlace(window,[\"r\"+i,\"mozR\"+i,\"webkitR\"+i,\"msR\"+i],\"raf-\"),r.on(\"raf-start\",function(t){t[0]=o(t[0],\"fn-\")})},{}],13:[function(t,e,n){function r(t,e,n){t[0]=a(t[0],\"fn-\",null,n)}function o(t,e,n){this.method=n,this.timerDuration=isNaN(t[1])?0:+t[1],t[0]=a(t[0],\"fn-\",this,n)}var i=t(\"ee\").get(\"timer\"),a=t(\"wrap-function\")(i),s=\"setTimeout\",c=\"setInterval\",f=\"clearTimeout\",u=\"-start\",d=\"-\";e.exports=i,a.inPlace(window,[s,\"setImmediate\"],s+d),a.inPlace(window,[c],c+d),a.inPlace(window,[f,\"clearImmediate\"],f+d),i.on(c+u,r),i.on(s+u,o)},{}],14:[function(t,e,n){function r(t,e){d.inPlace(e,[\"onreadystatechange\"],\"fn-\",s)}function o(){var t=this,e=u.context(t);t.readyState>3&&!e.resolved&&(e.resolved=!0,u.emit(\"xhr-resolved\",[],t)),d.inPlace(t,g,\"fn-\",s)}function i(t){y.push(t),h&&(b?b.then(a):v?v(a):(E=-E,O.data=E))}function a(){for(var t=0;t<y.length;t++)r([],y[t]);y.length&&(y=[])}function s(t,e){return e}function c(t,e){for(var n in t)e[n]=t[n];return e}t(6);var 
f=t(\"ee\"),u=f.get(\"xhr\"),d=t(\"wrap-function\")(u),p=NREUM.o,l=p.XHR,h=p.MO,m=p.PR,v=p.SI,w=\"readystatechange\",g=[\"onload\",\"onerror\",\"onabort\",\"onloadstart\",\"onloadend\",\"onprogress\",\"ontimeout\"],y=[];e.exports=u;var x=window.XMLHttpRequest=function(t){var e=new l(t);try{u.emit(\"new-xhr\",[e],e),e.addEventListener(w,o,!1)}catch(n){try{u.emit(\"internal-error\",[n])}catch(r){}}return e};if(c(l,x),x.prototype=l.prototype,d.inPlace(x.prototype,[\"open\",\"send\"],\"-xhr-\",s),u.on(\"send-xhr-start\",function(t,e){r(t,e),i(e)}),u.on(\"open-xhr-start\",r),h){var b=m&&m.resolve();if(!v&&!m){var E=1,O=document.createTextNode(E);new h(a).observe(O,{characterData:!0})}}else f.on(\"fn-end\",function(t){t[0]&&t[0].type===w||a()})},{}],15:[function(t,e,n){function r(t){if(!i(t))return null;var e=window.NREUM;if(!e.loader_config)return null;var n=(e.loader_config.accountID||\"\").toString()||null,r=(e.loader_config.agentID||\"\").toString()||null,s=(e.loader_config.trustKey||\"\").toString()||null;if(!n||!r)return null;var c=a.generateCatId(),f=a.generateCatId(),u=Date.now(),d=o(c,f,u,n,r,s);return{header:d,guid:c,traceId:f,timestamp:u}}function o(t,e,n,r,o,i){var a=\"btoa\"in window&&\"function\"==typeof window.btoa;if(!a)return null;var s={v:[0,1],d:{ty:\"Browser\",ac:r,ap:o,id:t,tr:e,ti:n}};return i&&r!==i&&(s.d.tk=i),btoa(JSON.stringify(s))}function i(t){var e=!1,n=!1,r={};if(\"init\"in NREUM&&\"distributed_tracing\"in NREUM.init&&(r=NREUM.init.distributed_tracing,n=!!r.enabled),n)if(t.sameOrigin)e=!0;else if(r.allowed_origins instanceof Array)for(var o=0;o<r.allowed_origins.length;o++){var i=s(r.allowed_origins[o]);if(t.hostname===i.hostname&&t.protocol===i.protocol&&t.port===i.port){e=!0;break}}return n&&e}var a=t(22),s=t(17);e.exports={generateTracePayload:r,shouldGenerateTrace:i}},{}],16:[function(t,e,n){function r(t){var e=this.params,n=this.metrics;if(!this.ended){this.ended=!0;for(var 
r=0;r<l;r++)t.removeEventListener(p[r],this.listener,!1);e.aborted||(n.duration=s.now()-this.startTime,this.loadCaptureCalled||4!==t.readyState?null==e.status&&(e.status=0):a(this,t),n.cbTime=this.cbTime,d.emit(\"xhr-done\",[t],t),c(\"xhr\",[e,n,this.startTime]))}}function o(t,e){var n=t.responseType;if(\"json\"===n&&null!==e)return e;var r=\"arraybuffer\"===n||\"blob\"===n||\"json\"===n?t.response:t.responseText;return v(r)}function i(t,e){var n=f(e),r=t.params;r.host=n.hostname+\":\"+n.port,r.pathname=n.pathname,t.parsedOrigin=f(e),t.sameOrigin=t.parsedOrigin.sameOrigin}function a(t,e){t.params.status=e.status;var n=o(e,t.lastSize);if(n&&(t.metrics.rxSize=n),t.sameOrigin){var r=e.getResponseHeader(\"X-NewRelic-App-Data\");r&&(t.params.cat=r.split(\", \").pop())}t.loadCaptureCalled=!0}var s=t(\"loader\");if(s.xhrWrappable){var c=t(\"handle\"),f=t(17),u=t(15).generateTracePayload,d=t(\"ee\"),p=[\"load\",\"error\",\"abort\",\"timeout\"],l=p.length,h=t(\"id\"),m=t(20),v=t(19),w=window.XMLHttpRequest;s.features.xhr=!0,t(14),t(7),d.on(\"new-xhr\",function(t){var e=this;e.totalCbs=0,e.called=0,e.cbTime=0,e.end=r,e.ended=!1,e.xhrGuids={},e.lastSize=null,e.loadCaptureCalled=!1,t.addEventListener(\"load\",function(n){a(e,t)},!1),m&&(m>34||m<10)||window.opera||t.addEventListener(\"progress\",function(t){e.lastSize=t.loaded},!1)}),d.on(\"open-xhr-start\",function(t){this.params={method:t[0]},i(this,t[1]),this.metrics={}}),d.on(\"open-xhr-end\",function(t,e){\"loader_config\"in NREUM&&\"xpid\"in NREUM.loader_config&&this.sameOrigin&&e.setRequestHeader(\"X-NewRelic-ID\",NREUM.loader_config.xpid);var n=u(this.parsedOrigin);n&&n.header&&(e.setRequestHeader(\"newrelic\",n.header),this.dt=n)}),d.on(\"send-xhr-start\",function(t,e){var n=this.metrics,r=t[0],o=this;if(n&&r){var 
i=v(r);i&&(n.txSize=i)}this.startTime=s.now(),this.listener=function(t){try{\"abort\"!==t.type||o.loadCaptureCalled||(o.params.aborted=!0),(\"load\"!==t.type||o.called===o.totalCbs&&(o.onloadCalled||\"function\"!=typeof e.onload))&&o.end(e)}catch(n){try{d.emit(\"internal-error\",[n])}catch(r){}}};for(var a=0;a<l;a++)e.addEventListener(p[a],this.listener,!1)}),d.on(\"xhr-cb-time\",function(t,e,n){this.cbTime+=t,e?this.onloadCalled=!0:this.called+=1,this.called!==this.totalCbs||!this.onloadCalled&&\"function\"==typeof n.onload||this.end(n)}),d.on(\"xhr-load-added\",function(t,e){var n=\"\"+h(t)+!!e;this.xhrGuids&&!this.xhrGuids[n]&&(this.xhrGuids[n]=!0,this.totalCbs+=1)}),d.on(\"xhr-load-removed\",function(t,e){var n=\"\"+h(t)+!!e;this.xhrGuids&&this.xhrGuids[n]&&(delete this.xhrGuids[n],this.totalCbs-=1)}),d.on(\"addEventListener-end\",function(t,e){e instanceof w&&\"load\"===t[0]&&d.emit(\"xhr-load-added\",[t[1],t[2]],e)}),d.on(\"removeEventListener-end\",function(t,e){e instanceof w&&\"load\"===t[0]&&d.emit(\"xhr-load-removed\",[t[1],t[2]],e)}),d.on(\"fn-start\",function(t,e,n){e instanceof w&&(\"onload\"===n&&(this.onload=!0),(\"load\"===(t[0]&&t[0].type)||this.onload)&&(this.xhrCbStart=s.now()))}),d.on(\"fn-end\",function(t,e){this.xhrCbStart&&d.emit(\"xhr-cb-time\",[s.now()-this.xhrCbStart,this.onload,e],e)}),d.on(\"fetch-before-start\",function(t){var e,n=t[1]||{};\"string\"==typeof t[0]?e=t[0]:t[0]&&t[0].url&&(e=t[0].url),e&&(this.parsedOrigin=f(e),this.sameOrigin=this.parsedOrigin.sameOrigin);var r=u(this.parsedOrigin);if(r&&r.header){var o=r.header;if(\"string\"==typeof t[0]){var i={};for(var a in n)i[a]=n[a];i.headers=new Headers(n.headers||{}),i.headers.set(\"newrelic\",o),this.dt=r,t.length>1?t[1]=i:t.push(i)}else t[0]&&t[0].headers&&(t[0].headers.append(\"newrelic\",o),this.dt=r)}})}},{}],17:[function(t,e,n){var r={};e.exports=function(t){if(t in r)return r[t];var e=document.createElement(\"a\"),n=window.location,o={};e.href=t,o.port=e.port;var 
i=e.href.split(\"://\");!o.port&&i[1]&&(o.port=i[1].split(\"/\")[0].split(\"@\").pop().split(\":\")[1]),o.port&&\"0\"!==o.port||(o.port=\"https\"===i[0]?\"443\":\"80\"),o.hostname=e.hostname||n.hostname,o.pathname=e.pathname,o.protocol=i[0],\"/\"!==o.pathname.charAt(0)&&(o.pathname=\"/\"+o.pathname);var a=!e.protocol||\":\"===e.protocol||e.protocol===n.protocol,s=e.hostname===document.domain&&e.port===n.port;return o.sameOrigin=a&&(!e.hostname||s),\"/\"===o.pathname&&(r[t]=o),o}},{}],18:[function(t,e,n){function r(){}function o(t,e,n){return function(){return i(t,[f.now()].concat(s(arguments)),e?null:this,n),e?void 0:this}}var i=t(\"handle\"),a=t(24),s=t(25),c=t(\"ee\").get(\"tracer\"),f=t(\"loader\"),u=NREUM;\"undefined\"==typeof window.newrelic&&(newrelic=u);var d=[\"setPageViewName\",\"setCustomAttribute\",\"setErrorHandler\",\"finished\",\"addToTrace\",\"inlineHit\",\"addRelease\"],p=\"api-\",l=p+\"ixn-\";a(d,function(t,e){u[e]=o(p+e,!0,\"api\")}),u.addPageAction=o(p+\"addPageAction\",!0),u.setCurrentRouteName=o(p+\"routeName\",!0),e.exports=newrelic,u.interaction=function(){return(new r).get()};var h=r.prototype={createTracer:function(t,e){var n={},r=this,o=\"function\"==typeof e;return i(l+\"tracer\",[f.now(),t,n],r),function(){if(c.emit((o?\"\":\"no-\")+\"fn-start\",[f.now(),r,o],n),o)try{return e.apply(this,arguments)}catch(t){throw c.emit(\"fn-err\",[arguments,this,t],n),t}finally{c.emit(\"fn-end\",[f.now()],n)}}}};a(\"actionText,setName,setAttribute,save,ignore,onEnd,getContext,end,get\".split(\",\"),function(t,e){h[e]=o(l+e)}),newrelic.noticeError=function(t,e){\"string\"==typeof t&&(t=new Error(t)),i(\"err\",[t,f.now(),!1,e])}},{}],19:[function(t,e,n){e.exports=function(t){if(\"string\"==typeof t&&t.length)return t.length;if(\"object\"==typeof t){if(\"undefined\"!=typeof ArrayBuffer&&t instanceof ArrayBuffer&&t.byteLength)return t.byteLength;if(\"undefined\"!=typeof Blob&&t instanceof Blob&&t.size)return t.size;if(!(\"undefined\"!=typeof FormData&&t 
instanceof FormData))try{return JSON.stringify(t).length}catch(e){return}}}},{}],20:[function(t,e,n){var r=0,o=navigator.userAgent.match(/Firefox[\\/\\s](\\d+\\.\\d+)/);o&&(r=+o[1]),e.exports=r},{}],21:[function(t,e,n){function r(t,e){var n=t.getEntries();n.forEach(function(t){\"first-paint\"===t.name?a(\"timing\",[\"fp\",Math.floor(t.startTime)]):\"first-contentful-paint\"===t.name&&a(\"timing\",[\"fcp\",Math.floor(t.startTime)])})}function o(t){if(t instanceof c&&!u){var e,n=Math.round(t.timeStamp);e=n>1e12?Date.now()-n:s.now()-n,u=!0,a(\"timing\",[\"fi\",n,{type:t.type,fid:e}])}}if(!(\"init\"in NREUM&&\"page_view_timing\"in NREUM.init&&\"enabled\"in NREUM.init.page_view_timing&&NREUM.init.page_view_timing.enabled===!1)){var i,a=t(\"handle\"),s=t(\"loader\"),c=NREUM.o.EV;if(\"PerformanceObserver\"in window&&\"function\"==typeof window.PerformanceObserver){i=new PerformanceObserver(r);try{i.observe({entryTypes:[\"paint\"]})}catch(f){}}if(\"addEventListener\"in document){var u=!1,d=[\"click\",\"keydown\",\"mousedown\",\"pointerdown\",\"touchstart\"];d.forEach(function(t){document.addEventListener(t,o,!1)})}}},{}],22:[function(t,e,n){function r(){function t(){return e?15&e[n++]:16*Math.random()|0}var e=null,n=0,r=window.crypto||window.msCrypto;r&&r.getRandomValues&&(e=r.getRandomValues(new Uint8Array(31)));for(var o,i=\"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx\",a=\"\",s=0;s<i.length;s++)o=i[s],\"x\"===o?a+=t().toString(16):\"y\"===o?(o=3&t()|8,a+=o.toString(16)):a+=o;return a}function o(){function t(){return e?15&e[n++]:16*Math.random()|0}var e=null,n=0,r=window.crypto||window.msCrypto;r&&r.getRandomValues&&Uint8Array&&(e=r.getRandomValues(new Uint8Array(31)));for(var o=[],i=0;i<16;i++)o.push(t().toString(16));return o.join(\"\")}e.exports={generateUuid:r,generateCatId:o}},{}],23:[function(t,e,n){function r(t,e){if(!o)return!1;if(t!==o)return!1;if(!e)return!0;if(!i)return!1;for(var 
n=i.split(\".\"),r=e.split(\".\"),a=0;a<r.length;a++)if(r[a]!==n[a])return!1;return!0}var o=null,i=null,a=/Version\\/(\\S+)\\s+Safari/;if(navigator.userAgent){var s=navigator.userAgent,c=s.match(a);c&&s.indexOf(\"Chrome\")===-1&&s.indexOf(\"Chromium\")===-1&&(o=\"Safari\",i=c[1])}e.exports={agent:o,version:i,match:r}},{}],24:[function(t,e,n){function r(t,e){var n=[],r=\"\",i=0;for(r in t)o.call(t,r)&&(n[i]=e(r,t[r]),i+=1);return n}var o=Object.prototype.hasOwnProperty;e.exports=r},{}],25:[function(t,e,n){function r(t,e,n){e||(e=0),\"undefined\"==typeof n&&(n=t?t.length:0);for(var r=-1,o=n-e||0,i=Array(o<0?0:o);++r<o;)i[r]=t[e+r];return i}e.exports=r},{}],26:[function(t,e,n){e.exports={exists:\"undefined\"!=typeof window.performance&&window.performance.timing&&\"undefined\"!=typeof window.performance.timing.navigationStart}},{}],ee:[function(t,e,n){function r(){}function o(t){function e(t){return t&&t instanceof r?t:t?c(t,s,i):i()}function n(n,r,o,i){if(!p.aborted||i){t&&t(n,r,o);for(var a=e(o),s=m(n),c=s.length,f=0;f<c;f++)s[f].apply(a,r);var d=u[y[n]];return d&&d.push([x,n,r,a]),a}}function l(t,e){g[t]=m(t).concat(e)}function h(t,e){var n=g[t];if(n)for(var r=0;r<n.length;r++)n[r]===e&&n.splice(r,1)}function m(t){return g[t]||[]}function v(t){return d[t]=d[t]||o(n)}function w(t,e){f(t,function(t,n){e=e||\"feature\",y[n]=e,e in u||(u[e]=[])})}var g={},y={},x={on:l,addEventListener:l,removeEventListener:h,emit:n,get:v,listeners:m,context:e,buffer:w,abort:a,aborted:!1};return x}function i(){return new r}function a(){(u.api||u.feature)&&(p.aborted=!0,u=p.backlog={})}var s=\"nr@context\",c=t(\"gos\"),f=t(24),u={},d={},p=e.exports=o();p.backlog=u},{}],gos:[function(t,e,n){function r(t,e,n){if(o.call(t,e))return t[e];var r=n();if(Object.defineProperty&&Object.keys)try{return Object.defineProperty(t,e,{value:r,writable:!0,enumerable:!1}),r}catch(i){}return t[e]=r,r}var o=Object.prototype.hasOwnProperty;e.exports=r},{}],handle:[function(t,e,n){function 
r(t,e,n,r){o.buffer([t],r),o.emit(t,e,n)}var o=t(\"ee\").get(\"handle\");e.exports=r,r.ee=o},{}],id:[function(t,e,n){function r(t){var e=typeof t;return!t||\"object\"!==e&&\"function\"!==e?-1:t===window?0:a(t,i,function(){return o++})}var o=1,i=\"nr@id\",a=t(\"gos\");e.exports=r},{}],loader:[function(t,e,n){function r(){if(!E++){var t=b.info=NREUM.info,e=l.getElementsByTagName(\"script\")[0];if(setTimeout(u.abort,3e4),!(t&&t.licenseKey&&t.applicationID&&e))return u.abort();f(y,function(e,n){t[e]||(t[e]=n)}),c(\"mark\",[\"onload\",a()+b.offset],null,\"api\");var n=l.createElement(\"script\");n.src=\"https://\"+t.agent,e.parentNode.insertBefore(n,e)}}function o(){\"complete\"===l.readyState&&i()}function i(){c(\"mark\",[\"domContent\",a()+b.offset],null,\"api\")}function a(){return O.exists&&performance.now?Math.round(performance.now()):(s=Math.max((new Date).getTime(),s))-b.offset}var s=(new Date).getTime(),c=t(\"handle\"),f=t(24),u=t(\"ee\"),d=t(23),p=window,l=p.document,h=\"addEventListener\",m=\"attachEvent\",v=p.XMLHttpRequest,w=v&&v.prototype;NREUM.o={ST:setTimeout,SI:p.setImmediate,CT:clearTimeout,XHR:v,REQ:p.Request,EV:p.Event,PR:p.Promise,MO:p.MutationObserver};var g=\"\"+location,y={beacon:\"bam.nr-data.net\",errorBeacon:\"bam.nr-data.net\",agent:\"js-agent.newrelic.com/nr-spa-1158.min.js\"},x=v&&w&&w[h]&&!/CriOS/.test(navigator.userAgent),b=e.exports={offset:s,now:a,origin:g,features:{},xhrWrappable:x,userAgent:d};t(18),t(21),l[h]?(l[h](\"DOMContentLoaded\",i,!1),p[h](\"load\",r,!1)):(l[m](\"onreadystatechange\",o),p[m](\"onload\",r)),c(\"mark\",[\"firstbyte\",s],null,\"api\");var E=0,O=t(26)},{}],\"wrap-function\":[function(t,e,n){function r(t){return!(t&&t instanceof Function&&t.apply&&!t[a])}var o=t(\"ee\"),i=t(25),a=\"nr@original\",s=Object.prototype.hasOwnProperty,c=!1;e.exports=function(t,e){function n(t,e,n,o){function nrWrapper(){var r,a,s,c;try{a=this,r=i(arguments),s=\"function\"==typeof 
n?n(r,a):n||{}}catch(f){p([f,\"\",[r,a,o],s])}u(e+\"start\",[r,a,o],s);try{return c=t.apply(a,r)}catch(d){throw u(e+\"err\",[r,a,d],s),d}finally{u(e+\"end\",[r,a,c],s)}}return r(t)?t:(e||(e=\"\"),nrWrapper[a]=t,d(t,nrWrapper),nrWrapper)}function f(t,e,o,i){o||(o=\"\");var a,s,c,f=\"-\"===o.charAt(0);for(c=0;c<e.length;c++)s=e[c],a=t[s],r(a)||(t[s]=n(a,f?s+o:o,i,s))}function u(n,r,o){if(!c||e){var i=c;c=!0;try{t.emit(n,r,o,e)}catch(a){p([a,n,r,o])}c=i}}function d(t,e){if(Object.defineProperty&&Object.keys)try{var n=Object.keys(t);return n.forEach(function(n){Object.defineProperty(e,n,{get:function(){return t[n]},set:function(e){return t[n]=e,e}})}),e}catch(r){p([r])}for(var o in t)s.call(t,o)&&(e[o]=t[o]);return e}function p(e){try{t.emit(\"internal-error\",e)}catch(n){}}return t||(t=o),n.inPlace=f,n.flag=a,n}},{}]},{},[\"loader\",2,16,5,3,4]);</script>\n<link href=\"/ar-favicon-192.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"192x192\"/>\n<link href=\"/ar-favicon-180.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"180x180\"/>\n<link href=\"/ar-favicon-152.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"152x152\"/>\n<link href=\"/ar-favicon-120.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"120x120\"/>\n<link href=\"/ar-favicon-114.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"114x114\"/>\n<link href=\"/ar-favicon-96.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"96x96\"/>\n<link href=\"/ar-favicon-76.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"76x76\"/>\n<link href=\"/ar-favicon-72.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"72x72\"/>\n<link href=\"/ar-favicon-57.png\" rel=\"apple-touch-icon-precomposed\" sizes=\"57x57\"/>\n<link href=\"/ar-favicon-114.png\" rel=\"icon\" sizes=\"114x114\" type=\"image/png\"/>\n<link href=\"/ar-favicon-16.png\" rel=\"icon\" sizes=\"16x16\" type=\"image/png\"/>\n<link href=\"/ar-favicon-32.png\" rel=\"icon\" sizes=\"32x32\" type=\"image/png\"/>\n<meta content=\"#ffffff\" 
name=\"msapplication-TileColor\"/>\n<style>\n /*Critical Foft with DataUri*/\n @font-face {\n font-family: 'Source Sans Pro';\n font-style: normal;\n font-weight: 400;\n src: local('Source Sans Pro Regular'), local('SourceSansPro-Regular'), url(\"data:application/x-font-woff;charset=utf-8;base64,d09GMgABAAAAAD4kABEAAAAAmwQAAD3CAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGkYbsmAchiAGYACNFggqCYJzEQgKgcwQgbINC4QaAAE2AiQDiC4EIAWFMgeJGwxWG7iKF9g27YNhtwPg/rvfFGaDabcDJUr8QyMRehxgrSqz//+/JydjFGDHtmla38MUyOqcy5zIuDkDS9xYoi4IaZMtNh1MPC2t/apAKGMpT3108cseeN4t8voHIXYgRGUG5V9RClMhyEHdX5Vt9IlskYi9ZNm8MNs4tacyTIssTmEjaarGIxLh7jRSNPmH/bOT/3nH54vMNvvCG2UKq/R4eLRhtch0U9LrTV+nYYrCEVFp0frOgTl6BraN/ElOXvh/su3r3KrqRvSbN4yaZSuxikfOn3XJzKIJ1xR4f869LxkBpxkhpIjpB6F2U24SUIv9qLO+J7FlSUYZZEjsOICzk8zMzn4gqOiAKsQWsGivJSjvmu7qzwP8/vrOm5IXd2EmCMmIkD+E3ZBEDc//c4je9344YNsDmm0bky4q9lRwCtS1Yv0ExKVtCmDGz1M51rjUAtaIw6FQoqHnH/8O9AM0t25s5JEC67htbA0DlsVgTYdBqaBgFmaB/xhoPwYYFe/L65dV74e9NV2uwjO78iw3beW0jBnZMreE8y9RKrjg0IajnNqsFR/MWMEyGEoAC7ZUhMfnjqPv6usiG6hsgiIEJudyamreTU24qUkA5MeWU0i+TQEvdxtac2ahqCUIEULfBrWQReRN5KQA/nn8555diXtZ4B8Grgnu+ARL8D9/kO9vH6y7KMEeZhSIbks5qQYUALj8/9v0c3dD72oy+aNFTYh1cpwjhxdr/wBQR1y/ee/JM2/ejNg5ksYgy7s+trwg+CQ5IM/YP7IcYhB80F9gKIMVAHdQtNyUVDYLXcJ9uhRNkzIhjJ3wAYbh3F5HGAaGcf6Xqtny/U8I0jliYLnHwKkpSVPjorG7glx8cgdcYvYC7jLtXF2InROEXVIBvBBD5aJz5aIp7f//qZqf19udscQLmItfSwEtWE5pvq4JNMU/YLFodajJmJIXlDjiKq9004jx5/bVVxOPyli62dZxHXnrWFYheGvjPzbO5hbWJdvbq3tNRCRIkCFk6zE2qov+SjfKwkKQScrN39EIkAcAOHmwccicOciefcitW4hCgdgcSEICkpSG/fsqggBZZ7jMomnvDOrnbaEN3esmxYKRQyl3YUEmADMBxUA0EKOMd2JfGbpQ7ycAov/SkwkDgmJPkXd7FFIuExA06zVjDZIpR41Rr2CSJrJOmZhKoiKScbIl2DPCC0IGpoRkmWwZDv0JCvTQPZxWVdBmtYbxKtZdiiHRp+5bWMH2wK5IfYlmYBj2Baqdu5wB43Zbrh49UDCGwoILI7wksuDcy8nAL7TbiwN/p3/Brp5qNJIXZBjbfUPXZQztk8kQ5nf+4I/+5C9wMNFwMQ+X8vAi/uRP/cu8I9gjWIryJC/UinkpL5dxlEKoZFQiNcdLNAptlnpztUFlJE0aS4Gd2kHtSueB3jXtzLrI3Yjd5uDO41JPHu9lby73W94Veg8S7/8RPvSR0sc+r/YFX9b4mq+rfdN39L7re+IX7Kr8wi/7G0B+nxjiXed6ETfw3ChwE8/NGbjFrcRtEreLuUPizoy5y72GPPvSxxXLBSg++L4x3kSYWY4
TZwnc5uf+owsq6Tw0aQ/w/uQTI9A1xExp+8YgSYSM/8T64kWuS+K35LIXRLaAcshc0xJ50s1iKATO5y0pkFwTWWIOlRETk850stt5Ai49JbNizSD29RR9fmMhPMtM8bRGUomsRY4UBOZ3F3BMGTBYHApslzgnIkopRya6xp4mgKwHdOvzBlWqsvBqnB9lJlvOnRboVQXYq44wRyKikEPl8aFqrMU2PATMFM2caYqBnuzeCIS47VtHqPHm1BOwtiQDqSJczeJzFVNmlj6sosjRgPxFV81y9yxPXU92DPNH5fuTYBIbwuXwBEm07SfSo4cH0VLn0kW9ZD7qAsXGqQTIJrMlhEy9l3/SpPlauJqZfc0/ZMG6tjWnHYQKFKKudR0xSwdtmtF+e3DcQiVG19tsYNIMIjKFT8tVJQED1a6Nj3HeB0vAPNgFBznWGmt/uRa1galk/e33YYa5OXtWmAYe8rTMEX4CRZkE8tm56/1Zpibtj9e9a0TKpRIIFH7iL7uY/m4K9lkzWxK1qdlq2EJ2VGrOU12F7HiwZJgrVhgqGHvAj8ay9KqxqavGWJKnMy2mS4z3ae39P1+vMAZZ3iwenNPNSSqdJryDHY/hY7eCnncx/HRAXK5HW/eergoosPhDPZkRsR/ppeqqj0YCWe6OyehSIjnTc8/01Jho9o2wuWbLUDD7XCwaPQXfKj5Pa9NLEGYbZzEOC4nDlM308uQXFfdUvMTN09kt0MP6tEFqnkqbQqW5mEeKaoC0IxHZXRBdbfQ6S1Ha+/zeiNWR3+Bnik6Nshajj5bJKxNs4KfB/Midqett2NzPS9nqVvr7Iluy1qwrlgrSDNtxNJiyY/Th5VF8rJ4reJKJnJQlFedJehRrUt2Yp2NOjMXx1p+vMMvOOtXmD4WNbO7NGQOLnWJn1j0naRgJdhRZtlRAA1IvzXWZbBV7t9lcyA1vw6P3d9oGIOnL/7884ld5vbGnn608Zo/ap7XwEMXN3/hHpoIuj5kBPCNoDyNifr7RHl1p4w2zZh0c1PuupqjecbsXOVyN6LmEXXX0bVikI5AjITYzJXfoLCIoYlpwZd1nuTEzEkZzldr4pzswI2tasTN2Y086wff7txQpE1HMRKsAqOwa4dc559TO+1lxnY3D7OYldbEp0hoUQUnH3Qf6euhj2JOEH91oaHEoNz/FVujYE/bsb8Q8EoCDjgbzIcpf+cKl7Pt/T0eEYxBAgoaAqQRFpRYFRmvTsDwg2I8gGOVwUkgzrFhK05yCU/E6M4OFpbUtgBwBwAhKBgAgkshkcrleqdbqzY0mKxto1cdRXDZ3rrkrcLPXX3Ma3oMhGMlg9Ul16mMCPaxCAXJCS0t93v8wfp8dxjpwzDaifyUCpFuBqWgA8+mmrBmAvQkAwLaw5zwTQ5I83rZTWjJ8/3cisHwkpw+bAQDQIeE82GPbPV8FSFAC2K4FoBBMlbX7yj0O18RABQ6PFlQMnTgEdkXJBkADCUD7fwpgokzg3LsA7p8RypGArRHIhAd0vOlc+3SQ6YVNz+38PBJNPqum+vu/vP/4UsAKndIMxAfWGlMgZfgEcW0v73FjDFkchyhCfDRNuIj4l/n/cYT78BgkPnP3wUH4UkermrW1zE8+7Ks7Mo209bGND2YVNwFb+vN+I+nulU+LBuoqfLeerP91v8/exr++LQbMwnbTCXjfJ4Ds2g9SLmuZ7Yi51FV4SQjx9L1SPDirXR5Wuz26fevUkF+o4Sh6jW+LNgNJNiTgMJxs0g9iD2C1FKpYriacKhSqVKpKpWq1mvEq0ai5VhS4bWSvWzO1JVUiG1s9CYaNTO9AKnNUeyfVu1K5azVwVkMXNXWjGrfbn7nzuNITwr10laQgHam4fuYrvqoX7EXrQCM1onklLbWiYymWrBRJVoq++sbf/A2FACj/CG37tCIdpKeWvcMkf16rOnvoiBSpjxsWiHkFkrOMZFOh7yW7afTtZC+EKwkl0bAPOOMbDqWcdueMlil
039yckSv1Zgr49Xg6wODv+p/tdgfDiUf5t80qgUv3c5rt4caj+PvjcIV7+whAOMAjPAEigsQZI1KU0WWZaU2IygYDRBoRcx0YYmT1J+/YKJZ3nXXOeRdcdMllV1x1w03XXI+C6s9zzUGHDBh02BFHHXPcKaedcFIIjrPNAPCgHtEppXnq48LOSE+eqdzgiFYw+gEIiHGId10FIBxSpnvg2qCj5vTsH/wmFo97eY3BGA2kCWWVk1JrjjNRuGkHRewOUNP4n2wZUhs4FQJjhFAxVghTwBAV8290tPhELcmO3AU0nfAck1REv8aHpoR3WV0vFNyVDQLYkB1GHSYODcDlIu+Ddy4AY/wpD8Cye78DGuTzjPF60wH/ZRISgN0HAB3rEgAJOBAAMHJ2AP0EjsLLwcmqCH34sT4fGYJeAgoaeUq063TUVQ+98R3Wlqdner8PTpOwiA6WsIYehiji5ODpumm1wyKQIUtO39TR1cu7H/+hiZt3ncSkO+m/afN+yR1E/DzxcogvJ76MeHv+p+6vaXh4UaHI6h890a/91JOERLqiBTAB2BgBcJPmDX8APnxQ/6e2kK3abYp5bI0FJk1ZBmw0pNa8eg2g197absO0RTM0p5lmLZijhwLbUO645a50K1xrvh2GDVvniXin0C6LYdWYwLYfR/CeemaHTCZmFlY2dk8NksUlWw43rxFaValWo1ades/MNdIojZo0G62F03ALderRpVuvDIiyvTcDZiBAgAigDtSBragLdWEX6kE9yEN9qA+30QAawGM0hIawBo2gEexEY2gCZ9AUmkIGmkEzeI7m0Bw2ogW0gKNoCS1hA1pBK0hBa+gCa9EVusEmdIfu8AY9oAe8Rk/oCW/RC3rBdvRGb+SiDyYgBjARE5GASZiENEzGZCRiCqbgPaZiKk5jGqbhM6ZjNWrAGqxBEtZiLVKxDuuQjPVYjw84hVNEkTiN09iGMziD+ziLs7iDBCTgFhKRiLtIQhLSkYxkZCIFKfiOVKQiG2lIw1WkIx2HkYEMfEQmMnEMWchCDrKRjR/IQQ5uIhe5eIc85KMQBSjAFxSiEJ9QhCJkoRjFOIkSlOAXSlGKApShDNdQjnIcQQWq8ATVqMYOnMMFdISLuIiRuITrGIgbuIGXuImbGIRbuIchuI/7GIUHeID5eIgPGI2P+Ihl+ITvmIMf+IFX+ImfmItf+IcF+I//WI444lhKJCISYTaRm8iNwUQeIg/mETtD7Ay+ErtP7D5+EntA7AG+EdoQ2uE4oRuhB3YTehF6YT+hH2EA9hIGEQbhAKGKUIVThNEIo2ELYR7CfDhIWElYiQeEkwkno5jwNOFp3KNuK+q2wgvqdqBuBzwUo6CkYYz2kjeJaB0T7WLybjHR7lzR7nrR7n3RNcDEByFy69tHsGvfRd+n1cBw9CUSf+RLXYBNTDDahKySCyFkYg3Y+EpdgE9MYvaM1f1pmVAnet1cJzGukzkHhtNetlnofU1kPG8bWd37pngpmX9rFkReXUBISAEbMQ3hli1ttZL/d6RDJ26gA+mZW6e9KZ8fmrw0JFMpjPj17Cf0ekYRGaJhKrU48tzk9UJ4PGx8aUUIGDu8sbTcldw/Vwa5suLz/aSpR5B/QcKx+W0VIGuSglilZ6wSTXytNybCNB1eMcfIPP0SFdaFyIkbBtMoNJWiOgDIviuA8gCuG4CfAS3HAHQeAoRvAX4nYPQPQgGHRzg0wwXO1aR/C1MvfaIvlV18d7XHDC6yZnk147YLdxtfWa/H+kIMaCeuYlhJ6qUuKk3Ou0sLwm0TdHulFKEZLuofhm1c/MjWMF/ICc7yALvnsxqAEMfeXz4l8yAYw0CrjaaJ4X2e51GQL+LgRno7+li1uoxHF3VBcR+njupYm0dsqCoTF6Vn78qzsFM89mGYBZFJgnMkaKPUNhf3kyuT7eHp7clanZr+JlTrwUYbG9tBp1xz3c4teJoKQSnNKRWkPN/QzRpRfE9NnveM04P1UtR
SrYNmH/JFq6NRG93+s5wPcj2vMeanY38nZtsENVopTVXF+eNStmXC6KSK45AZ6r1zzJQSuSKlUISVPGlwm5N8gibhCwgfpoOY2QR5yYWfGwAsgNdwxdOG0CIXMwchtvn0UY0QYA7WjOnAmsQz17vSADptnJi9tgwk2E0vIzdKXhN8EIhJscjJvKKwoA5dJvLQd6GEJyVW7TENakPD57mVtwl9vjtdylYY5OkF9dZaOk08ghaHuWn+ro3cgGPpGY+3WYbTF+MojJZEJQF1FATdRUxQS+HOXFNWv3pDenMQjQPPXxXzEfSv8q0P0WPsb6nj/Qwhr+RLc9wbo/EN6fZ15OETo4Epsnb6k+7liVX7K/BqZypRnCYfatUnzOG/mrBW+qkbIOOFqeJBWmG3gqUCmQkY9kE9szKspOTmNTehvs6HKPIYziUez7fpwLXQO3AivePJdc1JdQCtm2gcPoAxYbItElfIkXRPj1YUjqC8yo3CG2YEJ6b2vwFnEo3HdwbSeR8z4YwJQtvaSTlPmSJq4J9X2uxPG8pOQgVIt17Ogr53qTHlUY2CuIXTBLzk8C3zlpLJXuEI4inckLZlUjcbduVDa/NgNtbWIQ1IRm04CX8WmZzN/rKOcyfg32n4ZogPYb3nxoBlOgi1Us6RgUz1GuXEqXMelmIMpEgoXzLj/nL2tabt6QzhQziE4lAawydYwJT7UEvToj9AJfDfhPOpz3Ll5K0WHeoGPMafR7qSEs2cDPcFhsUIAqQR4DXfHHTD8VAXKzyLBnkOaY05OrkqUIQxPf/CZz/FMy+1VY1/309h71NI4l3CazUu3Vd9MXxi9IMMEpTF5LZjp4+O/qMfXZfW3eR1INAc9PzrCS9NpKAZeOhllwmwQ5crlkChyjAV0/XwBJbSZhPraPR11uzzi1+owRyH55atj8PEsKXqL/Fi2sOsSjWp2kcEoKybAX7jz6c+SCuk83GHtxNvLHhJRSGr7nXJ/R4+8b7QOsyg6/F69flto8QyIEseFoev1d9XT4NbV9LPBVCIKAj9NUwdTq4fP946KF1Bo2sOXGfK74jZhPxMwiac5mRg34nzOrghn5Af1ed8lcp82/1MreRpVxpW64w8/UNJ7Iiw7DbqxHczMA1kwjlGXiHYC9pMGco7iob4Sn4lMJujWtsCc+OqtC4xNvsPbxhnlnyPbIVmYu4RJmZ90sTJ1VffRpE4oYg6JsDs2zqqB3uWqQRXJ/cv5XFbnCO7GmpLoOP5j8MVmD1Lc5/Ot/Q87IXnJBLirI+nIBmnih+KdZ6uZRmQCwDKepmM/zlSfu6RAr+FS/7ubcwfy/p+fvUz76JVfJb6sm19AQbUTP4d8uN3w6qoUsqMwlYexF8JePINmQZ6UExesQMjbfFSHFf02ojrZ+4hYwsfhjDrEd6IoKmFZDZkLMYo49dd+XBi9GPKmoQEWMu1YO/rjRsj9Y4oXLZWC+GScY74rOqT/zFcmcWTDx74NYV0Kn+BlpXBoV525CM9m2UOhdqJyLpCVbQu7wCyUIAWg2RMukKPEBUlJUFsN2u+xNNpioCUoV8JBv2rasy5+7HnUUwEQkcfiivr4+kwVx1ARkRMza6rtM78HAWHtkAyLP7UZzIfO90WCJBjPjJHk1U3QuKi4OfqoPiX2SbgzkdybGD8cI06UiY4KciDZCdWdmgBzOjvp22bKv3xvsnkzpOeONOZ9hkGyZpqiFRsCDxQ0xCc+dRnJf3hZuOo1PuwXdkPi/KRPV15bz+as4sJVUnlIhGY5/PBQXVQYezqKEwLbSWG7ddnAkPMKvnjoguRgmR1b09ZkniYt79NZVNEHLq1grrJP63VHHrlxL8qy4785cv13PooSyMDG3Pj6PElR3hlumx9vpUpMFW5LSVxbt+qxNtn5h141Or+Y88q+Sy6oP+Y89/rIAAlmVrKhVqm5HIBilEAE3VktZZgtJmMeFXxNCBieaELcTRJtbXQcoTYBpuk3O4HQtAyDASCZUQ
HjX4RumPO+61bzLlLvEDmyslrO/GpticCvJIAO8xMgQe/MsTrgkYlXbp9ip/OvriWBCWOqDhi/E6PduZG0ELsMni88XTQXmLPUPxNf4hf6Zejf9GftkGeA+E0sZcpOV4oEmosF0uk3Wu+7w88opljfRkI3iciGs3aLV0F68PAl2aWYV601SXO4beghfmuzpIrj9L1Ml5qs9D5lKHstVFpXrYMrCilyBfs34Yemhkr7BadBNYqijwPV/0ggKl1LIQTUwTKNILR7K820JXxdapx+FNEf786MjbqOz8jvbUtwurc8rl9lpoPqat/8oByaZOUZ39tRXwrQAUVt1+6iAv5wnZDRQXpxoS/cg23sshqMOCcrX/z/zqCZq32/EyFzK17IEVJ1odCLKSBMeV5C4j8K2LIdDWNpEsK+ThQ2YDskKBa0x6XvslV0wtbV7z1MRmfOuOiNCR5beFJopyfN6bIf8ele3gKnY3jJpSBMaGlMczxQwzzydLL/qU3sdgQKLgHr51/J5M0aJOgFPnROt4BU7l66K6HlmKhtWmzL4iUwar8rSdwskouytffieamkuuXCrCzViw7Bmfd8gZvwoxEv6/zI8Y9sjkKpGmzKIfVVerFRDJl1iNPthwO4k8yyT1l4NM2+h3JqlzEGIy15rygIA8cnEtqVXQTMqMKcYU0i606r1apxNhCSK4WtYLeUSt5gV3kFytkLJHUiZLdES5T1MUyNl0Q7MV3BGd65nhfaGRiDbGfMwga+3OlNQGkj0mdHKh5ldm0qtK5UPo6ksbwllvJ+8Ek7PQvet6Rx+YJkPe0iGgzCcf9GzEvmwSbNAl1tnrQn+c7tl4Jdw0I+KITAb0nBQLw9FWBs99A/sTbRvipW7mwfraZqduEz/t4iBnSe5Gr3XxLCnjrwZrAwgx2pq2HeVXAr84Ss75M//VGHqrGvMvvSnkZrBzwegGfLwALBGB+P6wUmAZ4wc4Hc6OeGCh4q2cfOuRJxg7j4KT9NyBK3qIcFg2LCSn2e3yhn95XpArklGiLAqAv7cjgf0bxWypbt1b04U2re3ZqtLEFpy/dSTKu2NB3QPcdctCmHlFN3nfAuOFWESwGOHSsjWAxryYz9JFn3AS6/rTfsp4scxBQzdmGtqLNU3Ev8exAVX5IHgC9Px18yN3CA5m75MGSkQqYZYqkCiGrkp1CCAu1o1+ceo9t6vyr80ng8pWOJ40XRhDLTxdFSFLD/4inPPXzD0MLinARlas8+0Z11pvXdVLsTvghmisSZr1GHLI6DUKaWSvIR6V2jC31WhVihUPgrIxu8hWqdMr8tEBTIAdUVfMdfqgz28XtcAZqU9SZecKrp1drKZDATpcZ8KVKFb5MYrQxBEIz8/3NhlKRIq9r04q1vWuzWO1ObyPP5tXbm5tmdXQ0TWm2y/K62k2sKx/NH1+LzM6DU+zXT6ZPTH0+qG3gO/3czmwXr8MVGJGipc577Pj19iwtTSx20VMNxP/SW27MZshof4q2zTHeTXpbDTmbdp5EUaK3yysTyax00LsYN8FxipqD/iaaTKKxn/VehZJsvQYSV99bKzzeUnTp48ulhSe9o6ZeqyzHSWZpUdFRuBtp2I3YnfSm57jCyq6sWl54LRchWTLOk+pxn0Zww5JYbeGuE9P/crVpBoISVif1uLr8r9aYpiNEYVXSQL4dVFUJrEF2p8vFHm/1V/FVEpJi87PHZ6U7bUH9QCgtPBNDbMKVqZS4UrHeROfBoEd8iylVKdawfjLI8roGUvMSv9CscinV/i6IljcgLGV+t8Zk9sv9RXp72o3vqxlXoTbUDjtMX9MPQ3wH8vy56/6ATBWgjLYcxb9PmcktmX6fPK3eA8mRiJvfn1X2vK7LYu+wixSjxOnK18f/SChamzQmOXeEM0el0eZInCMK4kBjq6QPQy/iSbTmdCEm5FxzXSG79nNgNJ3xZoQNw88o59FOP7CQU1J1Yj6a/L87tYA1QEwz0VlE/aIcvPV32xn
TI9MZm0QHiv14lY5W75rkmZKBci1oIVNa1qNcGVOyJ7lo9SqdDy+5cX1Bbk0pFlvOQWBw5VE1pb6rCzTdsxf4DRXTEqunIX5A1WzKq3DkVexF1f6EmJZQs8RQkT1jQUXZ7GmxllZEa7yltWT26Lgj7t2ZBcHeM3UvEQ9s/f7e4B7NkayC4l1nu78ihhbtL9qVf+rbqYW53gN/E4wIBIJi/MNzIHBq4TcNXxxEpZu6RbmoQrPxOKjMNuuy53lXx6Pm5FQQISvp5P/JpeyMXFdDPmSohfNc7GQt7Zpgxll3Mr8KBSU2skGqhfwFXqo75z4BC0Q7mRyBmSrUoPPSVJRShd5OTTUQIU2Smc+O0b6OpGaYtN8/g/YEUOihZGqoRZlKRpFa66UIhHrKsqW6JwRKeyPSAEHJ+pJ2CnVuuTzaRVb8BxY0TGnPoBYptHYaBA19hLsHPOcMNfCLNjbaQN11/duL6UQClJ2sh4LG0nYkKPJSVRpaYWYmvShT66RCKRYSiPFIYyLQFoTTn2eibdpw+m8n4QIDYVM5FNg//DNySnp0Ki3CaTf4rKiZYcRteEORs1pf3eP/5yHfGq+9SiYFwhDY6QQSgYdoZup47Kt4HYqRxLy1RV0BlJNYRz5pUIxhwfAL2CSUUlzk88qVVPzXPGzarmTX5U84+SEu95Ac9+myizyGNrHdk0jFCL2xIR8WtSWzbPwZ7nPuEyHlZHuKxGkqLpK6OcZBIXOpoSTofux+zLKiY1DoaDQ6Go2KkZHuoVJPxUxuiaE38glugi0jANPTWn2o8y7DfS7nd/iV7xO7Txyg5s17llc7sXIzKBgjr3I3JD2MRISXbm/2cN2PDWFG+HtbCi/t0Ytch5hvt6QspKTqbCV7n8Ca1esZUh2qWubHgvxsilJDL8pUhphSezP5IdL6lZlI04OnJ6VrEvUQlGjQppOgahMNB1+vSZflXNprqIaDWSyuwEwVaNB5ChWlBICTzwKi6ggYmHfT6FC8fNWk39TfQdQ57dJ2eOCSjQkB4QP8Eh3Sodt7JRQBZueqP60C8HkmGUSojTPvlcSRdKJ5adZroLSQwZv+UUfiCT+TEzP+nJC6Qj5xNYlQtukWK8VB6i+fHCCnbMEnoG+MMK80du2kEsvP6pDW5yDLHPkLPcaisiMiZxhnTtFPieUDUMzdfV/lz10PwSzPIA/qyy6Dcme+QLPDUTaBZ2DHX778xB6kuRvRnWzuyU/M2faXZv30v7xmdFOMthTRGKebZENn29GT47WNiMYYbZMJnTXprwojYnKsfiqiJc6wSA/Pj133ddGTXE1dz9iTiJ5gT3qdf9HbtR9yTaNOr76FONXerx1lzb2WsG9fnKZRGgnZXvaceOwri96JNm1EbEg0r/dHZ/fHnPw62GYm8b6P/lPvWM7YCL2RMiaztc+BkNa5fg8sWG1CXnlqfvqabG45eqXsyPgssMakUbtvGvNeUlpUdb01ZpUy+4Yh+JnSZJH49Xz3C+Pd30ktDIcyP+a4TfOPoDDe9Z3YiupotLfwcrxQe9ba3t6V62f5bnbrhRHCMW1TOjrbZvrIGm4WnJxAX2az1v3l66n3YKlgaW5el2fEaHlu4iuqRS6lOt4FUfLRy+43+N1qs9kj8zUozoEq+ULLQ8aVeols81REcjFb3p7i8jdXs+VivoH0AuWV4ikZofPGBUi00Rvu/W0niJU58r34Cj5KS3Ux4U1LQMzybHXrCH/78F97uitNYnGvJuKGGbGgaxfokGkW4L1y1Jcxzehz+MzbQ3458nsNwvTDb4eY00i27GTDgqfrPpYk6Tn6zlvwqmnhIC+LkiF8B4tWlKF1UXhphcrVB4IaOd9I+ozeFlIrc6phGXKuS1GoPpA2doyeiBVyKS3CEm+W6P3/SULs+6FPdqUrTWJ1104GIT3mceRSOgFaTyacHmAfvUSj7gQPk5kTIXm6I+uDG15UOaqIdWa5gcjcxz2ARV89wNl7mk32Lf+xNhq
EnKRBml8uIbnrbCQe00haOCTP9821jO3UpB71SIzcT/MsYydqB+29YzKJZWmmLGaKnopPjLfwYsFoUEugUlIzICqzoA5VmHNpqfsE4P5op7OFFqqI4I8wTYuLBPZVTOjoudjjluGvi8ZhCzjQz1AxD2j0JqHqoiwvOVmv17cncoZh5lT91Fh+PDEtFGlftR097SVGFz72/Mvdc7HnZs6elW6Olq0Rxv/74UOMwwMrg18MJonjo0O3b/oxCZqrCVln4LEJ76LpYSQSvvgJyLdgXXgdm2JUPp6YhnTQKYG2MVSl3pxOXrrO0vPeX/j4XkrxU2eiJrW1HPmwN1nPsdPH5RSwK2JwkUNF8x8lIQkXKIyfJB+ov5z7GCxjcF8ycZmCR3P/B0vrt23cWN9bWhK0eMG6rViLv0LWJFSYTAnlsZfx+KHYhHLTXEOnzGVYdKdFtl+DXK9On2rQs0Vm88hmexnLzUrNMCrZet7R1+Nypercyiofn5lnKCmqmb2wGXPQrH0BMl+nLXi39LKDr0i3quT4rHfOBIVaYkjYicMIfpCwdbRLe3VBngIyJrMHWOlqqwRxBhs0UiCyfr4d7RHuIsnjlxfMDegCu5LfnTuIDTFhEP2r7+Q+c38psua/bR10cNMIdJ+PorNaVZQ/TgwkidXm/+IH64cIHDMqm2iTSmmufgfeCYICFyPdQWlylrCm671+plBqY/bdkNcRKQMPcVWA/ZYLYnnKSstTCDaZnsDUoJtvhZDVCnMmycx8EF5oKtHvhueilkLTdR0pWpV5tSTNbJrCmTlsBbwg9SPId2BGfGeSDFEb6hFsmUnCGPikRbEgLfrlfAYmZsab5wh752MKmT5UaMcA1V8ENKFCI+MvFxDGLrDEyNIMSjFp4O1zQq7YwMPZNQmlz2VkE+tleKG5WImacgtOUivMBK3EwVhzU15PpA7wNbhqwHHbxWV7SterMAyrCbIZ6Q6ydT/7+vEghoojUDFoKgFHReu2qLu/hZPD0Tw8ToUh6/laYzG6+XRUG6zQAyuMasNdlxqZxVftr5qKjgybi2lJrYIjnFLrT/tjbw3HD8exI4SUHXXP//ampDryvHZKq2AEDC2mXMcMwxGGYfqPQndOevQuYzV9O/QVjsCy/xNR1tZ9eu8VQKmia0BlO+J+DMkPnvB6ooNjthFjeJmygHJ0MAiRw5IiMdgHMb4dlUELlxya/BGNHUIRiLxMuT9mn3bGKhXWVLaubl0LBrXcBl9i/JnPHUH2ffVjdQz7V8V8BaErRcvno0a9wU1V+NeNKOSk1/jrkXozEQQ7Uh+kxtzeqSw3Dk2danygrGApitPuNzePOD4vGNwZGCwd57T8iT4EFi4v2Cto/dYqQBGXC1q+tQj2FKD+WJ6x3Ikj8/mYuHYGASPFEOjjYiURgVTny6PLF+A4W0QpoR6JaM//iP/3CiWhnhTRFg5OwCUuF2AdQviFPVfrFkkQJxFWwCMOXr7BSo0ISAiqeCkmRtYeh+HzyTjWjcuDRG7k+vi9VZmhBp3/8Wn6UxAdShnn59SL6RcV+Gvp18BZPePTCktYryrREfWiiLoq1KsSVmphg8mX1JJPab9qwv92+S8KddwuE+63q/+NPiQPMB7hZ6xms88S8NMum/Hn56bwvnvfhGOxKeUtTKGDcqalOZcCGbC4KUMmwv25KcLnvmv/YomNPZtQ41yPvI+2iuQFaJ2UkaPIoGVrxIFkKW8HozlK25oS8EvanQ5JW8DXytdqW/hBn6TN4ZS0B/0tKRqOopxjc3DrdFqE2OzlbIUCR0x+re5McpRzNIVMLXYYziLgEo0UHYYObRjDlEzFpupsGTqNQ07+3/3z/muGGyqWHk8jmHk8gpnWj7FYhh3G+XAFqaqtjk6ra68mkQN1dTR6Xa1//EUH+QF9mCqU706hGjmmKiyXGpeAwgcWrbJN4BflyacFKnQ9Tc3LVF7HWIHXz2+z6ehlCp2JQuVMM4bM/ZciMZiNbKx
bpDNxeNjR90J6U5lnCS1e9IsKlEOUSa0yWqs5Bn2HvLQ8fV7Qymt0ORvYmpQcdGK2V9wOJl9ch19350YS8Q0MxAvbyvPQF7MVgYTFvvQCZL/sdOVpqH5ZIie9HFMUbOC72MWpS5qEmDoe6Kmr94OG7PqK8uw6gz6nrrwip14nZgqyppzAwWQFLf65m0Xvk/qg0D0q6XSylHHOqLndiWSOo2G6wt2lOg47w6RW4T0V5jc/pPL2gkIrVLIBSySRb1Kp7xUcSTURX0pGtsNL15zgYC5j5Rha0VA4mYXLKxJkPqjZQ+n5YnmbnpJes3npSBO8ASHKtvWDUgtUsgH6RqVdZbKH6CjK18cJA7fTNkQV3f4/AfuIIiQRc3Q04UnQmFNdmIrJuf0znbobXPNzTBJPaULYelMzVIY0vJr2xwlyA42Wz0A2VVs/CqdQ43w4F+S4dz41gZ7J8+C80UEq+yeQFK9MRTLOHEN0/U9CMaKOYGRmuTBhG0q7DBFfQkNbnPFhz7OlSdhCHFaFJiaAZ8h38MPCdytTi0XYlYa+oyjhrgzSeBqtPvoi9TjYpwGZ5c2TiX/jTEfZW7giq483wefldfadziOP1+lDUT7oy/orP8b0Ovx1kvfXXF6n18edAAbDk1YmI1cmJc1CJs/CCps/3EfhyaEJ+5KTDyRi9tH5Ij13xmS+mysrkldHxa2gYSsy6+YbsKxMuVF9SlyBEGGPsXp9jvAzeOxd2U+TSVWrOtjsztWVJFLl6k42u2NV1e8QXyOVP4SgN3Jpigb1Qx/mOFlFIKooxzGY4xQVkaAiH59HWUtjrKVQ1jJoa1HGqkG1JoxDo0Fhau2g3vhBTVhUqjcOarRh6TSaIkyjGdQbyhYRNH+NQKK7/yYTieS/u9HoPf4v1z3soeHlH3KhL8HPDmZKWrWAPfWOhfLLfJE+K6iNrMws/6Dj9O065OSkeHj0eTcs5F/ZBrnFnW+OrJwiF3PlRYrYdH6FVAcftj6aDGH1cXoMJLdB678xv1LlaRop+fmC9WmLCFCcjGT8FYEx519d1y3s1nYL5nWr/c6tj/C694gQvO6Rc6u3Rz1f0KPtEfasq1eBfAf5KDtvtobbEzEsMiI+PIP3YTanQKVxU0R8J68r1hpP1rlKo/3fRCFhsRFRiRH3hbejvY5iFaN8EQh6hI3zZXplYK6rQR0YvaLmlJVux3tkUmJOvpW+tE5avftdZbPo9MLgu3SR5LyZUlad+iNY25tW24u+W32yX9V/Uus3QaBAqu2qmMKOwVXrVq0tlZXrlLWrnJXrwfqp2xdQX9HyPlGoVyj0K2T1QRHlyuS+Xh1CzDec00QIvD5PMmUycfNGm+S7nbv7Vf1HtEeWlrv/7VRtV6Vl4JZPsp6Oh0WzUzgVIZqq2Yl1AcRnC3KTkQVx13ZqOWSHrh8JNndu9cl61XZYTEzmmhXL5v6Ijj+wvOnxvAnoxljh+bLKsLrix+hVgcY57tOVBa2jECHQ4ls1o/xXVgwvQOw9ZNbgcr+fT1D5w8qijzcbsE5JkYHy6GP7KLSdwrdGnR6WuFDQzJZp+XRw2eHaZo5MI5waVEQpwGIbcjA5eRCZ/FvaJpIrU7iqC8kDV+PQqXIrMnnbAeS2nXEIaw6wIUkvF+qVNAcK4vbS0QABD6DRAJ4AJA5AJRwA1IpTsjjpBRb7Ign5Cx7+778k+n8b8+cdZFLyD9w8GV7HKibxvBCq5ken1WASAqT4X3EQcIPMljggFkSW1h4NE3fx4AZZoOBHVfmfKdwgCxQ0TqOs1vjEroO4QWZLCIoUcHXgBlmgoFjF8lrwn/8pDFynpi3YIxJillJZyeJuJLhOzUDeVH7F/HfWolwvNXvyytEmIxVK6MY5rlPTFvAqFHDjwXVqBvK+tEKd9z35eAzTR+pEYVzqXdZm9OUIgHSTBnipXQf8fAGy6+yShbhBFpkbE8Cl3kDjDViZCTe7aoMsv91mXbysSG637hZzF2usYVJ66DH
SDE+vijiKl/CyqZkshnEUL+FlvIJX8Rpexxt4E2/hbXnnFvO5yvkXu+YPe8ZoGGSMh2kEcKvlfFNxcJ9J/rCXs1ZM2dEcLAYAOjF23veg3Z+oF7a0l3V15pNp7FFvNAMBD4eoZh61A+AXF+D/Xuvfz7/xD8CPfzcADIHbF1+SfgDAgxvw5ZBNslXm9xpFusTi3Bp5AH1vdYNsKuXbFwDDGjiNLPz7xujKKftCUV8P8gBukreS7odWFm6QTYNQ+QQgxQWyaUW+fUffcV9Bdk8MtuqfI3t7PbR3YvtvpCwxO/Me4wVcmBhs0z9Ld+5LAM9qYoUVtWCPjM5cyCoAFWZxA/RFr8cab+GSbdM/5evqTnZ+BXOxTf/ECEabIGcptEdDS20cyC8E63eGjCI6iNOsSXZ2Jtg9FOlLfQ9QF+nGDqD/jnLceP40bp9PlzHpJ4eJJy3HL4lTL9rAqWUSPYDnu08gc7ycBNiZE80QoGVC1AWHEMiehBLf5OwaC4rytwtFYdAmUCNI0VYKro/A5yYC8SCBxmbR9rUGnOkQFvS15yaES1OKYpmuYvuMTAnkbiGirF1s1n7w2a1R0PyDRJpt+go+Ag2+HHSq47maW9zrLEqPTaxS5onhiwLWnfcnN01b+6eyajPY6zpqz8buNLu5WdJFNhZSR9/oX2LKwakBVr0JsCl9X2C0XNLWBwQAfsGznqLQPxIA+uxXL4eVQCfT0TcMY5sizIJ83zneMhuFnSElSEY3yJLtd2ksES7HYWTEbzKdvwUyPJ0gsHvKz9HEnaMc+ltxsIUvAHQVk6R6q8FGfp+xHsTN7A41egDQN5JKV9DBT0LbclRG3ALAxQ8MzW3nYWa696sjhZGn2YMTqlwdKmhIfgbrl+f/kTUy+7rVDv2dRKyJjdMfKWgHAelfUoEJAjUPLIDMw0nHBSNdKNKMvA/Ck7YpePeuNQS2oO3wtpR0W6eMFtjI7OzuRi54uUD9R2sQARPEt6scTovOBdVU/8vjASxHH2VwCNAnLKDU7XvGEVYULRI5IkkgkRIvFe4iCUVpt8buivwi3jVGUCeNAPhlf2a4fact72MivclBEAd0K/ieiC/Cl52Yrs98yNxA4SJhkAI+t7HkFnaoo2pzNXn2/mjMqZo7dSAGEAauCZX3DBS341UB5zlfHG1WT1IdS7SBsk6H/ieghlZyKgU+QHVwFQOU3ksqOABO+wBHGoXQMsS7yobfJRWAtN1AzANCZWYNOOaV2ch0GhJ3F7LFEwrAiGgbxogfDFiKSxgeoLAQexunyLNoEHdD1IlIWjlIb99jvWlIAsSIv0wASHZIuELINWTv1j3LakaVWcxygYx7qK2DMUooqdnXthmBwLIVIhcQrSQ36S/dlqoeFYn0CkB19Cp6J9dKAlfeOZ6VyoJ8leH7JHZvpUD0E9qGgW47WPEzgO5XSHQKQmtDsQDQ+8SlAtCmLMvwAU2BPPIPaaBajrp/ViuUoI/w8QOKYTpX8NvD/EsHIJdyNGfvzzvYe81C4gUIIxgaoN2mXBG9gB97FRlEpIqzYcGVFcgFuYjzVcic0NCV8BKZnWVSZnwEeYNNnzlcB67mHomxWulUSFJClBDCNFvuPxzM1CBfeKmiGjPNjIKHMfZBUSs2GAwypR1g7IC5srEskdnQMJRm1myNjVCN2p3DfABxjTqaQ59u1OOfMW3rwQIfoubBUXDVCCxzLrAFoKGeHGsLlKjHFCBhXVCjAETXGMVfWiDml//4db0zAMrd8yoi/KprYtWwxPk2ZPBxWYEJZECs5JSA+M5xBQnpRbXmSb0PrZwX6FKf/jk8785svh6FqJ6bYUKceE3uM0ylnwhcPyVg6e1CnIX7gOV0n03ATQrpBsxMj0wuxoy4dSdbEkaIdqVTBhEdIvSKfnZeG2M5sLSuPG6CK55WrPhkqEYqLuFxZ12SeT7ZbCcWTuHXmrRGEPtkN4lHGBF3gPXCUaJunGOQ8y7qNTlrIUHUv4Cgnmf
mWpu17jxUhwsOB1Elq3NVDo7Ply9buLTO3ma05lGDuX0ujV/ri6UpESL1+OgdpEcABsflrgkG0KkTc9wB22Om6PcVx4wCc3EYLbgJ0bxyYmH7yYoCruFt0alBOTUt2kt2xk/lF33zs+6g8V5HmcgfV0agCsFYjBjKZlrR/75PDgDGXsWlqcxCRe9isDFMd8vxxsmmVerrpzvUw1zTQsKFCq4cZZg7A9AfXAXBnUOjviPi96Ce5xDSnZooUeEgZMigoxQysoIT1c2bWDZhQj7cMKDEMkdUrhpjg1wv4FtFEDCsoISkctBREmlRTF9uuxUAFldcJN+g8h5L2XLk7+hljGsFXnJB883zGzR1TnSenK3AxPc7HzXgCTwJBFyxLC8v3Bnsur7DOj8rk7094587p/krFHpFwgrmR1vInAtoOQSCLubQvbxO+aJ36v1IBW148x86e+TJc/X8Gc+l5nqcjJ8z1jcnJ3udtgaP+sAT0zzdUv94Ue/IalFtKqDuM4CwxrYKsTi7pgqjeHM8nLp2x80lVQsJxDW1Dw33PoiywKWhFU6PDiffkmwl0vrTXXOU7BIE0UJLhGPy1yTXC04VE8j83RiQvVm2GRaD3TrTR9LUJQWfDaLGtvZxk6bhdOQeLmoHpiwZhGrWRtndM1FUTJ+SgeNPnTZRI954Ku09ZYPMnrbUfUfLziXZibULwB2WzGED59jFlfkwTmwxfd8zAD7LmHOud2JqJ+2mkLB6U3sYJ6AR48AEx4ARLSnJIMakfDr8P/pKBwXPOhCRn5eogBLg/1GFwdn7lv/iBjtkwPo1cBeRu7p4eAVvmgYWRj2SFBm2bjQq4z5gYaZuqTZZuKO6+ClA/g93U+LEsxWg5BgHXPxyXo0/k4xz0Q09KwDn5WZF377z9BcbPJ2NP+022CX2t1fkDPnuAT2Uyp6iW7uzcYke3k4eeOgio3af219kPbMJzzDgJdv41bzZQOHuEJo5/9SUGoun+uKFxrxUqkqc5Ffz1aV4bdPrK77tonrDLQrrMwRxFqhB64FkBlmG/Q8RGBJQZECBQw2KFmzSx5Sr8Myc0dnwui1r7yXkyR9pW4rJs79NtRTUQvqhqQcCbGB++xFzdGvJgmWycT1YFH2+JLV02cZ0ksioK8tzmvXtKeNRDokIcD6tty5niw4LJFPiTj2qHI/u0tqdsUyOIpugQr7aPC+8Ha7AtjAHh0bmI1uezwG7svCqLki3UKswj2Me3SjkUwAGAbv96BMB+GwAlBo0zjU9ezCjZJhDwdefM8chbjBiJlUhDpU+5DY7dhEiNJV86iZ2UJpCnHXPB8klqtwJZupopM8023FTIO4D2aMn/BO/2jYEhwDsNV+yZM/ehFWMYOeqY8XEBaBI4BmFMzoYDH8OBjIUBwgU4ChB4IHRzJqHKZNWciBnAotE5WJBJ5tnK72wA8MegKvs0JEyBYEUGm524gVSdEauYWoJQ6iimTLQCjVwpPNqYTBV8PJJAYvr+jFRLm6BoDi7kt0B2hb1tPRcjZxlYZwxVZNRqR2YVnoREPPZ9YjcSLP0Mg56t4wUZXX3BdnG9CXRFiUaukraR38h6nutLXMvf1m2t7yIxbDXUHRbTUlf6CCbXK1da6J+gUWin8+E4HV9JCgAZQap181IUKPkqcLmqaDOCGc9AoBfJS6W3L1lvjKAP10A/o/Vx4AAwGdXBZ+m97fpkXMOQAYHgMB/+V8HSN+eQfOfH4yLg6ycu0aucwCXAHcb5TUDaatjMI79ec1rH9Fcf8KnHyFVD/5Y43cDzOArcrbcgeKsQgIVDb7TBAoH3wqqMEr6dv9rws//pOXgzjkp5SLIWymhQ3QBfsk9/IvefxbJG27fnTJKf2mCGCvc8Ua+q0oPfNzQ6GzPTdHuU+gcEVvk9QSSFc/rCT+bCHtSnI4rJ9y+KGzy25voC1seKIjHRWptDijKXyg1y7PnSflJIN3yM8hb5P1rCBh+Y5BSZ9cTypjz1XDbngM
Cejc4FWR3U7UWsW7EGRm5+LG0FDnv8AK4gA5YSACMLWwGzjTbR7SICLJZs09RKPigbsHkQsezCbK5goLqbKGw1QWGGsekzOxsd7DdSYUD9EgZMpFtGyHsfBCtKbgBTCgCkqC6/kWi+JUCKfbK3dKzaX6FcPqUKL1Q4yJJTdrdN56RAg7YQLMh5+mAb4sxzfKK1gVZduM+dSic1XMxMhyRRuw4dzFIQWaj3HWV/JvbZtKgTQxLh11k8NByNhUgvllUyAS6Q634aQIT4S60QBl08NmAjcF+gDVCG5yDC1AJDeABF4yDEVADi2C9tPG8vADgMgAgUJ7rRHOqUARAFhw6CRwBaY4DRyED/Ago+sSgR32OIs0+z9JE9wUmx9EXcXLa+2LG53VlkZlFox+CBNqOSNgZNDqu1QhqGDWIgSN8zFcvk3lECvqfSotw7kj3VttcEk/IXwjWPCNr1SvNuVY5QyBIXo4RTEgQ1oxD0Jesf5KB06dPKCI/Ekq44AaPW97qAOPo+C4EFZzyxEISJcAACalOzuwQ8aNUeII/LU3MxoRGENSIYbOWrVlxaJatL9iiyRAWkqZZ61JcGHEihoXFrYmNI+gVlaJch8NJcTzB0vA0MWWCKx2MRemerSJsBpHLx/GRNGHUGGrChtuDhA0gceMxyEUIN3pBHtrvM43g0IY9GADY884uhN61D7VOXbr16NWn34BBQwjhKUZVwvhXP2nKtBmz5lCTkoFxaJ07d3Ce1h5ePn4BQbny5CtQaLvzztqhTLkuFS6qdM4FV4e+sqsW/yraqdpCu9xxy201XnjmlTq16g3XYIQNRmo0SpNmrVqMNsZdY7VrM06H8TbabZYJOk00yUuHPPc6IfIKisjQJkkyJBQ0DCwcvARxDCDaBsFDqLpFEfGC/fb5ySmnHXHUFlvFJ4xn7HXSDDogeRbTKFJimGJqT8VwqZnmmI0mTCmllVFWOeVVUFEllZWgYUpUkpKFFEpoYYQVTngRRBRJZFFEFU10McQUi7XWhS0OUxwWziWWBWKqwXDFI4I/+cC/xPKztxxARkKxmFapTeGjN889P5vvviG/JkUCCSWSWBJJJZNcqUqTQunKUKaUUkntq/BfrAaFZ2k8K+cxwqCS5/TLQyh81pC1SWlpsnJPIxTS3OpHufcx3lzkj16ELs28SY3RGZmSGTmpG+lN9wQUDNQzsM1QbSLm8rfaFP3RZ9TRKZzR7eWPta5VPIlSMm4ReiiVRENYBSMnYdzByXNejKkip3cjoHsCCgbqKeiGgoKAbhioZ6CgoBsdrLYOLAyDFgx0k8xObjcr+zY1M0cbTO72TiPi61vV3MAinZz6jt0Xqedk9w2+2OFit9OXSv2qfD3wWj4PPMuXgRf7Btvch/XN9rM04Fnl/JCRDryVFEaNO4Ue6/MUHssOOr7Z+1AI3of2+N/r3+8NcmjIITf+PNDk1X5VYpljFvDCaJvgTfRYPqIIsb8b4qXG/ufztau3DOiSvWc+AAA=\") format('woff2');\n unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;\n }\n</style>\n<script>\n (function () {\n // FontFaceObserver https://github.com/bramstein/fontfaceobserver\n (function () { function e(e, t) { document.addEventListener ? e.addEventListener(\"scroll\", t, !1) : e.attachEvent(\"scroll\", t) } function t(e) { document.body ? e() : document.addEventListener ? 
document.addEventListener(\"DOMContentLoaded\", function t() { document.removeEventListener(\"DOMContentLoaded\", t), e() }) : document.attachEvent(\"onreadystatechange\", function n() { if (\"interactive\" == document.readyState || \"complete\" == document.readyState) document.detachEvent(\"onreadystatechange\", n), e() }) } function n(e) { this.a = document.createElement(\"div\"), this.a.setAttribute(\"aria-hidden\", \"true\"), this.a.appendChild(document.createTextNode(e)), this.b = document.createElement(\"span\"), this.c = document.createElement(\"span\"), this.h = document.createElement(\"span\"), this.f = document.createElement(\"span\"), this.g = -1, this.b.style.cssText = \"max-width:none;display:inline-block;position:absolute;height:100%;width:100%;overflow:scroll;font-size:16px;\", this.c.style.cssText = \"max-width:none;display:inline-block;position:absolute;height:100%;width:100%;overflow:scroll;font-size:16px;\", this.f.style.cssText = \"max-width:none;display:inline-block;position:absolute;height:100%;width:100%;overflow:scroll;font-size:16px;\", this.h.style.cssText = \"display:inline-block;width:200%;height:200%;font-size:16px;max-width:none;\", this.b.appendChild(this.h), this.c.appendChild(this.f), this.a.appendChild(this.b), this.a.appendChild(this.c) } function r(e, t) { e.a.style.cssText = \"max-width:none;min-width:20px;min-height:20px;display:inline-block;overflow:hidden;position:absolute;width:auto;margin:0;padding:0;top:-999px;left:-999px;white-space:nowrap;font:\" + t + \";\" } function i(e) { var t = e.a.offsetWidth, n = t + 100; return e.f.style.width = n + \"px\", e.c.scrollLeft = n, e.b.scrollLeft = e.b.scrollWidth + 100, e.g !== t ? 
(e.g = t, !0) : !1 } function s(t, n) { function r() { var e = s; i(e) && null !== e.a.parentNode && n(e.g) } var s = t; e(t.b, r), e(t.c, r), i(t) } function o(e, t) { var n = t || {}; this.family = e, this.style = n.style || \"normal\", this.weight = n.weight || \"normal\", this.stretch = n.stretch || \"normal\" } function l() { if (null === a) { var e = document.createElement(\"div\"); try { e.style.font = \"condensed 100px sans-serif\" } catch (t) { } a = \"\" !== e.style.font } return a } function c(e, t) { return [e.style, e.weight, l() ? e.stretch : \"\", \"100px\", t].join(\" \") } var u = null, a = null, f = null; o.prototype.load = function (e, i) { var o = this, a = e || \"BESbswy\", l = i || 3e3, h = (new Date).getTime(); return new Promise(function (e, i) { null === f && (f = !!window.FontFace); if (f) { var p = new Promise(function (e, t) { function n() { (new Date).getTime() - h >= l ? t() : document.fonts.load(c(o, o.family), a).then(function (t) { 1 <= t.length ? e() : setTimeout(n, 25) }, function () { t() }) } n() }), d = new Promise(function (e, t) { setTimeout(t, l) }); Promise.race([d, p]).then(function () { e(o) }, function () { i(o) }) } else t(function () { function t() { var t; if (t = -1 != m && -1 != g || -1 != m && -1 != S || -1 != g && -1 != S) (t = m != g && m != S && g != S) || (null === u && (t = /AppleWebKit\\/([0-9]+)(?:\\.([0-9]+))/.exec(window.navigator.userAgent), u = !!t && (536 > parseInt(t[1], 10) || 536 === parseInt(t[1], 10) && 11 >= parseInt(t[2], 10))), t = u && (m == x && g == x && S == x || m == T && g == T && S == T || m == N && g == N && S == N)), t = !t; t && (null !== C.parentNode && C.parentNode.removeChild(C), clearTimeout(L), e(o)) } function f() { if ((new Date).getTime() - h >= l) null !== C.parentNode && C.parentNode.removeChild(C), i(o); else { var e = document.hidden; if (!0 === e || void 0 === e) m = p.a.offsetWidth, g = d.a.offsetWidth, S = v.a.offsetWidth, t(); L = setTimeout(f, 50) } } var p = new n(a), 
d = new n(a), v = new n(a), m = -1, g = -1, S = -1, x = -1, T = -1, N = -1, C = document.createElement(\"div\"), L = 0; C.dir = \"ltr\", r(p, c(o, \"sans-serif\")), r(d, c(o, \"serif\")), r(v, c(o, \"monospace\")), C.appendChild(p.a), C.appendChild(d.a), C.appendChild(v.a), document.body.appendChild(C), x = p.a.offsetWidth, T = d.a.offsetWidth, N = v.a.offsetWidth, f(), s(p, function (e) { m = e, t() }), r(p, c(o, '\"' + o.family + '\",sans-serif')), s(d, function (e) { g = e, t() }), r(d, c(o, '\"' + o.family + '\",serif')), s(v, function (e) { S = e, t() }), r(v, c(o, '\"' + o.family + '\",monospace')) }) }) }, \"undefined\" != typeof module ? module.exports = o : (window.FontFaceObserver = o, window.FontFaceObserver.prototype.load = o.prototype.load) })();\n var fontASubset = new FontFaceObserver('Source Sans Pro');\n Promise.all([fontASubset.load()]).then(function () {});\n })();\n</script>\n<!-- Google Search site Link start-->\n<script type=\"application/ld+json\">\r\n {\r\n \"@context\": \"http://schema.org\",\r\n \"@type\": \"WebSite\",\r\n \"url\": \"https://www.allrecipes.com/\",\r\n \"potentialAction\": {\r\n \"@type\": \"SearchAction\",\r\n \"target\": \"https://www.allrecipes.com/search/results/?wt={search_term_string}&sort=re\",\r\n \"query-input\": \"required name=search_term_string\"\r\n }\r\n }\r\n </script>\n<!-- Google Search site Link end-->\n<meta content=\"2d256c34-f922-4c16-8f6d-f5bf79c1ac12\" name=\"correlationId\">\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/segment-analytics.bundled.js\"></script>\n<script type=\"text/javascript\">\r\n window.segment = {};\r\n var segmentShim = new SegmentShim(dataLayer, window.segment);\r\n window.segmentAnalytics.page(window.segment);\r\n </script>\n</meta></meta></head>\n<!-- ARLOG SERVER: RD0003FFA88238 LOCAL_IP: 10.255.22.140 MERCH_KEY: MerchData_8_1_1_1_US_11_51_63_68 -->\n<body data-scoby-impression='{\"id\": \"\", \"eventType\": 
\"Allrecipes.HomePage.PageView\", \"eventCategory\": \"Page.View\", \"value\": {\"user\": {\"loginStatus\":\"yes\",\"visitorType\":\"free\"}}}' ng-app=\"allrecipes\">\n<script>\r\n var Pubsub = function () {\r\n \"use strict\";\r\n var cache = {};\r\n var instance = this;\r\n this.isListening = function (topicName, subscriberName) {\r\n var ret = false;\r\n if (cache[topicName]) {\r\n ret = cache[topicName].filter(function (item) {\r\n return item.name == subscriberName;\r\n }).length > 0;\r\n }\r\n return ret;\r\n };\r\n\r\n this.listen = function (topicName, subscriberName, subscribingFunction) {\r\n if (cache[topicName] == undefined) {\r\n cache[topicName] = [];\r\n }\r\n\r\n cache[topicName].push({ name: subscriberName, func: subscribingFunction });\r\n console.log(\"pub sub is listening to \" + topicName + \" for \" + subscriberName);\r\n };\r\n\r\n this.broadcast = function (topicName, args) {\r\n\r\n if (!cache[topicName] || cache[topicName].length < 1) {\r\n return;\r\n }\r\n var i = 0;\r\n do {\r\n console.log(\"listening function \" + cache[topicName][i].name + \" firing for broadcast \" + topicName);\r\n cache[topicName][i].func.apply(null, args || []);\r\n i++;\r\n } while (i < cache[topicName].length);\r\n }\r\n };\r\n </script>\n<a id=\"top\"></a>\n<a class=\"skip-to-content\" href=\"#main-content\">Skip to main content</a>\n<a class=\"newThisMonth\" href=\"/new-this-month/\" rel=\"nofollow\">New<> this month</a>\n<!-- Begin comScore Tag - Part 2 -->\n<noscript>\n<img src=\"https://sb.scorecardresearch.com/p?c1=2&c2=6036305&cv=2.0&cj=1&cs_ucfr=1\"/>\n</noscript>\n<!-- End comScore Tag - Part 2 -->\n<div class=\"slider-container\" global-ui-events=\"\">\n<div class=\"leaderboard-wrapper\" data-ad-container-autocollapse=\"\" id=\"docking-leaderboard-container\">\n<div class=\"docking-leaderboard-container\">\n<div class=\"docking-leaderboard\" data-tier=\"1\" id=\"div-gpt-leaderboard-flex-1\"></div>\n</div>\n</div>\n<div class=\"site-content\">\n<header 
class=\"header new-nav\">\n<div class=\"branch-journeys-top\"></div>\n<section class=\"magazine-bar\">\n<ul class=\"magazine-bar__social\">\n<li>Follow us on:</li>\n<li><a aria-label=\"Pinterest\" class=\"pinterest\" data-header-link-tracking='{\"label\": \"Social > Pinterest\"}' href=\"http://pinterest.com/allrecipes/\" target=\"_blank\" title=\"Pinterest\"><span class=\"svg-icon--social--pinterest svg-icon--social--pinterest-dims\"></span></a></li>\n<li><a aria-label=\"Facebook\" class=\"facebook\" data-header-link-tracking='{\"label\": \"Social > Facebook\"}' href=\"https://www.facebook.com/allrecipes\" target=\"_blank\" title=\"Facebook\"><span class=\"svg-icon--social--facebook svg-icon--social--facebook-dims\"></span></a></li>\n<li><a aria-label=\"Instagram\" class=\"instagram\" data-header-link-tracking='{\"label\": \"Social > Instagram\"}' href=\"http://instagram.com/allrecipes\" target=\"_blank\" title=\"Instagram\"><span class=\"svg-icon--social--instagram svg-icon--social--instagram-dims\"></span></a></li>\n<li><a aria-label=\"Twitter\" class=\"twitter\" data-header-link-tracking='{\"label\": \"Social > Twitter\"}' href=\"https://twitter.com/Allrecipes\" target=\"_blank\" title=\"Twitter\"><span class=\"svg-icon--social--twitter svg-icon--social--twitter-dims\"></span></a></li>\n</ul>\n<a class=\"magazine-bar__link\" data-header-link-tracking='{\"label\": \"Magazine\"}' href=\"http://armagazine.com/upper-nav\" target=\"_blank\">Get the Allrecipes magazine</a>\n</section>\n<section ng-controller=\"ar_controllers_top_nav\" ng-init=\"init()\">\n<ul class=\"ar-nav-section\">\n<li class=\"ar-logo-tab\">\n<a aria-label=\"Allrecipes home page\" data-header-link-tracking='{\"label\": \"Brand Logo\"}' href=\"https://www.allrecipes.com\">\n<div class=\"ar-logo\" ng-click=\"setAnalyticsCookie('ARlogo')\">\n<img alt=\"Allrecipes\" height=\"27\" src=\"https://secureimages.allrecipes.com/ar-images/ARlogoNew.svg\" width=\"110\"/> </div>\n</a>\n</li>\n<li 
class=\"browse-recipes\">\n<a class=\"recipes-txt {active:topBrowseRecipePanel_showing}\" data-header-link-tracking='{\"label\": \"Browse\"}' href=\"\" id=\"navmenu_recipes\" popup-trigger=\"topBrowseRecipePanel\"><span>BROWSE</span><span class=\"icon--chevron-down\"></span></a>\n</li>\n<li class=\"search-tab\" ng-controller=\"ar_controllers_search\">\n<div class=\"nav-search\">\n<input id=\"searchText\" name=\"searchText\" ng-keypress=\"isEnterKey($event) && performSearch()\" ng-model=\"search.keywords\" placeholder=\"Find a recipe\" type=\"text\"/>\n<button aria-label=\"Search\" class=\"btn-basic--small search-button\" ng-click=\"performSearch()\">\n<span class=\"svg-icon--top-nav-bar--search-magnify svg-icon--top-nav-bar--search-magnify-dims\"></span>\n</button>\n<div ar-event-focus=\"click\" ar-event-focus-id=\"setFocus-keywordSearch\" class=\"ingredient-searchtxt\" id=\"ingredientSearch\" popup-trigger=\"topNavSearchMenu\">Ingredient Search</div>\n</div>\n</li>\n<li class=\"social-notification\" ng-class=\"{active: notifications_showing}\" popup-trigger=\"notifications\">\n<a class=\"socialNotification\" href=\"\" ng-click=\"setNotificationsViewed()\" ng-cloak=\"\" ng-controller=\"ar_controllers_notifications\" title=\"Notifications\">\n<span aria-label=\"notifications\" class=\"svg-icon--top-nav-bar--nav-bell svg-icon--top-nav-bar--nav-bell-dims\"></span>\n<span class=\"notification-count\" ng-bind=\"notificationCount\" ng-show=\"displayCount\"></span>\n</a>\n</li>\n<li class=\"nav-favorites\" ng-click=\"setAnalyticsCookie('favorites')\">\n<a aria-label=\"My Favorites\" data-header-link-tracking='{\"label\": \"Favorites\"}' href=\"https://www.allrecipes.com/cook/my/favorites/\" title=\"My Favorites\">\n<span class=\"svg-icon--top-nav-bar--grey-heart svg-icon--top-nav-bar--grey-heart-dims\"></span>\n</a>\n</li>\n<li class=\"nav-profile standard\">\n<a class=\"user--photo\" href=\"https://www.allrecipes.com/cook/my/\">\n<img alt=\"\" class=\"img-profile\" 
ng-click=\"setAnalyticsCookie('icon|profile')\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5674141.jpg\"/>\n</a>\n<div class=\"login-state authenticated-user\" ng-class=\"{active:topNavProfileMenu_showing}\" popup-trigger=\"topNavProfileMenu\">\n<span class=\"username\" id=\"offCanvasDisplayName\">Test</span>\n<span class=\"icon--chevron-down\"></span>\n</div>\n</li>\n<li class=\"small-screen search-phone--landscape\" ng-class=\"{active:topNavSearchMenu_showing}\">\n<a ar-event-focus=\"click\" ar-event-focus-id=\"setFocus-keywordSearch\" href=\"\" popup-trigger=\"topNavSearchMenu\">\n<div class=\"nav-search\">\n<span class=\"svg-icon--top-nav-bar--search-magnify-gray svg-icon--top-nav-bar--search-magnify-gray-dims\"></span>\n</div>\n</a>\n</li>\n<li class=\"small-screen profile-phone--landscape\" ng-class=\"{active:topNavProfileMenu_showing}\" popup-trigger=\"topNavProfileMenu\">\n<a aria-label=\"Open Profile\" data-link-tracking='{\"label\": \"Open Profile\"}' href=\"\">\n<div class=\"login-state\">\n<img alt=\"\" class=\"img-profile\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5674141.jpg\"/>\n</div>\n</a>\n</li>\n<li class=\"hamburger-tab\" ng-class=\"{active: topNavHamburgerMenu_showing}\" popup-trigger=\"topNavHamburgerMenu\">\n<a aria-label=\"secondary\" data-link-tracking='{\"label\": \"Open Hamburger Menu\"}' href=\"\" ng-click=\"trackHamburgerMenuToggle(this)\" ng-switch=\"\" role=\"navigation\" title=\"More menu\">\n<div class=\"hamburger-nav\">\n<span class=\"browse-recipes-iconbar\"></span>\n<span class=\"browse-recipes-iconbar\"></span>\n<span class=\"browse-recipes-iconbar\"></span>\n</div>\n</a>\n</li>\n</ul>\n<social-notification ng-cloak=\"\" popup-panel=\"notifications\"></social-notification>\n<div class=\"nav-tab nav-tab__search ng-hide\" ng-cloak=\"\" popup-panel=\"topNavSearchMenu\">\n<form>\n<div data-ng-controller=\"ar_controllers_search\">\n<span class=\"icon--close\" hidewhenclicked=\"\" title=\"Close 
Ingredient Search\"></span>\n<div class=\"input-wrap--home\">\n<span class=\"svg-icon--top-nav-bar--search-magnify-gray svg-icon--top-nav-bar--search-magnify-gray-dims\"></span>\n<input class=\"setFocus-keywordSearch\" id=\"searchText\" ng-model=\"search.keywords\" placeholder=\"Keywords\" type=\"text\"/>\n</div>\n<div class=\"input-wrap--home ingredients\">\n<div class=\"ingredient-clipping-frame\">\n<ul class=\"ingredient-scroller\" onselectstart=\"return false;\" unselectable=\"on\">\n<li ng-repeat=\"ingredient in search.ingredientsInclude\">\n<span>\n<span ng-bind=\"::ingredient\"></span>\n<span class=\"icon--x\" ng-click=\"removeIngredientInclude(ingredient, $event)\" unsubscribe-global-click-handler=\"\">✕</span>\n</span>\n</li>\n</ul>\n<div class=\"ingredient-add-exclude\">\n<input class=\"setFocus-includeIng\" id=\"includeIngText\" name=\"txtIncludeIng\" ng-attr-placeholder=\"{{includeIngPlaceholderText}}\" ng-keydown=\"(isBackspaceKey($event) && removeLastIngredientInclude($event)) || (isTabKey($event) && addIngredientInclude($event))\" ng-model=\"includeIngredient\" type=\"text\"/>\n</div>\n</div>\n<a ar-event-focus=\"click\" ar-event-focus-id=\"setFocus-includeIng\" class=\"btn-basic--small include\" ng-class=\"{ 'grayed-out': includeIngHitMax }\" ng-click=\"addIngredientInclude($event)\"><span>+</span></a>\n</div>\n<div class=\"input-wrap--home ingredients\">\n<div class=\"ingredient-clipping-frame\">\n<ul class=\"ingredient-scroller\" onselectstart=\"return false;\" unselectable=\"on\">\n<li ng-repeat=\"ingredient in search.ingredientsExclude\">\n<span class=\"exclude-item\">\n<span ng-bind=\"::ingredient\"></span>\n<span class=\"icon--x\" ng-click=\"removeIngredientExclude(ingredient, $event)\" unsubscribe-global-click-handler=\"\">✕</span>\n</span>\n</li>\n</ul>\n<div class=\"ingredient-add-exclude\">\n<input class=\"setFocus-excludeIng\" id=\"excludeIngText\" name=\"txtExcludeIng\" ng-attr-placeholder=\"{{excludeIngPlaceholderText}}\" 
ng-keydown=\"(isBackspaceKey($event) && removeLastIngredientExclude($event)) || (isTabKey($event) && addIngredientExclude($event))\" ng-model=\"excludeIngredient\" type=\"text\"/>\n</div>\n</div>\n<a ar-event-focus=\"click\" ar-event-focus-id=\"setFocus-excludeIng\" class=\"btn-basic--small exclude\" ng-class=\"{ 'grayed-out': excludeIngHitMax }\" ng-click=\"addIngredientExclude($event)\"><span>—</span></a>\n</div>\n<div class=\"nav-tab__buttons\">\n<button class=\"btn-basic--small btn-search\" ng-click=\"performSearch()\" ng-cloak=\"\">Go</button>\n</div>\n</div>\n<ar-notification></ar-notification>\n</form>\n</div>\n<div class=\"browse-recipe-tab social ng-hide\" id=\"topBrowseRecipePanel\" ng-cloak=\"\" popup-panel=\"topBrowseRecipePanel\">\n<section class=\"hero-link nav-tab__options recipe-nav-tab__options\">\n<div class=\"grid underline_hero_link\">\n<ul class=\"browse-hubs\">\n<li class=\"browse-hubs__categories\">\n<h3>\n Meal Type\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Appetizers & Snacks\"}' href=\"https://www.allrecipes.com/recipes/76/appetizers-and-snacks/\" ng-click=\"setAnalyticsCookie('browse|appetizers \\u0026 snacks')\" title=\"Appetizers & Snacks Recipes\">\n Appetizers & Snacks\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Breakfast & Brunch\"}' href=\"https://www.allrecipes.com/recipes/78/breakfast-and-brunch/\" ng-click=\"setAnalyticsCookie('browse|breakfast \\u0026 brunch')\" title=\"Breakfast & Brunch Recipes\">\n Breakfast & Brunch\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Desserts\"}' href=\"https://www.allrecipes.com/recipes/79/desserts/\" ng-click=\"setAnalyticsCookie('browse|desserts')\" title=\"Desserts Recipes\">\n Desserts\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Dinner\"}' 
href=\"https://www.allrecipes.com/recipes/17562/dinner/\" ng-click=\"setAnalyticsCookie('browse|dinner')\" title=\"Dinner Recipes\">\n Dinner\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Drinks\"}' href=\"https://www.allrecipes.com/recipes/77/drinks/\" ng-click=\"setAnalyticsCookie('browse|drinks')\" title=\"Drinks Recipes\">\n Drinks\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Ingredient\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Beef\"}' href=\"https://www.allrecipes.com/recipes/200/meat-and-poultry/beef/\" ng-click=\"setAnalyticsCookie('browse|beef')\" title=\"Beef Recipes\">\n Beef\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Chicken\"}' href=\"https://www.allrecipes.com/recipes/201/meat-and-poultry/chicken/\" ng-click=\"setAnalyticsCookie('browse|chicken')\" title=\"Chicken Recipes\">\n Chicken\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Pasta\"}' href=\"https://www.allrecipes.com/recipes/95/pasta-and-noodles/\" ng-click=\"setAnalyticsCookie('browse|pasta')\" title=\"Pasta Recipes\">\n Pasta\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Pork\"}' href=\"https://www.allrecipes.com/recipes/205/meat-and-poultry/pork/\" ng-click=\"setAnalyticsCookie('browse|pork')\" title=\"Pork Recipes\">\n Pork\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Salmon\"}' href=\"https://www.allrecipes.com/recipes/416/seafood/fish/salmon/\" ng-click=\"setAnalyticsCookie('browse|salmon')\" title=\"Salmon Recipes\">\n Salmon\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Diet & Health\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": 
\"Browse Recipes > Diabetic\"}' href=\"https://www.allrecipes.com/recipes/739/healthy-recipes/diabetic/\" ng-click=\"setAnalyticsCookie('browse|diabetic')\" title=\"Diabetic Recipes\">\n Diabetic\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Gluten Free\"}' href=\"https://www.allrecipes.com/recipes/741/healthy-recipes/gluten-free/\" ng-click=\"setAnalyticsCookie('browse|gluten free')\" title=\"Gluten Free Recipes\">\n Gluten Free\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Healthy\"}' href=\"https://www.allrecipes.com/recipes/84/healthy-recipes/\" ng-click=\"setAnalyticsCookie('browse|healthy')\" title=\"Healthy Recipes\">\n Healthy\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Low Calorie\"}' href=\"https://www.allrecipes.com/recipes/1232/healthy-recipes/low-calorie/\" ng-click=\"setAnalyticsCookie('browse|low calorie')\" title=\"Low Calorie Recipes\">\n Low Calorie\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Low Fat\"}' href=\"https://www.allrecipes.com/recipes/1231/healthy-recipes/low-fat/\" ng-click=\"setAnalyticsCookie('browse|low fat')\" title=\"Low Fat Recipes\">\n Low Fat\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Seasonal\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Lunar New Year\"}' href=\"https://www.allrecipes.com/recipes/17668/holidays-and-events/lunar-new-year/\" ng-click=\"setAnalyticsCookie('browse|lunar new year')\" title=\"Lunar New Year Recipes\">\n Lunar New Year\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Recipes for the Super Bowl®\"}' href=\"https://www.allrecipes.com/recipes/1419/holidays-and-events/big-game/\" ng-click=\"setAnalyticsCookie('browse|recipes for the super bowl®')\" title=\"Recipes for the Super 
Bowl®\">\n Recipes for the Super Bowl®\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Valentines Day\"}' href=\"https://www.allrecipes.com/recipes/199/holidays-and-events/valentines-day/\" ng-click=\"setAnalyticsCookie('browse|valentine\\u0027s day')\" title=\"Valentine's Day Recipes\">\n Valentine's Day\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Mardi Gras\"}' href=\"https://www.allrecipes.com/recipes/192/holidays-and-events/mardi-gras/\" ng-click=\"setAnalyticsCookie('browse|mardi gras')\" title=\"Mardi Gras Recipes\">\n Mardi Gras\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > More Holidays and Events\"}' href=\"https://www.allrecipes.com/recipes/85/holidays-and-events/\" ng-click=\"setAnalyticsCookie('browse|more holidays and events')\" title=\"More Holidays and Events Recipes\">\n More Holidays and Events\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Dish Type\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Breads\"}' href=\"https://www.allrecipes.com/recipes/156/bread/\" ng-click=\"setAnalyticsCookie('browse|breads')\" title=\"Breads Recipes\">\n Breads\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Cakes\"}' href=\"https://www.allrecipes.com/recipes/276/desserts/cakes/\" ng-click=\"setAnalyticsCookie('browse|cakes')\" title=\"Cakes Recipes\">\n Cakes\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Salads\"}' href=\"https://www.allrecipes.com/recipes/96/salad/\" ng-click=\"setAnalyticsCookie('browse|salads')\" title=\"Salads Recipes\">\n Salads\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Smoothies\"}' href=\"https://www.allrecipes.com/recipes/138/drinks/smoothies/\" 
ng-click=\"setAnalyticsCookie('browse|smoothies')\" title=\"Smoothies Recipes\">\n Smoothies\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Soups, Stews & Chili\"}' href=\"https://www.allrecipes.com/recipes/94/soups-stews-and-chili/\" ng-click=\"setAnalyticsCookie('browse|soups, stews \\u0026 chili')\" title=\"Soups, Stews & Chili Recipes\">\n Soups, Stews & Chili\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Cooking Style\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > BBQ & Grilling\"}' href=\"https://www.allrecipes.com/recipes/88/bbq-grilling/\" ng-click=\"setAnalyticsCookie('browse|bbq \\u0026 grilling')\" title=\"BBQ & Grilling Recipes\">\n BBQ & Grilling\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Quick & Easy\"}' href=\"https://www.allrecipes.com/recipes/1947/everyday-cooking/quick-and-easy/\" ng-click=\"setAnalyticsCookie('browse|quick \\u0026 easy')\" title=\"Quick & Easy Recipes\">\n Quick & Easy\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Slow Cooker\"}' href=\"https://www.allrecipes.com/recipes/253/everyday-cooking/slow-cooker/\" ng-click=\"setAnalyticsCookie('browse|slow cooker')\" title=\"Slow Cooker Recipes\">\n Slow Cooker\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Vegan\"}' href=\"https://www.allrecipes.com/recipes/1227/everyday-cooking/vegan/\" ng-click=\"setAnalyticsCookie('browse|vegan')\" title=\"Vegan Recipes\">\n Vegan\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Vegetarian\"}' href=\"https://www.allrecipes.com/recipes/87/everyday-cooking/vegetarian/\" ng-click=\"setAnalyticsCookie('browse|vegetarian')\" title=\"Vegetarian Recipes\">\n Vegetarian\n </a>\n</li>\n</ul>\n</li>\n<li 
class=\"browse-hubs__categories\">\n<h3>\n World Cuisine\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Asian\"}' href=\"https://www.allrecipes.com/recipes/227/world-cuisine/asian/\" ng-click=\"setAnalyticsCookie('browse|asian')\" title=\"Asian Recipes\">\n Asian\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Indian\"}' href=\"https://www.allrecipes.com/recipes/233/world-cuisine/asian/indian/\" ng-click=\"setAnalyticsCookie('browse|indian')\" title=\"Indian Recipes\">\n Indian\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Italian\"}' href=\"https://www.allrecipes.com/recipes/723/world-cuisine/european/italian/\" ng-click=\"setAnalyticsCookie('browse|italian')\" title=\"Italian Recipes\">\n Italian\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Mexican\"}' href=\"https://www.allrecipes.com/recipes/728/world-cuisine/latin-american/mexican/\" ng-click=\"setAnalyticsCookie('browse|mexican')\" title=\"Mexican Recipes\">\n Mexican\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Southern\"}' href=\"https://www.allrecipes.com/recipes/15876/us-recipes/southern/\" ng-click=\"setAnalyticsCookie('browse|southern')\" title=\"Southern Recipes\">\n Southern\n </a>\n</li>\n</ul>\n</li>\n<li class=\"browse-hubs__categories\">\n<h3>\n Special Collections\n </h3><span class=\"icon--chevron-right\"></span>\n<ul class=\"browse-hubs__subcategories\">\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Food Wishes with Chef John\"}' href=\"https://www.allrecipes.com/recipes/16791/everyday-cooking/special-collections/web-show-recipes/food-wishes/\" ng-click=\"setAnalyticsCookie('browse|food wishes with chef john')\" title=\"Food Wishes with Chef John Recipes\">\n Food Wishes with Chef John\n </a>\n</li>\n<li>\n<a 
data-header-link-tracking='{\"label\": \"Browse Recipes > Allrecipes Magazine Recipes\"}' href=\"https://www.allrecipes.com/recipes/17235/everyday-cooking/allrecipes-magazine-recipes/\" ng-click=\"setAnalyticsCookie('browse|allrecipes magazine recipes')\" title=\"Allrecipes Magazine Recipes\">\n Allrecipes Magazine Recipes\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Our Newest Recipes\"}' href=\"https://www.allrecipes.com/recipes/22908/everyday-cooking/special-collections/new/\" ng-click=\"setAnalyticsCookie('browse|our newest recipes')\" title=\"Our Newest Recipes\">\n Our Newest Recipes\n </a>\n</li>\n<li>\n<a data-header-link-tracking='{\"label\": \"Browse Recipes > Trusted Brands\"}' href=\"http://dish.allrecipes.com/trusted-brand-pages/\" ng-click=\"setAnalyticsCookie('browse|trusted brands')\" title=\"Trusted Brands Recipes\">\n Trusted Brands\n </a>\n</li>\n</ul>\n</li>\n</ul>\n</div>\n<a class=\"recipe-hero-link__item__text\" href=\"https://www.allrecipes.com/recipes/\" ng-click=\"setAnalyticsCookie('browse|all categories')\">All Categories</a>\n</section>\n</div>\n<!-- user sign in area -->\n<div class=\"nav-tab social profile-nav ng-hide\" ng-cloak=\"\" popup-panel=\"topNavProfileMenu\">\n<ul class=\"nav-tab__options\">\n<li ng-click=\"setAnalyticsCookie('profile|feed', 'menu')\">\n<a href=\"https://www.allrecipes.com/\" id=\"navmenu_myFeed\">\n<span class=\"nav-icon svg-icon--top-nav-bar--home svg-icon--top-nav-bar--home-dims\"></span>\n<span class=\"itemText\">Feed</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|profile', 'menu')\">\n<a href=\"https://www.allrecipes.com/cook/my/\" id=\"navmenu_myprofile\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-profile svg-icon--top-nav-bar--nav-profile-dims\"></span>\n<span class=\"itemText\">Profile</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|favorites', 'menu')\">\n<a href=\"https://www.allrecipes.com/cook/my/favorites/\" 
id=\"navmenu_recipebox\" rel=\"nofollow\">\n<span class=\"nav-icon svg-icon--top-nav-bar--grey-heart svg-icon--top-nav-bar--grey-heart-dims\"></span>\n<span class=\"itemText\">Favorites</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|friends', 'menu')\">\n<a href=\"https://www.allrecipes.com/cook/my/findfriends/\" id=\"navmenu_findfriends\" rel=\"nofollow\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-friends svg-icon--top-nav-bar--nav-friends-dims\"></span>\n<span class=\"itemText\">Friends</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|shopping list', 'menu')\">\n<a href=\"https://www.allrecipes.com/my/shopping-lists/\" id=\"navmenu_shoppinglist\" rel=\"nofollow\">\n<span class=\"nav-icon svg-icon--top-nav-bar--grey-shopping svg-icon--top-nav-bar--grey-shopping-dims\"></span>\n<span class=\"itemText\">Shopping List</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('profile|settings', 'menu')\">\n<a href=\"https://www.allrecipes.com/cook/my/account-settings/\" id=\"navmenu_settings\" rel=\"nofollow\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-settings svg-icon--top-nav-bar--nav-settings-dims\"></span>\n<span class=\"itemText\">Settings</span>\n</a>\n</li>\n</ul>\n<div class=\"signout\" ng-click=\"setAnalyticsCookie('profile|sign out ', 'menu')\">\n<button class=\"btn-basic--large\" id=\"offCanvasSignOutBtn\" onclick=\"location.href='https://www.allrecipes.com/account/signout' ; \">Sign out</button>\n</div>\n</div>\n<!-- hub links, etc. 
-->\n<div class=\"nav-tab last ng-hide\" ng-cloak=\"\" popup-panel=\"topNavHamburgerMenu\">\n<ul class=\"nav-tab__options\">\n<li class=\"underline_link\">\n<a href=\"\" id=\"navmenu_recipes\" ng-click=\"browseNav()\" popup-trigger=\"browseRecipePanel\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-browse-orange svg-icon--top-nav-bar--nav-browse-orange-dims\" ng-class=\"{'active': isActive}\"></span>\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-browse svg-icon--top-nav-bar--nav-browse-dims\" ng-class=\"{'hidden': isActive}\"></span>\n<span class=\"nav-link-text\">Browse Recipes</span>\n<span class=\"icon-chevron\" ng-class=\"{'active': isActive}\"></span>\n</a>\n</li>\n<li class=\"browse-div-option ng-hide\" id=\"mobile-nav-container\" popup-panel=\"browseRecipePanel\">\n<ul class=\"nav-tab__mobile-browse\">\n<li>\n<input id=\"Meal Type\" type=\"checkbox\"/><label for=\"Meal Type\">Meal Type<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Appetizers & Snacks\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/76/appetizers-and-snacks/\" ng-click=\"setAnalyticsCookie('browse|appetizers \\u0026 snacks')\" title=\"Appetizers & Snacks Recipes\">Appetizers & Snacks</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Breakfast & Brunch\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/78/breakfast-and-brunch/\" ng-click=\"setAnalyticsCookie('browse|breakfast \\u0026 brunch')\" title=\"Breakfast & Brunch Recipes\">Breakfast & Brunch</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Desserts\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/79/desserts/\" ng-click=\"setAnalyticsCookie('browse|desserts')\" title=\"Desserts Recipes\">Desserts</a></li>\n<li><a 
data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Dinner\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/17562/dinner/\" ng-click=\"setAnalyticsCookie('browse|dinner')\" title=\"Dinner Recipes\">Dinner</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Drinks\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/77/drinks/\" ng-click=\"setAnalyticsCookie('browse|drinks')\" title=\"Drinks Recipes\">Drinks</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Ingredient\" type=\"checkbox\"/><label for=\"Ingredient\">Ingredient<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Beef\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/200/meat-and-poultry/beef/\" ng-click=\"setAnalyticsCookie('browse|beef')\" title=\"Beef Recipes\">Beef</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Chicken\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/201/meat-and-poultry/chicken/\" ng-click=\"setAnalyticsCookie('browse|chicken')\" title=\"Chicken Recipes\">Chicken</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Pasta\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/95/pasta-and-noodles/\" ng-click=\"setAnalyticsCookie('browse|pasta')\" title=\"Pasta Recipes\">Pasta</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Pork\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/205/meat-and-poultry/pork/\" ng-click=\"setAnalyticsCookie('browse|pork')\" title=\"Pork Recipes\">Pork</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Salmon\", \"eventName\": 
\"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/416/seafood/fish/salmon/\" ng-click=\"setAnalyticsCookie('browse|salmon')\" title=\"Salmon Recipes\">Salmon</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Diet & Health\" type=\"checkbox\"/><label for=\"Diet & Health\">Diet & Health<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Diabetic\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/739/healthy-recipes/diabetic/\" ng-click=\"setAnalyticsCookie('browse|diabetic')\" title=\"Diabetic Recipes\">Diabetic</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Gluten Free\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/741/healthy-recipes/gluten-free/\" ng-click=\"setAnalyticsCookie('browse|gluten free')\" title=\"Gluten Free Recipes\">Gluten Free</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Healthy\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/84/healthy-recipes/\" ng-click=\"setAnalyticsCookie('browse|healthy')\" title=\"Healthy Recipes\">Healthy</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Low Calorie\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1232/healthy-recipes/low-calorie/\" ng-click=\"setAnalyticsCookie('browse|low calorie')\" title=\"Low Calorie Recipes\">Low Calorie</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Low Fat\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1231/healthy-recipes/low-fat/\" ng-click=\"setAnalyticsCookie('browse|low fat')\" title=\"Low Fat Recipes\">Low Fat</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Seasonal\" type=\"checkbox\"/><label 
for=\"Seasonal\">Seasonal<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Lunar New Year\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/17668/holidays-and-events/lunar-new-year/\" ng-click=\"setAnalyticsCookie('browse|lunar new year')\" title=\"Lunar New Year Recipes\">Lunar New Year</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Recipes for the Super Bowl®\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1419/holidays-and-events/big-game/\" ng-click=\"setAnalyticsCookie('browse|recipes for the super bowl®')\" title=\"Recipes for the Super Bowl®\">Recipes for the Super Bowl®</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Valentines Day\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/199/holidays-and-events/valentines-day/\" ng-click=\"setAnalyticsCookie('browse|valentine\\u0027s day')\" title=\"Valentine's Day Recipes\">Valentine's Day</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Mardi Gras\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/192/holidays-and-events/mardi-gras/\" ng-click=\"setAnalyticsCookie('browse|mardi gras')\" title=\"Mardi Gras Recipes\">Mardi Gras</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > More Holidays and Events\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/85/holidays-and-events/\" ng-click=\"setAnalyticsCookie('browse|more holidays and events')\" title=\"More Holidays and Events Recipes\">More Holidays and Events</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Dish Type\" type=\"checkbox\"/><label for=\"Dish Type\">Dish Type<span 
class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Breads\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/156/bread/\" ng-click=\"setAnalyticsCookie('browse|breads')\" title=\"Breads Recipes\">Breads</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Cakes\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/276/desserts/cakes/\" ng-click=\"setAnalyticsCookie('browse|cakes')\" title=\"Cakes Recipes\">Cakes</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Salads\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/96/salad/\" ng-click=\"setAnalyticsCookie('browse|salads')\" title=\"Salads Recipes\">Salads</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Smoothies\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/138/drinks/smoothies/\" ng-click=\"setAnalyticsCookie('browse|smoothies')\" title=\"Smoothies Recipes\">Smoothies</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Soups, Stews & Chili\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/94/soups-stews-and-chili/\" ng-click=\"setAnalyticsCookie('browse|soups, stews \\u0026 chili')\" title=\"Soups, Stews & Chili Recipes\">Soups, Stews & Chili</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Cooking Style\" type=\"checkbox\"/><label for=\"Cooking Style\">Cooking Style<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > BBQ & Grilling\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/88/bbq-grilling/\" 
ng-click=\"setAnalyticsCookie('browse|bbq \\u0026 grilling')\" title=\"BBQ & Grilling Recipes\">BBQ & Grilling</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Quick & Easy\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1947/everyday-cooking/quick-and-easy/\" ng-click=\"setAnalyticsCookie('browse|quick \\u0026 easy')\" title=\"Quick & Easy Recipes\">Quick & Easy</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Slow Cooker\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/253/everyday-cooking/slow-cooker/\" ng-click=\"setAnalyticsCookie('browse|slow cooker')\" title=\"Slow Cooker Recipes\">Slow Cooker</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Vegan\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/1227/everyday-cooking/vegan/\" ng-click=\"setAnalyticsCookie('browse|vegan')\" title=\"Vegan Recipes\">Vegan</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Vegetarian\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/87/everyday-cooking/vegetarian/\" ng-click=\"setAnalyticsCookie('browse|vegetarian')\" title=\"Vegetarian Recipes\">Vegetarian</a></li>\n</ul>\n</li>\n<li>\n<input id=\"World Cuisine\" type=\"checkbox\"/><label for=\"World Cuisine\">World Cuisine<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Asian\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/227/world-cuisine/asian/\" ng-click=\"setAnalyticsCookie('browse|asian')\" title=\"Asian Recipes\">Asian</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Indian\", \"eventName\": \"Hamburger Nav Action 
Taken\"}' href=\"https://www.allrecipes.com/recipes/233/world-cuisine/asian/indian/\" ng-click=\"setAnalyticsCookie('browse|indian')\" title=\"Indian Recipes\">Indian</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Italian\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/723/world-cuisine/european/italian/\" ng-click=\"setAnalyticsCookie('browse|italian')\" title=\"Italian Recipes\">Italian</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Mexican\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/728/world-cuisine/latin-american/mexican/\" ng-click=\"setAnalyticsCookie('browse|mexican')\" title=\"Mexican Recipes\">Mexican</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Southern\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/15876/us-recipes/southern/\" ng-click=\"setAnalyticsCookie('browse|southern')\" title=\"Southern Recipes\">Southern</a></li>\n</ul>\n</li>\n<li>\n<input id=\"Special Collections\" type=\"checkbox\"/><label for=\"Special Collections\">Special Collections<span class=\"icon-chevron\"></span></label>\n<ul class=\"mobile-browse-subnav\">\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Food Wishes with Chef John\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/16791/everyday-cooking/special-collections/web-show-recipes/food-wishes/\" ng-click=\"setAnalyticsCookie('browse|food wishes with chef john')\" title=\"Food Wishes with Chef John Recipes\">Food Wishes with Chef John</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Allrecipes Magazine Recipes\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/17235/everyday-cooking/allrecipes-magazine-recipes/\" 
ng-click=\"setAnalyticsCookie('browse|allrecipes magazine recipes')\" title=\"Allrecipes Magazine Recipes\">Allrecipes Magazine Recipes</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Our Newest Recipes\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"https://www.allrecipes.com/recipes/22908/everyday-cooking/special-collections/new/\" ng-click=\"setAnalyticsCookie('browse|our newest recipes')\" title=\"Our Newest Recipes\">Our Newest Recipes</a></li>\n<li><a data-link-tracking='{\"label\": \"Hamburger Menu > Browse Recipes > Trusted Brands\", \"eventName\": \"Hamburger Nav Action Taken\"}' href=\"http://dish.allrecipes.com/trusted-brand-pages/\" ng-click=\"setAnalyticsCookie('browse|trusted brands')\" title=\"Trusted Brands Recipes\">Trusted Brands</a></li>\n</ul>\n</li>\n</ul>\n<div class=\"see-all\"><a href=\"https://www.allrecipes.com/recipes/\" target=\"_self\">See all categories</a></div>\n</li>\n<li ng-click=\"setAnalyticsData('allrecipes magazine')\">\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Magazine\", \"eventName\": \"Header Action Taken\"}' href=\"https://www.magazine.store/allrecipes-magazine/\" id=\"navmenu_magazine\" rel=\"noopener\" target=\"_blank\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-magazine svg-icon--top-nav-bar--nav-magazine-dims\"></span>\n<span>Allrecipes Magazine</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsData('dinner spinner tv')\">\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Dinner Spinner TV\", \"eventName\": \"Header Action Taken\"}' href=\"http://dish.allrecipes.com/dinner-spinner-tv-show/\" id=\"navmenu_tv\" rel=\"noopener\">\n<span class=\"nav-icon svg-icon--top-nav-bar--tv_icon svg-icon--top-nav-bar--tv_icon-dims\"></span>\n<span>Dinner Spinner TV</span>\n</a>\n</li>\n<li class=\"underline_link\" ng-click=\"setAnalyticsData('cooking school')\">\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Cooking School\", \"eventName\": \"Header 
Action Taken\"}' href=\"http://cookingschool.allrecipes.com/\" id=\"navmenu_cooking_school\" rel=\"noopener\" target=\"_blank\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-cookingschool svg-icon--top-nav-bar--nav-cookingschool-dims\"></span>\n<span>Cooking School</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsCookie('newsletters')\">\n<a data-link-tracking='{\"label\": \"Hambuger Menu > Newsletters\", \"eventName\": \"Header Action Taken\"}' href=\"https://www.allrecipes.com/cook/my/account-settings/#NewslettersSubscription\" id=\"navmenu_social_gallery\" rel=\"noopener\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-newsletters svg-icon--top-nav-bar--nav-newsletters-dims\"></span>\n<span>Newsletters</span>\n</a>\n</li>\n<li ng-click=\"setAnalyticsData('ask the community')\">\n<a data-link-tracking='{\"label\": \"Hambuger Menu > Ask The Community\", \"eventName\": \"Header Action Taken\"}' href=\"http://dish.allrecipes.com/ask-the-community/\" id=\"navmenu_dish\" rel=\"noopener\" target=\"_self\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-community svg-icon--top-nav-bar--nav-community-dims\"></span>\n<span>Ask the Community</span>\n</a>\n</li>\n<li class=\"underline_link\" ng-click=\"setAnalyticsData('help')\">\n<a data-link-tracking='{\"label\": \"Hambuger Menu > Help\", \"eventName\": \"Header Action Taken\"}' href=\"http://dish.allrecipes.com/customer-service/\" id=\"navmenu_help\" rel=\"noopener\" target=\"_self\">\n<span class=\"nav-icon svg-icon--top-nav-bar--nav-help svg-icon--top-nav-bar--nav-help-dims\"></span>\n<span>Help</span>\n</a>\n</li>\n<li>\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Jobs\", \"eventName\": \"Header Action Taken\"}' href=\"http://dish.allrecipes.com/allrecipes-jobs-2/\" ng-click=\"setAnalyticsData('jobs')\" rel=\"noopener\" target=\"_self\">Jobs</a>\n<a data-link-tracking='{\"label\": \"Hamburger Menu > Newsroom\", \"eventName\": \"Header Action Taken\"}' href=\"http://press.allrecipes.com/\" 
ng-click=\"setAnalyticsData('newsroom')\" rel=\"noopener\">Newsroom</a>\n</li>\n</ul>\n</div>\n</section>\n</header>\n<div class=\"container-content body-content\" id=\"main-content\">\n<div id=\"ad-is-mobile\"></div>\n<div id=\"ad-is-tablet\"></div>\n<script>\r\n (function($) {\r\n \r\n window.adConfiguration = {\r\n \"settings\": {\r\n \"responsiveGridSlots\": 3 }\r\n };\r\n\r\n \r\n var mobileAdElem = document.getElementById('ad-is-mobile');\r\n var isMobileAds = !(mobileAdElem.offsetWidth === 0 && mobileAdElem.offsetHeight === 0);\r\n\r\n var tabletAdElem = document.getElementById('ad-is-tablet');\r\n \r\n var isTablet = tabletAdElem && getComputedStyle(tabletAdElem)['display'] === 'none';\r\n\r\n \r\n window.karma = window.karma || {};\r\n window.karma.config = {\r\n apiVersion: 3,\r\n allTiersAllTheTime: true,\r\n isDesktop: window.innerWidth > 1024,\r\n isMobile: isMobileAds,\r\n tabletAds: isTablet,\r\n unitValues: {\r\n channel: \"home\"\r\n },\r\n targeting: {\r\n type: \"homepage\",\r\n mention_category: \"\",\r\n mention: \"\",\r\n channel: \"home\", \r\n ref_hub: window.refHub || false,\r\n \"status\": \"freeloggedin\",\n \"oid\": \"\",\n \"fit\": \"0\",\n \"id\": \"1\"\r\n },\r\n suppressInterstitial: true\r\n };\r\n\r\n !function (a, e) {\r\n if (window.AR_suppress_karma) {\r\n return true;\r\n }\r\n function r() {\r\n if (a.karma.vars = a.karma.vars || {}, !(a.karma.vars.kismetReported || null !== e.querySelector('img[src$=\"kismet/spacer.png\"]'))) {\r\n a.karma.vars.kismetReported = !0;\r\n var r = e.createElement(\"img\");\r\n r.src = \"/kismet/spacer.png\", e.body.appendChild(r)\r\n }\r\n }\r\n a.karma.cmd = a.karma.cmd || [], a.karma.config.go = function () {\r\n a.karma.cmd.push(\"go\")\r\n };\r\n var t = e.createElement(\"script\");\r\n t.src = \"https://karma.mdpcdn.com/service/js-min/karma.js\", t.onload = t.onreadystatechange = function () {\r\n var a = this.readyState;\r\n a && \"complete\" != a && \"loaded\" != a && r()\r\n }, t.onerror 
= r;\r\n var m = e.getElementsByTagName(\"script\")[0];\r\n m.parentNode.insertBefore(t, m)\r\n }(window, document);\r\n\r\n })(); \r\n\r\n</script>\n<div class=\"ad-siteskin\" id=\"ad-siteskin\" style=\"display: none\">\n<div data-tier=\"1\" id=\"div-gpt-mob-siteSkin\"></div>\n</div>\n<section class=\"home-page full-page\" id=\"ar_home_index\" ng-controller=\"arControllersHome\" ng-init=\"init({category: 'Homepage', contentType: 'Homepage'})\">\n<section class=\"slider\">\n<article class=\"slider__slide\">\n<a data-carousel-link=\"\" data-internal-referrer=\"hp_carousel 01_Comfort Food Casseroles Ready In An Hour\" data-referring-position=\"carousel 01\" href=\"https://www.allrecipes.com/article/comfort-food-casseroles-ready-in-60-minutes/\">\n<div class=\"slider__text\">\n<h3>Comfort Food Casseroles Ready In An Hour</h3>\n<p class=\"slider__description\">Here are a dozen homey ways to get dinner on the table <i>fast</i>.</p>\n</div>\n<img alt=\"Baked Ziti with Sausage\" class=\"slider__photo\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/images/72337.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\"/>\n</a>\n</article>\n<article class=\"slider__slide\">\n<a data-carousel-link=\"\" data-internal-referrer=\"hp_carousel 02_How to Make the Best Hot Chocolate\" data-referring-position=\"carousel 02\" href=\"https://www.allrecipes.com/article/how-to-make-hot-chocolate/\">\n<div class=\"slider__text\">\n<h3>How to Make the Best Hot Chocolate</h3>\n<p class=\"slider__description\">This decadent winter treat is more dessert than drink…if you're doing it right. 
Here's how to pull it off!</p>\n</div>\n<img alt=\"Creamy Hot Cocoa\" class=\"slider__photo\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/images/77846.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\"/>\n</a>\n</article>\n<article class=\"slider__slide\">\n<a data-carousel-link=\"\" data-internal-referrer=\"hp_carousel 03_Chef John's Buffalo Wings In A Jar\" data-referring-position=\"carousel 03\" href=\"https://www.allrecipes.com/video/9848/buffalo-chicken-wings-in-a-jar/\">\n<div class=\"slider__text\">\n<h3>Chef John's Buffalo Wings In A Jar</h3>\n<p class=\"slider__description\">Get all the spicy Buffalo chicken flavor you love in a no-fuss, no-muss package! See how to make this winning game day appetizer.</p>\n</div>\n<img alt=\"Buffalo Chicken Wings in a Jar\" class=\"slider__photo\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/images/90930.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\"/>\n</a>\n</article>\n<article class=\"slider__slide\">\n<a data-carousel-link=\"\" data-internal-referrer=\"hp_carousel 04_Recipes for Ground Turkey\" data-referring-position=\"carousel 04\" href=\"https://www.allrecipes.com/recipes/693/meat-and-poultry/turkey/ground/\">\n<div class=\"slider__text\">\n<h3>Recipes for Ground Turkey</h3>\n<p class=\"slider__description\">Browse more than 500 ways to turn ground turkey into delicious healthy dinners.</p>\n</div>\n<img alt=\"Cameron's Ground Turkey Salsa Ranchera for Tacos and Burritos\" class=\"slider__photo\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/images/88188.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\"/>\n</a>\n</article>\n<article class=\"slider__slide\">\n<a data-carousel-link=\"\" data-internal-referrer=\"hp_carousel 05_Our Best Soups Ready in Under an Hour\" data-referring-position=\"carousel 05\" 
href=\"https://www.allrecipes.com/gallery/the-best-soups-ready-in-under-an-hour/\">\n<div class=\"slider__text\">\n<h3>Our Best Soups Ready in Under an Hour</h3>\n<p class=\"slider__description\">Warm up to 18 of the world's best quick soups. These are classic, comforting soups, plus some soul-satisfying surprises.</p>\n</div>\n<img alt=\"Italian Sausage Soup\" class=\"slider__photo\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/images/90929.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\"/>\n</a>\n</article>\n</section>\n<div class=\"hub-streams home-hubs\">\n<section class=\"hub-daughters\" ng-controller=\"ar_controller_home_daughter_hubs\" ng-init=\"init()\">\n<div class=\"hub-daughters__wrap hidden\" id=\"homeDaughterHubsDiv\">\n<div class=\"hub-daughters__container\">\n<div carousel-scroll-target=\"hubs\" id=\"scrollDiv\">\n<!--navigation for carousel -->\n<a carousel-scroll-left=\"hubs\" ng-if=\"hubs_atLeftBound===false\">\n<div class=\"icon--chevron-left\"></div>\n</a>\n<a carousel-scroll-right=\"hubs\" ng-if=\"hubs_atRightBound===false\">\n<div class=\"icon--chevron-right\"></div>\n</a>\n<div class=\"grid slider\" id=\"insideScroll\">\n<ul>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/76/appetizers-and-snacks/\" target=\"_self\">\n<img alt=\"Appetizers and Snacks\" class=\"\" src=\"https://images.media-allrecipes.com/images/89754.jpg\" title=\"Appetizers and Snacks\"/>\n<span class=\"category-title\" data-ellipsis=\"\">Appetizers and Snacks</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/78/breakfast-and-brunch/\" target=\"_self\">\n<img alt=\"Breakfast and Brunch\" class=\"\" src=\"https://images.media-allrecipes.com/images/89786.jpg\" title=\"Breakfast and Brunch\"/>\n<span class=\"category-title\" data-ellipsis=\"\">Breakfast and 
Brunch</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/276/desserts/cakes/\" target=\"_self\">\n<img alt=\"Cake Recipes\" class=\"\" src=\"https://images.media-allrecipes.com/userphotos/140x140/05/15/47/5154795.jpg\" title=\"Cake Recipes\"/>\n<span class=\"category-title\" data-ellipsis=\"\">Cake Recipes</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/201/meat-and-poultry/chicken/\" target=\"_self\">\n<img alt=\"Chicken Recipes\" class=\"\" src=\"https://images.media-allrecipes.com/userphotos/140x140/00/01/00/10037.jpg\" title=\"Chicken Recipes\"/>\n<span class=\"category-title\" data-ellipsis=\"\">Chicken Recipes</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/362/desserts/cookies/\" target=\"_self\">\n<img alt=\"Cookies\" class=\"\" src=\"https://images.media-allrecipes.com/userphotos/140x140/00/08/27/82762.jpg\" title=\"Cookies\"/>\n<span class=\"category-title\" data-ellipsis=\"\">Cookies</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/22882/everyday-cooking/instant-pot/\" target=\"_self\">\n<img alt=\"Instant Pot&#174; Recipes\" class=\"\" src=\"https://images.media-allrecipes.com/userphotos/140x140/06/32/23/6322392.jpg\" title=\"Instant Pot&#174; Recipes\"/>\n<span class=\"category-title\" data-ellipsis=\"\">Instant Pot® Recipes</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/430/seafood/shellfish/shrimp/\" target=\"_self\">\n<img alt=\"Shrimp Recipes\" class=\"\" src=\"https://images.media-allrecipes.com/userphotos/140x140/05/17/53/5175363.jpg\" title=\"Shrimp Recipes\"/>\n<span class=\"category-title\" 
data-ellipsis=\"\">Shrimp Recipes</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/253/everyday-cooking/slow-cooker/\" target=\"_self\">\n<img alt=\"Slow Cooker Recipes\" class=\"\" src=\"https://images.media-allrecipes.com/userphotos/140x140/01/89/59/1895907.jpg\" title=\"Slow Cooker Recipes\"/>\n<span class=\"category-title\" data-ellipsis=\"\">Slow Cooker Recipes</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/94/soups-stews-and-chili/\" target=\"_self\">\n<img alt=\"Soups, Stews and Chili\" class=\"\" src=\"https://images.media-allrecipes.com/images/89761.jpg\" title=\"Soups, Stews and Chili\"/>\n<span class=\"category-title\" data-ellipsis=\"\">Soups, Stews and Chili</span>\n</a>\n</li>\n<li>\n<a class=\"grid-col--subnav\" data-internal-referrer-link=\"top hubs\" href=\"https://www.allrecipes.com/recipes/86/world-cuisine/\" target=\"_self\">\n<img alt=\"World Cuisine\" class=\"\" src=\"https://images.media-allrecipes.com/images/89760.jpg\" title=\"World Cuisine\"/>\n<span class=\"category-title\" data-ellipsis=\"\">World Cuisine</span>\n</a>\n</li>\n</ul>\n</div>\n</div>\n</div>\n</div>\n</section>\n</div>\n<tastes-carousel current-tastes='[{\"isSelected\":false,\"tasteName\":\"Heart Healthy\",\"displayText\":\"Heart-Healthy\",\"abbreviation\":\"hh\",\"imageUrl\":\"75129.jpg\"},{\"isSelected\":false,\"tasteName\":\"Quick Easy\",\"displayText\":\"Quick \\u0026 Easy\",\"abbreviation\":\"qe\",\"imageUrl\":\"75137.jpg\"},{\"isSelected\":false,\"tasteName\":\"Low Calorie\",\"displayText\":\"Low-Calorie\",\"abbreviation\":\"lo\",\"imageUrl\":\"75131.jpg\"},{\"isSelected\":false,\"tasteName\":\"Gluten 
Free\",\"displayText\":\"Gluten-Free\",\"abbreviation\":\"gf\",\"imageUrl\":\"75128.jpg\"},{\"isSelected\":false,\"tasteName\":\"Diabetic\",\"displayText\":\"Diabetic\",\"abbreviation\":\"db\",\"imageUrl\":\"75138.jpg\"},{\"isSelected\":false,\"tasteName\":\"Vegetarian\",\"displayText\":\"Vegetarian\",\"abbreviation\":\"vt\",\"imageUrl\":\"75135.jpg\"}]'></tastes-carousel>\n<ar-notification></ar-notification>\n<section class=\"recipe-section fixed-grid\" id=\"fixedGridSection\">\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"214614\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/786835.jpg'\" data-name='\"Chicken Parmesan Pasta Casserole\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 1\" data-content-provider-id=\"0\" data-internal-referrer-link=\"rotd\" href=\"https://www.allrecipes.com/recipe/214614/chicken-parmesan-pasta-casserole/\">\n<img alt=\"Chicken Parmesan Pasta Casserole Recipe - All the flavors and textures of a traditional chicken Parmesan are baked into this crowd-pleasing casserole.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/786835.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Chicken Parmesan Pasta Casserole Recipe\"/>\n</a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h4 class=\"fixed-recipe-card__rotd\">\r\n Recipe of the Day\r\n </h4>\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"rotd\" href=\"https://www.allrecipes.com/recipe/214614/chicken-parmesan-pasta-casserole/\">\n<span class=\"fixed-recipe-card__title-link\">Chicken Parmesan Pasta Casserole</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" 
data-internal-referrer-link=\"rotd\" href=\"https://www.allrecipes.com/recipe/214614/chicken-parmesan-pasta-casserole/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.48 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.48000001907349\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"54\"></format-large-number></span>\n<div data-merch-type=\"Ads_LogoScroller_122x34\" style=\"margin: auto; margin-left: .5rem; display: inline-block; width: 122px; height: 34px\">\n<span id=\"ad-rotd\"></span><div data-tier=\"1\" id=\"div-gpt-sponsorLogo\"></div><div data-tier=\"1\" id=\"div-gpt-mob-sponsorLogo\"></div>\n</div>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">All the flavors and textures of a traditional chicken Parmesan are baked into this crowd-pleasing casserole.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"rotd\" href=\"https://www.allrecipes.com/cook/598946/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/401247.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> RCHEISS</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"grid-ad\" id=\"dfp_container\">\n<div class=\"ad-search-grid\">\n<div id=\"ad-recipe-grid-responsive-1\">\n<div data-tier=\"1\" id=\"div-gpt-square-fixed-1\"></div>\n<div data-tier=\"1\" id=\"div-gpt-mob-square-fixed-1\"></div>\n</div>\n<span class=\"advertisement\">ADVERTISEMENT</span>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"269592\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/6308774.jpg'\" data-name='\"Pork Chops in Garlic Mushroom Sauce\"' 
data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 2\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/269592/pork-chops-in-garlic-mushroom-sauce/\">\n<img alt=\"Pork Chops in Garlic Mushroom Sauce Recipe - Simmering boneless pork chops in a garlicky mushroom sauce is a great dinner idea for Valentine's Day.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/6308774.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Pork Chops in Garlic Mushroom Sauce Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/269592/pork-chops-in-garlic-mushroom-sauce/\">\n<span class=\"fixed-recipe-card__title-link\">Pork Chops in Garlic Mushroom Sauce</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/269592/pork-chops-in-garlic-mushroom-sauce/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.58 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.57999992370605\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"199\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Simmering boneless pork chops in a garlicky mushroom sauce is a great dinner idea for Valentine's Day.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" 
href=\"https://www.allrecipes.com/cook/15904710/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/3959214.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> April Broxton</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<img src=\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker&sz=1x1&t=adpartner%3D&c=14e93038-433d-47f3-a5db-a80a41b80e8b\">\n<ar-save-item class=\"favorite\" data-id=\"71722\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/2630776.jpg'\" data-name='\"Asian Lettuce Wraps\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 3\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/71722/asian-lettuce-wraps/\">\n<img alt=\"Asian Lettuce Wraps Recipe and Video - Tangy marinated beef is wrapped in refreshing lettuce leaves in this quick and easy Asian lettuce wrap recipe.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/2630776.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Asian Lettuce Wraps Recipe and Video\"/>\n</a>\n<a data-click-id=\"cardslot 3\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/video/1112/asian-lettuce-wraps/\">\n<span class=\"watchButton\">\n<span class=\"watchButton__text\">WATCH</span>\n</span>\n</a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" 
href=\"https://www.allrecipes.com/recipe/71722/asian-lettuce-wraps/\">\n<span class=\"fixed-recipe-card__title-link\">Asian Lettuce Wraps</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/71722/asian-lettuce-wraps/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.64 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.6399998664856\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"1792\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Tangy marinated beef is wrapped in refreshing lettuce leaves in this quick and easy Asian lettuce wrap recipe.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/1072660/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5614249.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> Rachel Castro</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</img></article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"237411\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/7280253.jpg'\" data-name='\"Easy Pork Chops for the Slow Cooker\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 4\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/237411/easy-pork-chops-for-the-slow-cooker/\">\n<img alt=\"Easy Pork Chops for the Slow Cooker Recipe - Use prepared soups, soup mix, and ranch dressing mix with onion and pork 
chops for an easy, savory dinner done in the slow cooker.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/7280253.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Easy Pork Chops for the Slow Cooker Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/237411/easy-pork-chops-for-the-slow-cooker/\">\n<span class=\"fixed-recipe-card__title-link\">Easy Pork Chops for the Slow Cooker</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/237411/easy-pork-chops-for-the-slow-cooker/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.39 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.3899998664856\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"130\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Use prepared soups, soup mix, and ranch dressing mix with onion and pork chops for an easy, savory dinner done in the slow cooker.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/13460465/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5614250.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> don t</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" 
data-id=\"255253\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/4461109.jpg'\" data-name='\"\\\"OG\\\" Zuppa Toscana Soup\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 5\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/255253/og-zuppa-toscana-soup/\">\n<img \"zuppa=\"\" -=\"\" a=\"\" alt=\"\" and=\"\" class=\"fixed-recipe-card__img\" copycat=\"\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/4461109.jpg\" easy=\"\" garden's(r)=\"\" is=\"\" italian=\"\" made=\"\" of=\"\" og\"=\"\" olive=\"\" perfect=\"\" potatoes,=\"\" quick=\"\" recipe=\"\" recipe\"=\"\" russet=\"\" sausage=\"\" soup=\"\" soup.\"\"=\"\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" this=\"\" title=\"\" toscana=\"\" with=\"\" zuppa=\"\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/255253/og-zuppa-toscana-soup/\">\n<span class=\"fixed-recipe-card__title-link\">\"OG\" Zuppa Toscana Soup</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/255253/og-zuppa-toscana-soup/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.79 out of 5 stars\" class=\"stars stars-5\" data-ratingstars=\"4.78999996185303\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"84\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Made with Italian sausage and russet potatoes, this quick and 
easy recipe is a perfect copycat of Olive Garden's(R) \"Zuppa Toscana soup.\"</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/6643927/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5674165.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> fire1676</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"277292\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/7383694.jpg'\" data-name='\"Lemon-Blueberry Pancakes \"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 6\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/277292/lemon-blueberry-pancakes/\">\n<img alt=\"Lemon-Blueberry Pancakes Recipe - Lemon-blueberry pancakes with juicy berries and tart lemon zest are delicious, and these are quick and easy to prepare for breakfast or brunch.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/7383694.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Lemon-Blueberry Pancakes Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/277292/lemon-blueberry-pancakes/\">\n<span class=\"fixed-recipe-card__title-link\">Lemon-Blueberry Pancakes </span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" 
data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/277292/lemon-blueberry-pancakes/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.39 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.3899998664856\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"42\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Lemon-blueberry pancakes with juicy berries and tart lemon zest are delicious, and these are quick and easy to prepare for breakfast or brunch.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/767491/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/96373.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> IronChefLaurie</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"213268\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/1108799.jpg'\" data-name='\"Classic Goulash\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 7\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/213268/classic-goulash/\">\n<img alt=\"Classic Goulash Recipe and Video - This stove top version of classic American beef goulash makes an easy one-pot meal for the whole family.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/1108799.jpg\" 
src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Classic Goulash Recipe and Video\">\n</img></a>\n<a data-click-id=\"cardslot 7\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/video/1295/classic-goulash/\">\n<span class=\"watchButton\">\n<span class=\"watchButton__text\">WATCH</span>\n</span>\n</a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/213268/classic-goulash/\">\n<span class=\"fixed-recipe-card__title-link\">Classic Goulash</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/213268/classic-goulash/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.47 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.46999979019165\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"1453\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">This stove top version of classic American beef goulash makes an easy one-pot meal for the whole family.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/4259535/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5614254.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> pathunt</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"6903\" 
data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/343742.jpg'\" data-name=\""Grandma's English Muffin Bread"\" data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 8\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/6903/grandmas-english-muffin-bread/\">\n<img alt=\"Grandma's English Muffin Bread Recipe - Like english muffins, slices of this bread must be toasted to taste right. Grandma used to bake this in large greased cans coated with cornmeal, which added to the english muffin appearance. Today she uses non-stick pans.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/343742.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Grandma's English Muffin Bread Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/6903/grandmas-english-muffin-bread/\">\n<span class=\"fixed-recipe-card__title-link\">Grandma's English Muffin Bread</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/6903/grandmas-english-muffin-bread/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.46 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.46000003814697\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"103\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Like english muffins, slices of this bread must be 
toasted to taste right. Grandma used to bake this in large greased cans coated with cornmeal, which added to the english muffin appearance. Today she uses non-stick pans.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/[email protected]/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5614259.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> Stephanie Knewasser</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"grid-ad\" id=\"dfp_container\">\n<div class=\"ad-search-grid\">\n<div id=\"ad-recipe-grid-responsive-2\">\n<div data-tier=\"3\" id=\"div-gpt-square-fixed-2\"></div>\n<div data-tier=\"3\" id=\"div-gpt-mob-square-fixed-2\"></div>\n</div>\n<span class=\"advertisement\">ADVERTISEMENT</span>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<img src=\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker&sz=1x1&t=adpartner%3Dallrecipesmagazine_earned_impression&c=4b1d88ea-d3bd-4a71-ad30-3dc4d45a117b\">\n<ar-save-item class=\"favorite\" data-id=\"10813\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/4462051.jpg'\" data-name='\"Best Chocolate Chip Cookies\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 9\" data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/10813/best-chocolate-chip-cookies/\">\n<img alt=\"Best Chocolate Chip Cookies Recipe and Video - Crisp edges, chewy middles, and so, so easy to make. 
Try this wildly-popular chocolate chip cookie recipe for yourself.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/4462051.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Best Chocolate Chip Cookies Recipe and Video\"/>\n</a>\n<a data-click-id=\"cardslot 9\" data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/video/626/best-chocolate-chip-cookies/\">\n<span class=\"watchButton\">\n<span class=\"watchButton__text\">WATCH</span>\n</span>\n</a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/10813/best-chocolate-chip-cookies/\">\n<span class=\"fixed-recipe-card__title-link\">Best Chocolate Chip Cookies</span>\n</a>\n</h3>\n<a data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/10813/best-chocolate-chip-cookies/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.62 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.61999988555908\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"11753\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Crisp edges, chewy middles, and so, so easy to make. 
Try this wildly-popular chocolate chip cookie recipe for yourself.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/Dora»Dora/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5674169.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> Dora</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</img></article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"236867\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/7466519.jpg'\" data-name='\"Super Duper Slow Cooker Beef Stroganoff\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 10\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/236867/super-duper-slow-cooker-beef-stroganoff/\">\n<img alt=\"Super Duper Slow Cooker Beef Stroganoff Recipe - This slow cooker version of beef stroganoff uses sour cream, cream cheese, and cream of mushroom soup for a rich, savory classic main dish.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/7466519.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Super Duper Slow Cooker Beef Stroganoff Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/236867/super-duper-slow-cooker-beef-stroganoff/\">\n<span class=\"fixed-recipe-card__title-link\">Super Duper Slow Cooker Beef 
Stroganoff</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/236867/super-duper-slow-cooker-beef-stroganoff/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.64 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.6399998664856\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"165\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">This slow cooker version of beef stroganoff uses sour cream, cream cheese, and cream of mushroom soup for a rich, savory classic main dish.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/3066997/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/1911187.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> cwmom98</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"70163\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/1101614.jpg'\" data-name='\"Easy Baked Tilapia\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 11\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/70163/easy-baked-tilapia/\">\n<img alt=\"Easy Baked Tilapia Recipe and Video - Just 35 minutes and six simple ingredients are all you need for this top-rated, flavorful tilapia recipe.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" 
data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/1101614.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Easy Baked Tilapia Recipe and Video\">\n</img></a>\n<a data-click-id=\"cardslot 11\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/video/3263/easy-baked-tilapia/\">\n<span class=\"watchButton\">\n<span class=\"watchButton__text\">WATCH</span>\n</span>\n</a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/70163/easy-baked-tilapia/\">\n<span class=\"fixed-recipe-card__title-link\">Easy Baked Tilapia</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/70163/easy-baked-tilapia/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.5 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.5\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"848\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Just 35 minutes and six simple ingredients are all you need for this top-rated, flavorful tilapia recipe.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/916066/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/global/features/mini/121.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> F_Gory</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article 
class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"82954\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/663994.jpg'\" data-name='\"Enchilada Meatballs\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 12\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/82954/enchilada-meatballs/\">\n<img alt=\"Enchilada Meatballs Recipe - These are a tasty change from the usual since they use cornbread crumbs instead of crackers or regular bread. They're always a hit at parties! A friend shared this recipe with me ages ago and I've tweaked it to come up with this.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/663994.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Enchilada Meatballs Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/82954/enchilada-meatballs/\">\n<span class=\"fixed-recipe-card__title-link\">Enchilada Meatballs</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/82954/enchilada-meatballs/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.26 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.26000022888184\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"144\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">These are a tasty change from the 
usual since they use cornbread crumbs instead of crackers or regular bread. They're always a hit at parties! A friend shared this recipe with me ages ago and I've tweaked it to come up with this.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/angelforalzheimers/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/2732931.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> Shelley</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"275055\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/6884311.jpg'\" data-name='\"Grilled Teriyaki Shrimp and Pineapple Skewers\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 13\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/275055/grilled-teriyaki-shrimp-and-pineapple-skewers/\">\n<img alt=\"Grilled Teriyaki Shrimp and Pineapple Skewers Recipe - These simple savory-sweet shrimp and pineapple skewers are basted with a homemade teriyaki sauce and cook up in just a few minutes on the grill.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/6884311.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Grilled Teriyaki Shrimp and Pineapple Skewers Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" 
href=\"https://www.allrecipes.com/recipe/275055/grilled-teriyaki-shrimp-and-pineapple-skewers/\">\n<span class=\"fixed-recipe-card__title-link\">Grilled Teriyaki Shrimp and Pineapple Skewers</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/275055/grilled-teriyaki-shrimp-and-pineapple-skewers/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.43 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.42999982833862\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"13\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">These simple savory-sweet shrimp and pineapple skewers are basted with a homemade teriyaki sauce and cook up in just a few minutes on the grill.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/francecevallos/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> France C</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<img src=\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker&sz=1x1&t=adpartner%3Dallrecipesmagazine_earned_impression&c=b87a1e2a-0d1c-416d-a45c-5b65f196c1e0\">\n<ar-save-item class=\"favorite\" data-id=\"23600\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/3359675.jpg'\" data-name=\""World's Best Lasagna"\" data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 14\" 
data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/23600/worlds-best-lasagna/\">\n<img alt=\"World's Best Lasagna Recipe and Video - Filling and satisfying, John Chandler's lasagna is our most popular recipe. With basil, sausage, ground beef and three types of cheese, it lives up to its name.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/3359675.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"World's Best Lasagna Recipe and Video\"/>\n</a>\n<a data-click-id=\"cardslot 14\" data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/video/672/worlds-best-lasagna/\">\n<span class=\"watchButton\">\n<span class=\"watchButton__text\">WATCH</span>\n</span>\n</a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/23600/worlds-best-lasagna/\">\n<span class=\"fixed-recipe-card__title-link\">World's Best Lasagna</span>\n</a>\n</h3>\n<a data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/23600/worlds-best-lasagna/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.8 out of 5 stars\" class=\"stars stars-5\" data-ratingstars=\"4.80000019073486\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"12728\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Filling and satisfying, John Chandler's lasagna is our most popular recipe. 
With basil, sausage, ground beef and three types of cheese, it lives up to its name.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"451\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/177901/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/3873464.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> John Chandler</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</img></article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"275451\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/7019430.jpg'\" data-name='\"Mexican Frittata\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 15\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/275451/mexican-frittata/\">\n<img alt=\"Mexican Frittata Recipe - The addition of peppers, cumin, and salsa provides Mexican flair to this quick and easy breakfast frittata that's perfect when cooking for one.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/7019430.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Mexican Frittata Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/275451/mexican-frittata/\">\n<span class=\"fixed-recipe-card__title-link\">Mexican Frittata</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" 
href=\"https://www.allrecipes.com/recipe/275451/mexican-frittata/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.16 out of 5 stars\" class=\"stars stars-4\" data-ratingstars=\"4.15999984741211\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"17\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">The addition of peppers, cumin, and salsa provides Mexican flair to this quick and easy breakfast frittata that's perfect when cooking for one.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/tracyfer/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/895574.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> tracyfer</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"grid-ad\" id=\"dfp_container\">\n<div class=\"ad-search-grid\">\n<div id=\"ad-recipe-grid-responsive-3\">\n<div data-tier=\"4\" id=\"div-gpt-square-fixed-3\"></div>\n<div data-tier=\"4\" id=\"div-gpt-mob-square-fixed-3\"></div>\n</div>\n<span class=\"advertisement\">ADVERTISEMENT</span>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"25690\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/3208554.jpg'\" data-name=\""Andrea's Pasta Fagioli"\" data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 16\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/25690/andreas-pasta-fagioli/\">\n<img alt=\"Andrea's Pasta Fagioli Recipe - Tomato 
sauce, cannelini beans, navy beans and ditalini pasta are seasoned with parsley, basil and oregano in this chunky soup.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/3208554.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Andrea's Pasta Fagioli Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/25690/andreas-pasta-fagioli/\">\n<span class=\"fixed-recipe-card__title-link\">Andrea's Pasta Fagioli</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/25690/andreas-pasta-fagioli/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.44 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.44000005722046\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"593\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Tomato sauce, cannelini beans, navy beans and ditalini pasta are seasoned with parsley, basil and oregano in this chunky soup.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/95992/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5614242.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> AVALERIO</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" 
data-id=\"260852\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/4538307.jpg'\" data-name='\"Cheesy Pork Taco Rice\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 17\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/260852/cheesy-pork-taco-rice/\">\n<img alt=\"Cheesy Pork Taco Rice Recipe - For an easy, cheesy Tex-Mex skillet dinner, try this recipe for baked pork taco rice with roasted poblano peppers topped with a quick salsa.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/4538307.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Cheesy Pork Taco Rice Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/260852/cheesy-pork-taco-rice/\">\n<span class=\"fixed-recipe-card__title-link\">Cheesy Pork Taco Rice</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/260852/cheesy-pork-taco-rice/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.73 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.73000001907349\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"19\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">For an easy, cheesy Tex-Mex skillet dinner, try this recipe for baked pork taco rice with roasted poblano peppers topped with a quick salsa.</div>\n</a>\n<div 
class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/16589076/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/2683453.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> Culinary Envy</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"270200\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/6280096.jpg'\" data-name='\"Cranberry Orange Rolls\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 18\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/270200/cranberry-orange-rolls/\">\n<img alt=\"Cranberry Orange Rolls Recipe - A different take on sticky cinnamon rolls, made with cranberries and orange juice and frosted with a simple vanilla glaze.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/6280096.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Cranberry Orange Rolls Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/270200/cranberry-orange-rolls/\">\n<span class=\"fixed-recipe-card__title-link\">Cranberry Orange Rolls</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/270200/cranberry-orange-rolls/\">\n<div 
class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.22 out of 5 stars\" class=\"stars stars-4\" data-ratingstars=\"4.21999979019165\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"19\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">A different take on sticky cinnamon rolls, made with cranberries and orange juice and frosted with a simple vanilla glaze.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/k-i-m/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/127985.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> Kim</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<ar-save-item class=\"favorite\" data-id=\"274966\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/6967156.jpg'\" data-name='\"Sheet Pan Parmesan Chicken and Veggies\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 19\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/274966/sheet-pan-parmesan-chicken-and-veggies/\">\n<img alt=\"Sheet Pan Parmesan Chicken and Veggies Recipe - A cheesy Parmesan and panko mixture bakes up into an extra crispy crust on the chicken in this easy sheet pan dinner with herb-roasted vegetables.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/6967156.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" 
title=\"Sheet Pan Parmesan Chicken and Veggies Recipe\">\n</img></a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/274966/sheet-pan-parmesan-chicken-and-veggies/\">\n<span class=\"fixed-recipe-card__title-link\">Sheet Pan Parmesan Chicken and Veggies</span>\n</a>\n</h3>\n<a data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/274966/sheet-pan-parmesan-chicken-and-veggies/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.53 out of 5 stars\" class=\"stars stars-4-5\" data-ratingstars=\"4.53000020980835\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"33\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">A cheesy Parmesan and panko mixture bakes up into an extra crispy crust on the chicken in this easy sheet pan dinner with herb-roasted vegetables.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"0\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/cook/4179258/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/5674135.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> chpmnk42</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</article>\n<article class=\"fixed-recipe-card\">\n<img src=\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker&sz=1x1&t=adpartner%3D&c=11e8a597-2b0b-4086-a023-987eb0c308f4\">\n<ar-save-item class=\"favorite\" data-id=\"269574\" data-imageurl=\"'https://images.media-allrecipes.com/userphotos/300x300/6106331.jpg'\" 
data-name='\"Classic Overnight Oats\"' data-segmentpageproperties=\"segmentContentInfo\" data-type=\"'Recipe'\"></ar-save-item>\n<div class=\"grid-card-image-container\">\n<a data-click-id=\"cardslot 20\" data-content-provider-id=\"524\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/269574/classic-overnight-oats/\">\n<img alt=\"Classic Overnight Oats Recipe - Almond Breeze almondmilk and oats make a great no-cook oatmeal that you can make for breakfast up to 3 days in advance.\" class=\"fixed-recipe-card__img\" data-lazy-load=\"\" data-original-src=\"https://images.media-allrecipes.com/userphotos/300x300/6106331.jpg\" src=\"https://images.media-allrecipes.com/ar/spacer.gif\" style=\"display: inline;\" title=\"Classic Overnight Oats Recipe\"/>\n</a>\n</div>\n<div class=\"fixed-recipe-card__info\">\n<h3 class=\"fixed-recipe-card__h3\">\n<a class=\"fixed-recipe-card__title-link\" data-content-provider-id=\"524\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/269574/classic-overnight-oats/\">\n<span class=\"fixed-recipe-card__title-link\">Classic Overnight Oats</span>\n</a>\n</h3>\n<a data-content-provider-id=\"524\" data-internal-referrer-link=\"popular\" href=\"https://www.allrecipes.com/recipe/269574/classic-overnight-oats/\">\n<div class=\"fixed-recipe-card__ratings\">\n<span aria-label=\"Rated 4.78 out of 5 stars\" class=\"stars stars-5\" data-ratingstars=\"4.78000020980835\" onclick=\"AnchorScroll('reviews')\"></span>\n<span class=\"fixed-recipe-card__reviews\"><format-large-number number=\"14\"></format-large-number></span>\n</div>\n<div class=\"fixed-recipe-card__description\" data-ellipsis=\"\" ng-cloak=\"\">Almond Breeze almondmilk and oats make a great no-cook oatmeal that you can make for breakfast up to 3 days in advance.</div>\n</a>\n<div class=\"fixed-recipe-card__profile\">\n<a class=\"ng-isolate-scope\" data-content-provider-id=\"524\" data-internal-referrer-link=\"popular\" 
href=\"https://www.allrecipes.com/cook/almondbreeze/\" target=\"_self\">\n<ul class=\"cook-submitter-info\">\n<li>\n<img alt=\"profile image\" class=\"cook-img\" src=\"https://images.media-allrecipes.com/userphotos/50x50/6732378.jpg\"/>\n</li>\n<li>\n<h4><span>By</span> Almond Breeze</h4>\n</li>\n</ul>\n</a>\n</div>\n</div>\n</img></article></section> <a data-ar-infinite-scroll=\"\"></a>\n<noscript>\n<a href=\"https://www.allrecipes.com/?page=2\">Next Page</a>\n</noscript>\n<a href=\"https://www.magazine.store/allrecipes-magazine/?utm_source=allrecipes.com&utm_medium=owned&utm_campaign=i905arrfw1304a\" target=\"_blank\">\n<div class=\"ad-mag-homeBtm__footer\">\n<div class=\"ad-mag-homeBtm__img-wrap\">\n<img alt=\"Subscribe to Allrecipes Magazine\" class=\"ad-mag-homeBtm__img\" src=\"//images.media-allrecipes.com/images/86517.jpg\" title=\"Allrecipes Magazine\">\n</img></div>\n<div class=\"ad-mag-homeBtm__text-wrap\">\n<h4>Allrecipes Magazine</h4>\n<p>Cook 5-star dinners every time—get a full year for just $10.</p>\n<div class=\"btn-basic--small btn-orange\" href=\"https://www.magazine.store/allrecipes-magazine/?utm_source=allrecipes.com&utm_medium=owned&utm_campaign=i905arrfw1304a\" id=\"btn\">Subscribe</div>\n</div>\n</div>\n<div class=\"clearfix\"></div>\n</a>\n<script>\r\n window.toggles = window.toggles || {};\r\n window.toggles.TastePrefOverlays = true;\r\n </script>\n</section>\n</div>\n<footer class=\"full-page\" id=\"pageFooter\">\n<section class=\"grid grid-fixed\">\n<article class=\"grid-col grid-col--tiles footer-share\">\n<ul class=\"social-sharing__icons\">\n<li><a class=\"svg-icon--social--facebook svg-icon--social--facebook-dims\" data-footer-link-tracking='{\"label\": \"footer > social > facebook\"}' href=\"https://www.facebook.com/allrecipes\" id=\"footer_facebook\" rel=\"noopener\" target=\"_blank\" title=\"Facebook\"></a></li>\n<li><a class=\"svg-icon--social--pinterest svg-icon--social--pinterest-dims\" data-footer-link-tracking='{\"label\": 
\"footer > social > pinterest\"}' href=\"http://pinterest.com/allrecipes/\" id=\"footer_pinterest\" rel=\"noopener\" target=\"_blank\" title=\"Pinterest\"></a></li>\n<li><a class=\"svg-icon--social--twitter svg-icon--social--twitter-dims\" data-footer-link-tracking='{\"label\": \"footer > social > twitter\"}' href=\"https://twitter.com/Allrecipes\" id=\"footer_twitter\" rel=\"noopener\" target=\"_blank\" title=\"Twitter\"></a></li>\n<li><a class=\"svg-icon--social--instagram svg-icon--social--instagram-dims\" data-footer-link-tracking='{\"label\": \"footer > social > instagram\"}' href=\"http://instagram.com/allrecipes\" id=\"footer_instagram\" rel=\"noopener\" target=\"_blank\" title=\"Instagram\"></a></li>\n<li><a class=\"svg-icon--social--tumblr_wh svg-icon--social--tumblr_wh-dims\" data-footer-link-tracking='{\"label\": \"footer > social > tumblr\"}' href=\"http://allrecipes.tumblr.com/\" id=\"footer_tumblr\" rel=\"noopener\" target=\"_blank\" title=\"Tumblr\"></a></li>\n<li><a class=\"svg-icon--social--stumbleupon svg-icon--social--stumbleupon-dims\" data-footer-link-tracking='{\"label\": \"footer > social > stumbleupon\"}' href=\"http://www.stumbleupon.com/stumbler/Allrecipes\" id=\"footer_stumbleupon\" rel=\"noopener\" target=\"_blank\" title=\"StumbleUpon\"></a></li>\n<li><a class=\"svg-icon--social--youtube_wh svg-icon--social--youtube_wh-dims\" data-footer-link-tracking='{\"label\": \"footer > social > youtube\"}' href=\"https://www.youtube.com/user/allrecipes\" id=\"footer_youtube\" rel=\"noopener\" target=\"_blank\" title=\"YouTube\"></a></li>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles\">\n<ul>\n<li>About Us</li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > newsroom\"}' href=\"http://press.allrecipes.com/\" id=\"footer_newsroom\" rel=\"noopener\">Newsroom</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > jobs\"}' href=\"http://dish.allrecipes.com/allrecipes-jobs/\" id=\"footer_jobs\" rel=\"noopener\">Jobs 
at Allrecipes</a></li>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles footer_advertising\">\n<ul>\n<li>Advertising</li>\n<li><a class=\"\" data-footer-link-tracking='{\"label\": \"footer > advertise\"}' href=\"http://www.meredith.com/national-media/digital\" id=\"footer_advertisewithus\" rel=\"noopener\">Advertise with Us</a></li>\n<li><a class=\"\" data-footer-link-tracking='{\"label\": \"footer > meredith womans network\"}' href=\"http://www.meredith.com/marketing_solutions/interactive_media.html\" id=\"footer_womensnetwork\" rel=\"noopener\">Meredith Women's Network</a></li>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles\">\n<ul>\n<li>Support</li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > site map\"}' href=\"http://dish.allrecipes.com/faq-sitemap/\" id=\"footer_sitemap\" rel=\"noopener\">Site Map</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > contact us\"}' href=\"http://dish.allrecipes.com/customer-service/contact-us-2/\" id=\"footer_contactus\" rel=\"noopener\">Contact Us</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > customer support\"}' href=\"http://dish.allrecipes.com/customer-service/\" id=\"footer_customersupport\" rel=\"noopener\">Customer Support</a></li>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles\" data-siteurl=\"https://www.allrecipes.com\" ng-controller=\"ar_controllers_footerLinks\">\n<ul>\n<li>Global Community</li>\n<li>\n<select aria-label=\"Change Country\" id=\"country-selector\" onchange=\"changesite(this.value);\">\n<option value=\"\">Select location</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > austrailia & new zealand\"}' rel=\"noopener\" value=\"http://allrecipes.com.au\">Australia & New Zealand</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > canada\"}' rel=\"noopener\" value=\"https://www.allrecipes.com?country=CA\">Canada</option>\n<option 
data-footer-link-tracking='{\"label\": \"footer > global community > quebec\"}' rel=\"noopener\" value=\"http://qc.allrecipes.ca\">Quebec</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > united kingdom & ireland\"}' rel=\"noopener\" value=\"http://allrecipes.co.uk\">United Kingdom & Ireland</option>\n<option data-footer-link-tracking='{\"label\": \"footer > global community > united states\"}' rel=\"noopener\" value=\"https://www.allrecipes.com/?country=US\">United States</option>\n</select>\n</li>\n<li>© 2020 Allrecipes.com <br/>All Rights Reserved </li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > california rights\"}' href=\"http://www.meredith.com/legal/privacy\" id=\"footer_privacypolicy\" rel=\"noopener\" target=\"_blank\">Privacy Policy Your California Rights</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > eu privacy policy\"}' href=\"http://www.meredith.com/legal/eu-privacy\" id=\"footer_eu_privacypolicy\" rel=\"noopener\" target=\"_blank\">EU Privacy Policy</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > terms\"}' href=\"http://www.meredith.com/legal/terms\" id=\"footer_terms\" rel=\"noopener\" target=\"_blank\">Terms of Service</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > data policy\"}' href=\"http://www.meredith.com/datapolicy.html\" id=\"footer_datapolicy\" rel=\"noopener\" target=\"_blank\">Data Policy</a></li>\n<li>\n<a class=\"privacy-notification-dsar\" data-footer-link-tracking='{\"label\": \"footer > eu data requests\"}' href=\"\" id=\"footer_dsar\" rel=\"noopener\" target=\"_blank\">EU Data Subject Requests</a>\n<!-- EU DSAR link removal start -->\n<script type=\"text/javascript\">\r\n // Find the EU DSAR link by link text.\r\n function getEUDSARlink() {\r\n let elem;\r\n const links = document.getElementsByTagName('a');\r\n for (let link of links) {\r\n if (link.innerHTML == 'EU Data Subject Requests') {\r\n elem = link;\r\n break;\r\n 
}\r\n }\r\n return elem;\r\n };\r\n // Pass EU DSAR link element to the link-hiding function.\r\n (function (elem) {\r\n if (elem && typeof elem !== 'undefined') {\r\n const d = window.document;\r\n const now = Math.floor(Date.now() / 1000);\r\n const newYear2020 = 1577836800;\r\n if (now >= newYear2020 || d.location.hash == '#ccpa') {\r\n elem.style.display = 'none';\r\n }\r\n }\r\n }(getEUDSARlink()));\r\n </script>\n<!-- EU DSAR link removal end -->\n</li>\n<li>\n<!-- Do Not Sell button start -->\n<!-- NOTE: OneTrust settings might override the button text. -->\n<!-- If text link is preferred, change to an <a> tag with the same class. -->\n<button class=\"ot-sdk-show-settings\">California Do Not Sell</button>\n<script type=\"text/javascript\">\r\n (function () {\r\n const d = window.document;\r\n const now = Math.floor(Date.now() / 1000);\r\n const newYear2020 = 1577836800;\r\n const otLinkClass = '.ot-sdk-show-settings';\r\n if (now < newYear2020 && d.location.hash != '#ccpa') {\r\n const otLink = d.querySelector(otLinkClass);\r\n otLink.style.display = 'none';\r\n }\r\n }());\r\n </script>\n<!-- Do Not Sell button end -->\n</li>\n<li>\n<!-- Ghostery Inc tag script_ghostery cid: 1333 pid: 282-->\n<a data-footer-link-tracking='{\"label\": \"footer > adchoices\"}' href=\"#\" id=\"_bapw-link\" rel=\"noopener\" target=\"_blank\"><span id=\"footer_adchoices\" style=\"vertical-align:middle !important;padding-right:5px\">AdChoices</span><img height=\"11\" id=\"_bapw-icon\" role=\"presentation\" style=\"border:0 !important;display:inline !important;vertical-align:middle !important;padding-right:5px !important;\"/></a>\n<a class=\"btns-one-small\" data-show-on-scroll=\"700\" id=\"footer_top_button\" ng-cloak=\"\" ng-show=\"yTrigger == true\" onclick=\"AnchorScroll('top')\">Top</a>\n</li>\n<script>(function () { var g = 282, i = 1333, a = false, h = document, j = h.getElementById(\"_bapw-link\"), e = (h.location.protocol == \"https:\"), f = (e ? 
\"https\" : \"http\") + \"://\", c = f + (e ? \"a248.e.akamai.net/betterad.download.akamai.com/91609\" : \"cdn.betrad.com\") + \"/pub/\"; function b(k) { var d = new Image(); d.src = f + \"l.betrad.com/pub/p.gif?pid=\" + g + \"&ocid=\" + i + \"&i\" + k + \"=1&r=\" + Math.random() } h.getElementById(\"_bapw-icon\").src = c + \"icon1.png\"; j.onmouseover = function () { if (/#$/.test(j.href)) { j.href = \"http://info.evidon.com/pub_info/\" + g + \"?v=1\" } }; j.onclick = function () { var k = window._bap_p_overrides; function d(n, q) { var o = h.getElementsByTagName(\"head\")[0] || h.documentElement, m = a, l = h.createElement(\"script\"); function p() { l.onload = l.onreadystatechange = null; o.removeChild(l); q() } l.src = n; l.onreadystatechange = function () { if (!m && (this.readyState == \"loaded\" || this.readyState == \"complete\")) { m = true; p() } }; l.onload = p; o.insertBefore(l, o.firstChild) } if (k && k.hasOwnProperty(g)) { if (k[g].new_window) { b(\"c\"); return true } } this.onclick = \"return \" + a; d(f + \"ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js\", function () { d(c + \"pub2.js\", function () { BAPW.i(j, { pid: g, ocid: i }) }) }); return a }; b(\"i\") }()); var _bap_p_overrides = _bap_p_overrides || {}; _bap_p_overrides[282] = { new_window: true };</script>\n</ul>\n</article>\n<article class=\"grid-col grid-col--tiles\">\n<ul>\n<li>More Allrecipes</li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > ar magazine subscribe\"}' href=\"https://www.magazine.store/allrecipes-magazine/\" id=\"footer_magazine\" rel=\"noopener\">Allrecipes Magazine <span><span>–</span> Subscribe</span></a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > apps\"}' href=\"http://dish.allrecipes.com/mobile-apps\" id=\"footer_apps\" rel=\"noopener\">Allrecipes Apps</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > food wishes videos\"}' href=\"http://youtube.com/foodwishes\" id=\"footer_foodwishes\" 
rel=\"noopener\">Food Wishes Videos</a></li>\n<li><a data-footer-link-tracking='{\"label\": \"footer > ar blog\"}' href=\"http://press.allrecipes.com/blog/\" id=\"footer_blog\" rel=\"noopener\">The Allrecipes Blog</a></li>\n</ul>\n</article>\n</section>\n<script type=\"text/javascript\">\r\n function changesite(value) {\r\n window.location = value;\r\n }\r\n\r\n \r\n function readCookieValue(cookieName) {\r\n var cookieValueRegex = new RegExp('(?:(?:^|.*;\\\\s*)' + cookieName + '\\\\s*\\\\=\\\\s*([^;]*).*$)|^.*$');\r\n var cookieValue = document.cookie.replace(cookieValueRegex, \"$1\");\r\n return cookieValue; // empty string, if cookie not found\r\n }\r\n\r\n \r\n var dsarUrl = \"https://app-de.onetrust.com/app/#/webform/0c410d51-8e85-4308-9bb9-37c24a461ccb?\";\r\n\r\n var dsarUserId = readCookieValue(\"euConsentId\");\r\n if (!dsarUserId) {\r\n dsarUserId = readCookieValue(\"globalTI_SID\");\r\n }\r\n if (dsarUserId) {\r\n dsarUrl += 'uid=' + dsarUserId + '&';\r\n }\r\n\r\n var siteDomain = document.domain.replace(/^.*\\.([^.]+\\.\\w+)$/i, \"$1\");\r\n dsarUrl += 'domain=' + siteDomain;\r\n\r\n document.getElementById(\"footer_dsar\").href = dsarUrl;\r\n\r\n \r\n var notificationDsar = document.getElementById(\"privacy-notification_dsar\");\r\n if (notificationDsar != null) {\r\n notificationDsar.href = dsarUrl;\r\n }\r\n\r\n window.addEventListener(\"load\",\r\n function() {\r\n window.segmentAnalytics.identify(dsarUserId, window.dataLayer);\r\n });\r\n</script>\n<div data-ng-controller=\"ar_controllers_deferredAction\" data-ng-init=\"wireupAdIntegrationListeners();executePostLoginEvents();\">\n<ar-notification></ar-notification>\n<div data-loading-indicator=\"\"></div>\n</div>\n</footer>\n</div>\n</div>\n<div class=\"ad-footer--fixed\" id=\"ad-footer\">\n<div data-tier=\"1\" id=\"div-gpt-mob-adhesive-banner-fixed\"></div>\n</div>\n<div data-tier=\"1\" id=\"div-gpt-oopSponsorship\"></div>\n<script>\r\n 
window.Toggles={\"AdTest\":false,\"RecipePreferences\":true,\"AzureRelatedcontentRecipes\":true,\"RdpRightRailRecommendations\":true,\"RecipePagePerf\":true,\"StreamsTest\":true,\"TastePrefOverlays\":true,\"RdpTasteCarousel\":true,\"MonetizedIngredients\":false,\"FixedGrid\":true,\"VideoWatchIcon\":true,\"reCaptcha\":false,\"Optimizely\":false,\"WatchVideoRDP\":false,\"SearchAB\":false,\"EUCheck\":true,\"ShowTasteSuperCard\":true,\"Pushly\":true};\r\n\r\n\r\n var enviromentOmnitureId = 'rdirdallrecipes';\r\n var pubsub = new Pubsub();\r\n </script>\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/analytics.bundled.js\"></script>\n<!-- script_analyticstag -->\n<script>\r\n\r\n\r\n\r\n function completed(whenReady) {\r\n document.removeEventListener(\"DOMContentLoaded\", completed);\r\n window.removeEventListener(\"load\", completed);\r\n if (whenReady) {\r\n whenReady();\r\n }\r\n }\r\n\r\n function googAnalytics() {\r\n pubsub.broadcast(\"GoogleAnalytics\");\r\n }\r\n\r\n document.addEventListener(\"DOMContentLoaded\", completed(googAnalytics)); // use the handy event callback\r\n window.addEventListener(\"load\", completed(googAnalytics)); // a fallback to window.onload, that will always work\r\n\r\n if (typeof (window.dataLayer) !== \"undefined\" && dataLayer) {\r\n var clientAnalytics = new ClientAnalytics(window.dataLayer);\r\n var comscoreShim = new ComscoreShim(window.dataLayer, pubsub);\r\n var omniShim = new OmnitureShim(window.dataLayer, s, pubsub);\r\n var kruxShim = new KruxShim(window.dataLayer, pubsub);\r\n\r\n }\r\n </script>\n<script type=\"text/javascript\">\r\n 
window.Toggles={\"AdTest\":false,\"RecipePreferences\":true,\"AzureRelatedcontentRecipes\":true,\"RdpRightRailRecommendations\":true,\"RecipePagePerf\":true,\"StreamsTest\":true,\"TastePrefOverlays\":true,\"RdpTasteCarousel\":true,\"MonetizedIngredients\":false,\"FixedGrid\":true,\"VideoWatchIcon\":true,\"reCaptcha\":false,\"Optimizely\":false,\"WatchVideoRDP\":false,\"SearchAB\":false,\"EUCheck\":true,\"ShowTasteSuperCard\":true,\"Pushly\":true};\r\n\r\n var enviromentOmnitureId = 'rdirdallrecipes';\r\n\r\n try {\r\n var thirtyMinutesInMilliseconds = 1800000;\r\n window.localStorage.setItem(\"CurrentUserStateModel\", ''); //primarily used by private profile SPA, but pertains to current user in general\r\n window.localStorage.setItem(\"PublicProfileStateModel\", ''); //used by public profile SPA\r\n window.localStorage.setItem(\"CurrentUserStateModelExpirationDate\", Date.now() + thirtyMinutesInMilliseconds); //primarily used by private profile SPA, but pertains to current user in general\r\n window.localStorage.setItem(\"PublicProfileStateModelExpirationDate\", Date.now() + thirtyMinutesInMilliseconds); //used by public profile SPA\r\n } catch(err) {\r\n var CurrentUserStateCookie ='';\r\n var PublicProfileStateCookie = '';\r\n document.cookie = \"CurrentUserStateModel=\" + CurrentUserStateCookie;\r\n document.cookie = \"PublicProfileStateModel=\" + PublicProfileStateCookie;\r\n }\r\n\r\n </script>\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/home.bundled.js\"></script>\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/home-templates.bundled.js\"></script>\n<script src=\"https://secureimages.allrecipes.com/assets/deployables/v-1.185.0.5222/main-bottom-templates.bundled.js\"></script>\n<script>\r\n angular.module('allrecipes').value('feedItems', [{\"title\":\"Chicken Parmesan Pasta 
Casserole\",\"videoTitle\":\"\",\"reviewCount\":54,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/786835.jpg\",\"description\":\"All the flavors and textures of a traditional chicken Parmesan are baked into this crowd-pleasing casserole.\",\"stars\":{\"rating\":4.4800000190734863,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":598946,\"displayName\":\"RCHEISS\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/401247.jpg\",\"followersCount\":10,\"favoriteCount\":27,\"madeRecipesCount\":15,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/401247.jpg\",\"profileUrl\":\"/cook/598946/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeOfTheDay\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/214614/chicken-parmesan-pasta-casserole/\",\"videoDetailUrl\":\"\",\"altText\":\"Chicken Parmesan Pasta Casserole Recipe - All the flavors and textures of a traditional chicken Parmesan are baked into this crowd-pleasing casserole.\",\"titleText\":\"Chicken Parmesan Pasta Casserole Recipe\",\"id\":214614,\"analyticsType\":\"rotd\",\"contentProviderId\":\"0\"},{\"title\":\"Pork Chops in Garlic Mushroom Sauce\",\"reviewCount\":199,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6308774.jpg\",\"description\":\"Simmering boneless pork chops in a garlicky mushroom sauce is a great dinner idea for Valentine\\u0027s Day.\",\"stars\":{\"rating\":4.5799999237060547,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":15904710,\"displayName\":\"April 
Broxton\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/3959214.jpg\",\"followersCount\":14,\"favoriteCount\":822,\"madeRecipesCount\":72,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/3959214.jpg\",\"profileUrl\":\"/cook/15904710/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/269592/pork-chops-in-garlic-mushroom-sauce/\",\"videoDetailUrl\":\"\",\"altText\":\"Pork Chops in Garlic Mushroom Sauce Recipe - Simmering boneless pork chops in a garlicky mushroom sauce is a great dinner idea for Valentine\\u0027s Day.\",\"titleText\":\"Pork Chops in Garlic Mushroom Sauce Recipe\",\"id\":269592,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Asian Lettuce Wraps\",\"reviewCount\":1792,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/2630776.jpg\",\"description\":\"Tangy marinated beef is wrapped in refreshing lettuce leaves in this quick and easy Asian lettuce wrap recipe.\",\"videoId\":1112,\"stars\":{\"rating\":4.6399998664855957,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1072660,\"displayName\":\"Rachel Castro\",\"thumbnail\":\"\",\"followersCount\":25,\"favoriteCount\":23,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614249.jpg\",\"profileUrl\":\"/cook/1072660/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/71722/asian-lettuce-wraps/\",\"videoDetailUrl\":\"/video/1112/asian-lettuce-wraps/\",\"altText\":\"Asian Lettuce Wraps Recipe and Video - Tangy marinated beef is wrapped in refreshing lettuce leaves in this quick and easy Asian lettuce wrap recipe.\",\"titleText\":\"Asian Lettuce Wraps Recipe and 
Video\",\"id\":71722,\"analyticsType\":\"popular\",\"sourceId\":509,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3D\\u0026c=14e93038-433d-47f3-a5db-a80a41b80e8b\",\"contentProviderId\":\"0\"},{\"title\":\"Easy Pork Chops for the Slow Cooker\",\"reviewCount\":130,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7280253.jpg\",\"description\":\"Use prepared soups, soup mix, and ranch dressing mix with onion and pork chops for an easy, savory dinner done in the slow cooker.\",\"stars\":{\"rating\":4.3899998664855957,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":13460465,\"displayName\":\"don t\",\"thumbnail\":\"\",\"followersCount\":4,\"favoriteCount\":221,\"madeRecipesCount\":10,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614250.jpg\",\"profileUrl\":\"/cook/13460465/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/237411/easy-pork-chops-for-the-slow-cooker/\",\"videoDetailUrl\":\"\",\"altText\":\"Easy Pork Chops for the Slow Cooker Recipe - Use prepared soups, soup mix, and ranch dressing mix with onion and pork chops for an easy, savory dinner done in the slow cooker.\",\"titleText\":\"Easy Pork Chops for the Slow Cooker Recipe\",\"id\":237411,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"\\u0022OG\\u0022 Zuppa Toscana Soup\",\"reviewCount\":84,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4461109.jpg\",\"description\":\"Made with Italian sausage and russet potatoes, this quick and easy recipe is a perfect copycat of Olive Garden\\u0027s(R) \\u0022Zuppa Toscana soup.\\u0022\",\"stars\":{\"rating\":4.7899999618530273,\"starsCssClasses\":\"stars 
stars-5\"},\"cook\":{\"id\":6643927,\"displayName\":\"fire1676\",\"thumbnail\":\"\",\"followersCount\":3,\"favoriteCount\":77,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674165.jpg\",\"profileUrl\":\"/cook/6643927/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/255253/og-zuppa-toscana-soup/\",\"videoDetailUrl\":\"\",\"altText\":\"\\u0022OG\\u0022 Zuppa Toscana Soup Recipe - Made with Italian sausage and russet potatoes, this quick and easy recipe is a perfect copycat of Olive Garden\\u0027s(R) \\u0022Zuppa Toscana soup.\\u0022\",\"titleText\":\"\\u0022OG\\u0022 Zuppa Toscana Soup Recipe\",\"id\":255253,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Lemon-Blueberry Pancakes \",\"reviewCount\":42,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7383694.jpg\",\"description\":\"Lemon-blueberry pancakes with juicy berries and tart lemon zest are delicious, and these are quick and easy to prepare for breakfast or brunch.\",\"stars\":{\"rating\":4.3899998664855957,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":767491,\"displayName\":\"IronChefLaurie\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/96373.jpg\",\"followersCount\":9,\"favoriteCount\":24,\"madeRecipesCount\":8,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/96373.jpg\",\"profileUrl\":\"/cook/767491/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/277292/lemon-blueberry-pancakes/\",\"videoDetailUrl\":\"\",\"altText\":\"Lemon-Blueberry Pancakes Recipe - Lemon-blueberry pancakes with juicy berries and tart lemon zest are delicious, and these are quick and easy to prepare for breakfast or 
brunch.\",\"titleText\":\"Lemon-Blueberry Pancakes Recipe\",\"id\":277292,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Classic Goulash\",\"reviewCount\":1453,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1108799.jpg\",\"description\":\"This stove top version of classic American beef goulash makes an easy one-pot meal for the whole family.\",\"videoId\":1295,\"stars\":{\"rating\":4.46999979019165,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":4259535,\"displayName\":\"pathunt\",\"thumbnail\":\"\",\"followersCount\":10,\"favoriteCount\":65,\"madeRecipesCount\":9,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614254.jpg\",\"profileUrl\":\"/cook/4259535/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/213268/classic-goulash/\",\"videoDetailUrl\":\"/video/1295/classic-goulash/\",\"altText\":\"Classic Goulash Recipe and Video - This stove top version of classic American beef goulash makes an easy one-pot meal for the whole family.\",\"titleText\":\"Classic Goulash Recipe and Video\",\"id\":213268,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Grandma\\u0027s English Muffin Bread\",\"reviewCount\":103,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/343742.jpg\",\"description\":\"Like english muffins, slices of this bread must be toasted to taste right. Grandma used to bake this in large greased cans coated with cornmeal, which added to the english muffin appearance. 
Today she uses non-stick pans.\",\"stars\":{\"rating\":4.4600000381469727,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24328418,\"displayName\":\"Stephanie Knewasser\",\"thumbnail\":\"\",\"followersCount\":3,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614259.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/6903/grandmas-english-muffin-bread/\",\"videoDetailUrl\":\"\",\"altText\":\"Grandma\\u0027s English Muffin Bread Recipe - Like english muffins, slices of this bread must be toasted to taste right. Grandma used to bake this in large greased cans coated with cornmeal, which added to the english muffin appearance. Today she uses non-stick pans.\",\"titleText\":\"Grandma\\u0027s English Muffin Bread Recipe\",\"id\":6903,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Best Chocolate Chip Cookies\",\"reviewCount\":11753,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4462051.jpg\",\"description\":\"Crisp edges, chewy middles, and so, so easy to make. 
Try this wildly-popular chocolate chip cookie recipe for yourself.\",\"videoId\":626,\"stars\":{\"rating\":4.619999885559082,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24323046,\"displayName\":\"Dora\",\"thumbnail\":\"\",\"followersCount\":34,\"handle\":\"Dora»Dora\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674169.jpg\",\"profileUrl\":\"/cook/Dora»Dora/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/10813/best-chocolate-chip-cookies/\",\"videoDetailUrl\":\"/video/626/best-chocolate-chip-cookies/\",\"altText\":\"Best Chocolate Chip Cookies Recipe and Video - Crisp edges, chewy middles, and so, so easy to make. Try this wildly-popular chocolate chip cookie recipe for yourself.\",\"titleText\":\"Best Chocolate Chip Cookies Recipe and Video\",\"id\":10813,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=4b1d88ea-d3bd-4a71-ad30-3dc4d45a117b\",\"contentProviderId\":\"451\"},{\"title\":\"Super Duper Slow Cooker Beef Stroganoff\",\"reviewCount\":165,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7466519.jpg\",\"description\":\"This slow cooker version of beef stroganoff uses sour cream, cream cheese, and cream of mushroom soup for a rich, savory classic main dish.\",\"stars\":{\"rating\":4.6399998664855957,\"starsCssClasses\":\"stars 
stars-4-5\"},\"cook\":{\"id\":3066997,\"displayName\":\"cwmom98\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/1911187.jpg\",\"followersCount\":25,\"favoriteCount\":85,\"madeRecipesCount\":8,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/1911187.jpg\",\"profileUrl\":\"/cook/3066997/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/236867/super-duper-slow-cooker-beef-stroganoff/\",\"videoDetailUrl\":\"\",\"altText\":\"Super Duper Slow Cooker Beef Stroganoff Recipe - This slow cooker version of beef stroganoff uses sour cream, cream cheese, and cream of mushroom soup for a rich, savory classic main dish.\",\"titleText\":\"Super Duper Slow Cooker Beef Stroganoff Recipe\",\"id\":236867,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Easy Baked Tilapia\",\"reviewCount\":848,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1101614.jpg\",\"description\":\"Just 35 minutes and six simple ingredients are all you need for this top-rated, flavorful tilapia recipe.\",\"videoId\":3263,\"stars\":{\"rating\":4.5,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":916066,\"displayName\":\"F_Gory\",\"thumbnail\":\"https://images.media-allrecipes.com/global/features/mini/121.jpg\",\"followersCount\":12,\"favoriteCount\":46,\"madeRecipesCount\":18,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/global/features/mini/121.jpg\",\"profileUrl\":\"/cook/916066/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/70163/easy-baked-tilapia/\",\"videoDetailUrl\":\"/video/3263/easy-baked-tilapia/\",\"altText\":\"Easy Baked Tilapia Recipe and Video - Just 35 minutes and six simple ingredients are all you 
need for this top-rated, flavorful tilapia recipe.\",\"titleText\":\"Easy Baked Tilapia Recipe and Video\",\"id\":70163,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Enchilada Meatballs\",\"reviewCount\":144,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/663994.jpg\",\"description\":\"These are a tasty change from the usual since they use cornbread crumbs instead of crackers or regular bread. They\\u0027re always a hit at parties! A friend shared this recipe with me ages ago and I\\u0027ve tweaked it to come up with this.\",\"stars\":{\"rating\":4.2600002288818359,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1354900,\"displayName\":\"Shelley\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/2732931.jpg\",\"followersCount\":13,\"favoriteCount\":24,\"madeRecipesCount\":8,\"handle\":\"angelforalzheimers\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/2732931.jpg\",\"profileUrl\":\"/cook/angelforalzheimers/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/82954/enchilada-meatballs/\",\"videoDetailUrl\":\"\",\"altText\":\"Enchilada Meatballs Recipe - These are a tasty change from the usual since they use cornbread crumbs instead of crackers or regular bread. They\\u0027re always a hit at parties! 
A friend shared this recipe with me ages ago and I\\u0027ve tweaked it to come up with this.\",\"titleText\":\"Enchilada Meatballs Recipe\",\"id\":82954,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Grilled Teriyaki Shrimp and Pineapple Skewers\",\"reviewCount\":13,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6884311.jpg\",\"description\":\"These simple savory-sweet shrimp and pineapple skewers are basted with a homemade teriyaki sauce and cook up in just a few minutes on the grill.\",\"stars\":{\"rating\":4.429999828338623,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":895249,\"displayName\":\"France C\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\",\"followersCount\":749,\"favoriteCount\":2073,\"madeRecipesCount\":511,\"handle\":\"francecevallos\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\",\"profileUrl\":\"/cook/francecevallos/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/275055/grilled-teriyaki-shrimp-and-pineapple-skewers/\",\"videoDetailUrl\":\"\",\"altText\":\"Grilled Teriyaki Shrimp and Pineapple Skewers Recipe - These simple savory-sweet shrimp and pineapple skewers are basted with a homemade teriyaki sauce and cook up in just a few minutes on the grill.\",\"titleText\":\"Grilled Teriyaki Shrimp and Pineapple Skewers Recipe\",\"id\":275055,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"World\\u0027s Best Lasagna\",\"reviewCount\":12728,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/3359675.jpg\",\"description\":\"Filling and satisfying, John Chandler\\u0027s lasagna is our most popular recipe. 
With basil, sausage, ground beef and three types of cheese, it lives up to its name.\",\"videoId\":672,\"stars\":{\"rating\":4.8000001907348633,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":177901,\"displayName\":\"John Chandler\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/3873464.jpg\",\"followersCount\":1339,\"favoriteCount\":27,\"madeRecipesCount\":11,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/3873464.jpg\",\"profileUrl\":\"/cook/177901/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/23600/worlds-best-lasagna/\",\"videoDetailUrl\":\"/video/672/worlds-best-lasagna/\",\"altText\":\"World\\u0027s Best Lasagna Recipe and Video - Filling and satisfying, John Chandler\\u0027s lasagna is our most popular recipe. With basil, sausage, ground beef and three types of cheese, it lives up to its name.\",\"titleText\":\"World\\u0027s Best Lasagna Recipe and Video\",\"id\":23600,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=b87a1e2a-0d1c-416d-a45c-5b65f196c1e0\",\"contentProviderId\":\"451\"},{\"title\":\"Mexican Frittata\",\"reviewCount\":17,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7019430.jpg\",\"description\":\"The addition of peppers, cumin, and salsa provides Mexican flair to this quick and easy breakfast frittata that\\u0027s perfect when cooking for one.\",\"stars\":{\"rating\":4.1599998474121094,\"starsCssClasses\":\"stars 
stars-4\"},\"cook\":{\"id\":9038685,\"displayName\":\"tracyfer\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/895574.jpg\",\"favoriteCount\":31,\"handle\":\"tracyfer\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/895574.jpg\",\"profileUrl\":\"/cook/tracyfer/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/275451/mexican-frittata/\",\"videoDetailUrl\":\"\",\"altText\":\"Mexican Frittata Recipe - The addition of peppers, cumin, and salsa provides Mexican flair to this quick and easy breakfast frittata that\\u0027s perfect when cooking for one.\",\"titleText\":\"Mexican Frittata Recipe\",\"id\":275451,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Andrea\\u0027s Pasta Fagioli\",\"reviewCount\":593,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/3208554.jpg\",\"description\":\"Tomato sauce, cannelini beans, navy beans and ditalini pasta are seasoned with parsley, basil and oregano in this chunky soup.\",\"stars\":{\"rating\":4.440000057220459,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":95992,\"displayName\":\"AVALERIO\",\"thumbnail\":\"\",\"followersCount\":2,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614242.jpg\",\"profileUrl\":\"/cook/95992/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/25690/andreas-pasta-fagioli/\",\"videoDetailUrl\":\"\",\"altText\":\"Andrea\\u0027s Pasta Fagioli Recipe - Tomato sauce, cannelini beans, navy beans and ditalini pasta are seasoned with parsley, basil and oregano in this chunky soup.\",\"titleText\":\"Andrea\\u0027s Pasta Fagioli 
Recipe\",\"id\":25690,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Cheesy Pork Taco Rice\",\"reviewCount\":19,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4538307.jpg\",\"description\":\"For an easy, cheesy Tex-Mex skillet dinner, try this recipe for baked pork taco rice with roasted poblano peppers topped with a quick salsa.\",\"stars\":{\"rating\":4.7300000190734863,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":16589076,\"displayName\":\"Culinary Envy\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/2683453.jpg\",\"followersCount\":87,\"favoriteCount\":187,\"madeRecipesCount\":2,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/2683453.jpg\",\"profileUrl\":\"/cook/16589076/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/260852/cheesy-pork-taco-rice/\",\"videoDetailUrl\":\"\",\"altText\":\"Cheesy Pork Taco Rice Recipe - For an easy, cheesy Tex-Mex skillet dinner, try this recipe for baked pork taco rice with roasted poblano peppers topped with a quick salsa.\",\"titleText\":\"Cheesy Pork Taco Rice Recipe\",\"id\":260852,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Cranberry Orange Rolls\",\"reviewCount\":19,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6280096.jpg\",\"description\":\"A different take on sticky cinnamon rolls, made with cranberries and orange juice and frosted with a simple vanilla glaze.\",\"stars\":{\"rating\":4.21999979019165,\"starsCssClasses\":\"stars 
stars-4\"},\"cook\":{\"id\":3217403,\"displayName\":\"Kim\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/127985.jpg\",\"followersCount\":118,\"favoriteCount\":1942,\"madeRecipesCount\":101,\"handle\":\"k-i-m\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/127985.jpg\",\"profileUrl\":\"/cook/k-i-m/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/270200/cranberry-orange-rolls/\",\"videoDetailUrl\":\"\",\"altText\":\"Cranberry Orange Rolls Recipe - A different take on sticky cinnamon rolls, made with cranberries and orange juice and frosted with a simple vanilla glaze.\",\"titleText\":\"Cranberry Orange Rolls Recipe\",\"id\":270200,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Sheet Pan Parmesan Chicken and Veggies\",\"reviewCount\":33,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6967156.jpg\",\"description\":\"A cheesy Parmesan and panko mixture bakes up into an extra crispy crust on the chicken in this easy sheet pan dinner with herb-roasted vegetables.\",\"stars\":{\"rating\":4.53000020980835,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":4179258,\"displayName\":\"chpmnk42\",\"thumbnail\":\"\",\"followersCount\":4,\"favoriteCount\":691,\"madeRecipesCount\":157,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674135.jpg\",\"profileUrl\":\"/cook/4179258/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/274966/sheet-pan-parmesan-chicken-and-veggies/\",\"videoDetailUrl\":\"\",\"altText\":\"Sheet Pan Parmesan Chicken and Veggies Recipe - A cheesy Parmesan and panko mixture bakes up into an extra crispy crust on the chicken in this easy sheet pan dinner with 
herb-roasted vegetables.\",\"titleText\":\"Sheet Pan Parmesan Chicken and Veggies Recipe\",\"id\":274966,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Classic Overnight Oats\",\"reviewCount\":14,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6106331.jpg\",\"description\":\"Almond Breeze almondmilk and oats make a great no-cook oatmeal that you can make for breakfast up to 3 days in advance.\",\"stars\":{\"rating\":4.78000020980835,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":20583828,\"displayName\":\"Almond Breeze\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/6732378.jpg\",\"followersCount\":27461,\"favoriteCount\":84,\"handle\":\"almondbreeze\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/6732378.jpg\",\"profileUrl\":\"/cook/almondbreeze/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/269574/classic-overnight-oats/\",\"videoDetailUrl\":\"\",\"altText\":\"Classic Overnight Oats Recipe - Almond Breeze almondmilk and oats make a great no-cook oatmeal that you can make for breakfast up to 3 days in advance.\",\"titleText\":\"Classic Overnight Oats Recipe\",\"id\":269574,\"analyticsType\":\"popular\",\"sourceId\":534,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3D\\u0026c=11e8a597-2b0b-4086-a023-987eb0c308f4\",\"contentProviderId\":\"524\"},{\"title\":\"Slow Cooker Southern Lima Beans and Ham\",\"reviewCount\":75,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/5701046.jpg\",\"description\":\"Dry baby lima beans soak overnight before simmering for hours in the slow cooker with a ham bone and Cajun seasoning. 
Serve this budget-friendly dish with Mexican corn bread and a side salad for a complete meal.\",\"stars\":{\"rating\":4.6399998664855957,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":7495292,\"displayName\":\"mammak\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/686025.jpg\",\"followersCount\":25,\"favoriteCount\":18,\"madeRecipesCount\":5,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/686025.jpg\",\"profileUrl\":\"/cook/7495292/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/223495/slow-cooker-southern-lima-beans-and-ham/\",\"videoDetailUrl\":\"\",\"altText\":\"Slow Cooker Southern Lima Beans and Ham Recipe - Dry baby lima beans soak overnight before simmering for hours in the slow cooker with a ham bone and Cajun seasoning. Serve this budget-friendly dish with Mexican corn bread and a side salad for a complete meal.\",\"titleText\":\"Slow Cooker Southern Lima Beans and Ham Recipe\",\"id\":223495,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Cheesy Cauliflower Casserole\",\"reviewCount\":194,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4388819.jpg\",\"description\":\"This cauliflower is baked in a Cheddar cheese sauce dotted with colorful red and green bell pepper pieces.\",\"stars\":{\"rating\":4.440000057220459,\"starsCssClasses\":\"stars 
stars-4-5\"},\"cook\":{\"id\":782865,\"displayName\":\"kelliegirl21\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/319539.jpg\",\"followersCount\":4,\"favoriteCount\":338,\"madeRecipesCount\":35,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/319539.jpg\",\"profileUrl\":\"/cook/782865/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/61434/cheesy-cauliflower-casserole/\",\"videoDetailUrl\":\"\",\"altText\":\"Cheesy Cauliflower Casserole Recipe - This cauliflower is baked in a Cheddar cheese sauce dotted with colorful red and green bell pepper pieces.\",\"titleText\":\"Cheesy Cauliflower Casserole Recipe\",\"id\":61434,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Beef Stroganoff for Instant Pot(R)\",\"reviewCount\":525,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6472891.jpg\",\"description\":\"An electric pressure cooker (such as Instant Pot(R)) makes it easy to get fork-tender beef in this stroganoff recipe served with egg noodles.\",\"videoId\":7859,\"stars\":{\"rating\":4.4600000381469727,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1437524,\"displayName\":\"Lissa\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/5753029.jpg\",\"followersCount\":11,\"favoriteCount\":189,\"madeRecipesCount\":22,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5753029.jpg\",\"profileUrl\":\"/cook/1437524/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/258468/beef-stroganoff-for-instant-pot/\",\"videoDetailUrl\":\"/video/7859/beef-stroganoff-for-instant-pot/\",\"altText\":\"Beef Stroganoff for Instant Pot(R) Recipe and Video - An 
electric pressure cooker (such as Instant Pot(R)) makes it easy to get fork-tender beef in this stroganoff recipe served with egg noodles.\",\"titleText\":\"Beef Stroganoff for Instant Pot(R) Recipe and Video\",\"id\":258468,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Healing Cabbage Soup\",\"reviewCount\":1280,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/3016182.jpg\",\"description\":\"Comfort food on a cold winter\\u0027s night, cabbage simmered in chicken broth is also an age-old folk remedy for curing the common cold.\",\"videoId\":4244,\"stars\":{\"rating\":4.6599998474121094,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1354264,\"displayName\":\"JGCASE\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/367180.jpg\",\"followersCount\":21,\"favoriteCount\":8,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/367180.jpg\",\"profileUrl\":\"/cook/1354264/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/82923/healing-cabbage-soup/\",\"videoDetailUrl\":\"/video/4244/healing-cabbage-soup/\",\"altText\":\"Healing Cabbage Soup Recipe and Video - Comfort food on a cold winter\\u0027s night, cabbage simmered in chicken broth is also an age-old folk remedy for curing the common cold.\",\"titleText\":\"Healing Cabbage Soup Recipe and Video\",\"id\":82923,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"The Best Rolled Sugar Cookies\",\"reviewCount\":8485,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/580841.jpg\",\"description\":\"Perfect for decorating! 
These classic sugar cookies are great for cookie-cutting and decorating during the holidays or anytime you feel festive.\",\"videoId\":624,\"stars\":{\"rating\":4.440000057220459,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24323846,\"displayName\":\"J. Saunders\",\"thumbnail\":\"\",\"followersCount\":19,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614284.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/10402/the-best-rolled-sugar-cookies/\",\"videoDetailUrl\":\"/video/624/the-best-rolled-sugar-cookies/\",\"altText\":\"The Best Rolled Sugar Cookies Recipe and Video - Perfect for decorating! These classic sugar cookies are great for cookie-cutting and decorating during the holidays or anytime you feel festive.\",\"titleText\":\"The Best Rolled Sugar Cookies Recipe and Video\",\"id\":10402,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=e4ce2076-a404-4177-a98d-cdc1f9501f16\",\"contentProviderId\":\"451\"},{\"title\":\"Baked Lemon-Butter Chicken Thighs\",\"reviewCount\":35,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6505068.jpg\",\"description\":\"These low-carb, keto chicken thighs are basted with a tangy and rich butter sauce. 
They are easy enough for a weeknight meal and guaranteed to impress the pickiest eaters.\",\"stars\":{\"rating\":4.6100001335144043,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":895249,\"displayName\":\"France C\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\",\"followersCount\":749,\"favoriteCount\":2073,\"madeRecipesCount\":511,\"handle\":\"francecevallos\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\",\"profileUrl\":\"/cook/francecevallos/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/272544/baked-lemon-butter-chicken-thighs/\",\"videoDetailUrl\":\"\",\"altText\":\"Baked Lemon-Butter Chicken Thighs Recipe - These low-carb, keto chicken thighs are basted with a tangy and rich butter sauce. They are easy enough for a weeknight meal and guaranteed to impress the pickiest eaters.\",\"titleText\":\"Baked Lemon-Butter Chicken Thighs Recipe\",\"id\":272544,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Delicious Ham and Potato Soup\",\"reviewCount\":10400,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/962656.jpg\",\"description\":\"A hearty, easy soup that\\u0027s ready in 45 minutes. 
Perfect for using up leftover ham.\",\"videoId\":994,\"stars\":{\"rating\":4.8400001525878906,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":746864,\"displayName\":\"ELLIE11\",\"thumbnail\":\"\",\"followersCount\":61,\"favoriteCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674162.jpg\",\"profileUrl\":\"/cook/746864/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/56927/delicious-ham-and-potato-soup/\",\"videoDetailUrl\":\"/video/994/delicious-ham-and-potato-soup/\",\"altText\":\"Delicious Ham and Potato Soup Recipe and Video - A hearty, easy soup that\\u0027s ready in 45 minutes. Perfect for using up leftover ham.\",\"titleText\":\"Delicious Ham and Potato Soup Recipe and Video\",\"id\":56927,\"analyticsType\":\"popular\",\"sourceId\":437,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3D\\u0026c=b7ef2449-d37f-4da3-aeae-3649a0dc8715\",\"contentProviderId\":\"0\"},{\"title\":\"Chicken Parmesan\",\"reviewCount\":2388,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4572704.jpg\",\"description\":\"A classic Italian dish prepared with tomato sauce and mozzarella, with a few additions by Chef John. 
Sure to impress your friends and family!\",\"videoId\":352,\"stars\":{\"rating\":4.820000171661377,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":8601924,\"displayName\":\"Chef John\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/2267470.jpg\",\"followersCount\":70108,\"favoriteCount\":636,\"madeRecipesCount\":309,\"handle\":\"foodwisheswithchefjohn\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/2267470.jpg\",\"profileUrl\":\"/cook/foodwisheswithchefjohn/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/223042/chicken-parmesan/\",\"videoDetailUrl\":\"/video/352/chicken-parmesan/\",\"altText\":\"Chicken Parmesan Recipe and Video - A classic Italian dish prepared with tomato sauce and mozzarella, with a few additions by Chef John. Sure to impress your friends and family!\",\"titleText\":\"Chicken Parmesan Recipe and Video\",\"id\":223042,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=4e751554-c697-4285-9391-f322867fe224\",\"contentProviderId\":\"451\"},{\"title\":\"Slow Cooker Lemon Garlic Chicken II\",\"reviewCount\":1127,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/444473.jpg\",\"description\":\"Seasoned, browned chicken breasts slow cooked with lemon juice, garlic, and chicken bouillon. A wonderful \\u0027fix and forget\\u0027 recipe that is easy and pleases just about everyone. 
Great served with rice or pasta, or even alone.\",\"videoId\":4107,\"stars\":{\"rating\":4.1500000953674316,\"starsCssClasses\":\"stars stars-4\"},\"cook\":{\"id\":24321795,\"displayName\":\"Carla Joy\",\"thumbnail\":\"\",\"followersCount\":1,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674165.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/18032/slow-cooker-lemon-garlic-chicken-ii/\",\"videoDetailUrl\":\"/video/4107/slow-cooker-lemon-garlic-chicken-ii/\",\"altText\":\"Slow Cooker Lemon Garlic Chicken II Recipe and Video - Seasoned, browned chicken breasts slow cooked with lemon juice, garlic, and chicken bouillon. A wonderful \\u0027fix and forget\\u0027 recipe that is easy and pleases just about everyone. Great served with rice or pasta, or even alone.\",\"titleText\":\"Slow Cooker Lemon Garlic Chicken II Recipe and Video\",\"id\":18032,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Roasted Cabbage\",\"reviewCount\":192,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1967562.jpg\",\"description\":\"Roasted cabbage, seasoned with garlic powder, red pepper flakes, and salt, is a quick and easy side dish for weeknight dinners.\",\"videoId\":7234,\"stars\":{\"rating\":4.429999828338623,\"starsCssClasses\":\"stars 
stars-4-5\"},\"cook\":{\"id\":7095900,\"displayName\":\"samnan2\",\"thumbnail\":\"\",\"followersCount\":15,\"favoriteCount\":5,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674154.jpg\",\"profileUrl\":\"/cook/7095900/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/240800/roasted-cabbage/\",\"videoDetailUrl\":\"/video/7234/roasted-cabbage/\",\"altText\":\"Roasted Cabbage Recipe and Video - Roasted cabbage, seasoned with garlic powder, red pepper flakes, and salt, is a quick and easy side dish for weeknight dinners.\",\"titleText\":\"Roasted Cabbage Recipe and Video\",\"id\":240800,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Easy Slow Cooker Cheesy Potato Soup with Ham\",\"reviewCount\":25,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6559810.jpg\",\"description\":\"Use up leftover holiday ham in this rich and creamy soup. 
Using shortcuts like frozen hash brown potatoes, the soup takes only minutes to prepare and the slow cooker does the rest.\",\"stars\":{\"rating\":4.619999885559082,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":5118896,\"displayName\":\"fabeverydayblog\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/4270835.jpg\",\"followersCount\":148,\"favoriteCount\":294,\"madeRecipesCount\":464,\"handle\":\"fabeveryday\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/4270835.jpg\",\"profileUrl\":\"/cook/fabeveryday/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/273023/easy-slow-cooker-cheesy-potato-soup-with-ham/\",\"videoDetailUrl\":\"\",\"altText\":\"Easy Slow Cooker Cheesy Potato Soup with Ham Recipe - Use up leftover holiday ham in this rich and creamy soup. Using shortcuts like frozen hash brown potatoes, the soup takes only minutes to prepare and the slow cooker does the rest.\",\"titleText\":\"Easy Slow Cooker Cheesy Potato Soup with Ham Recipe\",\"id\":273023,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Chef John\\u0027s Perfect Prime Rib\",\"reviewCount\":1463,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4886145.jpg\",\"description\":\"Perfectly cooked medium-rare prime rib is the result every time you use Chef John\\u0027s mathematical method.\",\"videoId\":479,\"stars\":{\"rating\":4.7699999809265137,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":8601924,\"displayName\":\"Chef 
John\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/2267470.jpg\",\"followersCount\":70108,\"favoriteCount\":636,\"madeRecipesCount\":309,\"handle\":\"foodwisheswithchefjohn\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/2267470.jpg\",\"profileUrl\":\"/cook/foodwisheswithchefjohn/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/221958/chef-johns-perfect-prime-rib/\",\"videoDetailUrl\":\"/video/479/chef-johns-perfect-prime-rib/\",\"altText\":\"Chef John\\u0027s Perfect Prime Rib Recipe and Video - Perfectly cooked medium-rare prime rib is the result every time you use Chef John\\u0027s mathematical method.\",\"titleText\":\"Chef John\\u0027s Perfect Prime Rib Recipe and Video\",\"id\":221958,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=9dd58138-5e01-467e-bf68-1335725cec35\",\"contentProviderId\":\"451\"},{\"title\":\"Quick and Easy Monkey Bread\",\"reviewCount\":219,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/3026392.jpg\",\"description\":\"Frozen dough and butterscotch pudding mix to get the job done much more quickly. 
The pudding mix, nuts, cinnamon sugar and butter make a gooey pullapart bread everyone will want to get their hands on.\",\"stars\":{\"rating\":4.4899997711181641,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":39277,\"displayName\":\"MARBALET\",\"thumbnail\":\"\",\"followersCount\":210,\"favoriteCount\":472,\"madeRecipesCount\":4,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674134.jpg\",\"profileUrl\":\"/cook/39277/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/7110/quick-and-easy-monkey-bread/\",\"videoDetailUrl\":\"\",\"altText\":\"Quick and Easy Monkey Bread Recipe - Frozen dough and butterscotch pudding mix to get the job done much more quickly. The pudding mix, nuts, cinnamon sugar and butter make a gooey pullapart bread everyone will want to get their hands on.\",\"titleText\":\"Quick and Easy Monkey Bread Recipe\",\"id\":7110,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Tex Mex Ultimate Carnitas Grilled Cheese\",\"reviewCount\":14,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4526539.jpg\",\"description\":\"Carnitas, avocado, and cheese are grilled between thick slices of bread with more cheese on the outside in this Tex-Mex grilled cheese.\",\"videoId\":7626,\"stars\":{\"rating\":4.570000171661377,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":21650794,\"displayName\":\"Julie 
Hubert\",\"thumbnail\":\"\",\"followersCount\":102,\"madeRecipesCount\":3,\"handle\":\"juliehubert\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674139.jpg\",\"profileUrl\":\"/cook/juliehubert/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/258223/tex-mex-ultimate-carnitas-grilled-cheese/\",\"videoDetailUrl\":\"/video/7626/tex-mex-ultimate-carnitas-grilled-cheese/\",\"altText\":\"Tex Mex Ultimate Carnitas Grilled Cheese Recipe and Video - Carnitas, avocado, and cheese are grilled between thick slices of bread with more cheese on the outside in this Tex-Mex grilled cheese.\",\"titleText\":\"Tex Mex Ultimate Carnitas Grilled Cheese Recipe and Video\",\"id\":258223,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Easy Meatloaf\",\"reviewCount\":5648,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/682282.jpg\",\"description\":\"This easy meatloaf recipe is one of our best--made over 7,000 times and never disappoints! This no-fail meatloaf makes 8 servings.\",\"videoId\":675,\"stars\":{\"rating\":4.5799999237060547,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24324003,\"displayName\":\"Janet Caldwell\",\"thumbnail\":\"\",\"followersCount\":20,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674130.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/16354/easy-meatloaf/\",\"videoDetailUrl\":\"/video/675/easy-meatloaf/\",\"altText\":\"Easy Meatloaf Recipe and Video - This easy meatloaf recipe is one of our best--made over 7,000 times and never disappoints! 
This no-fail meatloaf makes 8 servings.\",\"titleText\":\"Easy Meatloaf Recipe and Video\",\"id\":16354,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Instant Pot(R) Colorado Chile Verde\",\"reviewCount\":8,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7174368.jpg\",\"description\":\"Cooked in the Instant Pot(R), this Colorado chile verde recipe is flavored with Hatch chiles and can be served with bread, tortillas, rice, or even over a breakfast burrito!\",\"stars\":{\"rating\":4.0999999046325684,\"starsCssClasses\":\"stars stars-4\"},\"cook\":{\"id\":1800959,\"displayName\":\"Diana71\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/7429665.jpg\",\"followersCount\":290,\"favoriteCount\":1620,\"madeRecipesCount\":251,\"handle\":\"diana71\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/7429665.jpg\",\"profileUrl\":\"/cook/diana71/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/276653/instant-pot-colorado-chile-verde/\",\"videoDetailUrl\":\"\",\"altText\":\"Instant Pot(R) Colorado Chile Verde Recipe - Cooked in the Instant Pot(R), this Colorado chile verde recipe is flavored with Hatch chiles and can be served with bread, tortillas, rice, or even over a breakfast burrito!\",\"titleText\":\"Instant Pot(R) Colorado Chile Verde Recipe\",\"id\":276653,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Janet\\u0027s Rich Banana Bread\",\"reviewCount\":6269,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/171761.jpg\",\"description\":\"Sour cream guarantees a moist and tender loaf. 
And bananas are sliced instead of mashed in this recipe, giving a concentrated banana taste in every bite.\",\"videoId\":1027,\"stars\":{\"rating\":4.820000171661377,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":8133,\"displayName\":\"vjonsson\",\"thumbnail\":\"\",\"followersCount\":50,\"madeRecipesCount\":3,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674138.jpg\",\"profileUrl\":\"/cook/8133/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/17066/janets-rich-banana-bread/\",\"videoDetailUrl\":\"/video/1027/janets-rich-banana-bread/\",\"altText\":\"Janet\\u0027s Rich Banana Bread Recipe and Video - Sour cream guarantees a moist and tender loaf. And bananas are sliced instead of mashed in this recipe, giving a concentrated banana taste in every bite.\",\"titleText\":\"Janet\\u0027s Rich Banana Bread Recipe and Video\",\"id\":17066,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Christmas Breakfast Sausage Casserole\",\"reviewCount\":1553,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7274342.jpg\",\"description\":\"Sausage, eggs, bread, and cheese snuggle down in a casserole, chill overnight, and bake in the morning. It\\u0027s a hearty dish worth waking up for.\",\"videoId\":850,\"stars\":{\"rating\":4.5799999237060547,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24326107,\"displayName\":\"M.K. 
Meredith\",\"thumbnail\":\"\",\"followersCount\":12,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674161.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/9247/christmas-breakfast-sausage-casserole/\",\"videoDetailUrl\":\"/video/850/christmas-breakfast-sausage-casserole/\",\"altText\":\"Christmas Breakfast Sausage Casserole Recipe and Video - Sausage, eggs, bread, and cheese snuggle down in a casserole, chill overnight, and bake in the morning. It\\u0027s a hearty dish worth waking up for.\",\"titleText\":\"Christmas Breakfast Sausage Casserole Recipe and Video\",\"id\":9247,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=9be9d3b5-a5eb-4e56-9671-cc288722355d\",\"contentProviderId\":\"451\"},{\"title\":\"Good Old Fashioned Pancakes\",\"reviewCount\":11947,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4948036.jpg\",\"description\":\"Make delicious, fluffy pancakes from scratch. 
This recipe uses 7 ingredients you probably already have.\",\"videoId\":2619,\"stars\":{\"rating\":4.5900001525878906,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":663357,\"displayName\":\"dakota kelly\",\"thumbnail\":\"\",\"followersCount\":112,\"favoriteCount\":1,\"madeRecipesCount\":2,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674168.jpg\",\"profileUrl\":\"/cook/663357/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/21014/good-old-fashioned-pancakes/\",\"videoDetailUrl\":\"/video/2619/good-old-fashioned-pancakes/\",\"altText\":\"Good Old Fashioned Pancakes Recipe and Video - Make delicious, fluffy pancakes from scratch. This recipe uses 7 ingredients you probably already have.\",\"titleText\":\"Good Old Fashioned Pancakes Recipe and Video\",\"id\":21014,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=1e02431b-1ba7-4365-931a-941ebf6efffe\",\"contentProviderId\":\"451\"},{\"title\":\"Baked Ziti III\",\"reviewCount\":956,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/5643924.jpg\",\"description\":\"Layers of ziti pasta, beefy tomato sauce, sour cream and Italian cheeses get a final dash of flavor with a generous sprinkling of chopped fresh basil before baking.\",\"stars\":{\"rating\":4.7100000381469727,\"starsCssClasses\":\"stars 
stars-4-5\"},\"cook\":{\"id\":33870,\"displayName\":\"VERDIEIIIS\",\"thumbnail\":\"\",\"followersCount\":12,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674135.jpg\",\"profileUrl\":\"/cook/33870/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/17098/baked-ziti-iii/\",\"videoDetailUrl\":\"\",\"altText\":\"Baked Ziti III Recipe - Layers of ziti pasta, beefy tomato sauce, sour cream and Italian cheeses get a final dash of flavor with a generous sprinkling of chopped fresh basil before baking.\",\"titleText\":\"Baked Ziti III Recipe\",\"id\":17098,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Guacamole\",\"reviewCount\":4618,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4521556.jpg\",\"description\":\"Cilantro and cayenne give this classic guacamole a tasty kick. Serve it smooth or chunky.\",\"videoId\":646,\"stars\":{\"rating\":4.809999942779541,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":24321584,\"displayName\":\"Bob Cody\",\"thumbnail\":\"\",\"followersCount\":17,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674162.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/14231/guacamole/\",\"videoDetailUrl\":\"/video/646/guacamole/\",\"altText\":\"Guacamole Recipe and Video - Cilantro and cayenne give this classic guacamole a tasty kick. 
Serve it smooth or chunky.\",\"titleText\":\"Guacamole Recipe and Video\",\"id\":14231,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=51255a0e-53cf-4d52-9f23-04a5eeb1073e\",\"contentProviderId\":\"451\"},{\"title\":\"Italian Sausage Soup\",\"reviewCount\":2403,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6285692.jpg\",\"description\":\"This soup is easy to put together, and the flavor of the spicy sausage is balanced nicely by Great Northern beans, zucchini, fresh spinach, and carrots. Makes a delicious winter supper.\",\"videoId\":3253,\"stars\":{\"rating\":4.809999942779541,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":373048,\"displayName\":\"SALLYJUN\",\"thumbnail\":\"\",\"followersCount\":33,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614242.jpg\",\"profileUrl\":\"/cook/373048/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/12942/italian-sausage-soup/\",\"videoDetailUrl\":\"/video/3253/italian-sausage-soup/\",\"altText\":\"Italian Sausage Soup Recipe and Video - This soup is easy to put together, and the flavor of the spicy sausage is balanced nicely by Great Northern beans, zucchini, fresh spinach, and carrots. 
Makes a delicious winter supper.\",\"titleText\":\"Italian Sausage Soup Recipe and Video\",\"id\":12942,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=6b70a0cb-e098-4a56-b12d-dab1d98f6adc\",\"contentProviderId\":\"451\"},{\"title\":\"Garlic Prime Rib\",\"reviewCount\":1656,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6036515.jpg\",\"description\":\"A garlic, thyme, and olive oil marinade covers your prime rib roast for 5-star results. This “secret” recipe is a secret no more!\",\"videoId\":2690,\"stars\":{\"rating\":4.7899999618530273,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":732827,\"displayName\":\"Chef Mike\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/734695.jpg\",\"followersCount\":268,\"favoriteCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/734695.jpg\",\"profileUrl\":\"/cook/732827/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/56352/garlic-prime-rib/\",\"videoDetailUrl\":\"/video/2690/garlic-prime-rib/\",\"altText\":\"Garlic Prime Rib Recipe and Video - A garlic, thyme, and olive oil marinade covers your prime rib roast for 5-star results. 
This “secret” recipe is a secret no more!\",\"titleText\":\"Garlic Prime Rib Recipe and Video\",\"id\":56352,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=30f32816-997a-4f11-87db-cd9992ffca83\",\"contentProviderId\":\"451\"},{\"title\":\"Scrambled Egg Muffins\",\"reviewCount\":269,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/2414284.jpg\",\"description\":\"These scrambled egg muffins with plenty of sausage and Cheddar cheese make a filling and fun treat at your next brunch. They\\u0027re pretty, hearty, and fun to serve.\",\"videoId\":5198,\"stars\":{\"rating\":4.5999999046325684,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":7669398,\"displayName\":\"aveal\",\"thumbnail\":\"\",\"followersCount\":10,\"favoriteCount\":23,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674139.jpg\",\"profileUrl\":\"/cook/7669398/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/222586/scrambled-egg-muffins/\",\"videoDetailUrl\":\"/video/5198/scrambled-egg-muffins/\",\"altText\":\"Scrambled Egg Muffins Recipe and Video - These scrambled egg muffins with plenty of sausage and Cheddar cheese make a filling and fun treat at your next brunch. 
They\\u0027re pretty, hearty, and fun to serve.\",\"titleText\":\"Scrambled Egg Muffins Recipe and Video\",\"id\":222586,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Baked Paprika-Parmesan Chicken\",\"reviewCount\":484,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1039526.jpg\",\"description\":\"Chicken is breaded with a Parmesan-paprika coating and baked until golden and crispy in this easy recipe. This unique combination of ingredients makes a fantastic dish that all your family will love.\",\"videoId\":9177,\"stars\":{\"rating\":4.4600000381469727,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1008751,\"displayName\":\"Renae\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/6446214.jpg\",\"followersCount\":24,\"favoriteCount\":259,\"madeRecipesCount\":136,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/6446214.jpg\",\"profileUrl\":\"/cook/1008751/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/102235/baked-paprika-parmesan-chicken/\",\"videoDetailUrl\":\"/video/9177/baked-paprika-parmesan-chicken/\",\"altText\":\"Baked Paprika-Parmesan Chicken Recipe and Video - Chicken is breaded with a Parmesan-paprika coating and baked until golden and crispy in this easy recipe. This unique combination of ingredients makes a fantastic dish that all your family will love.\",\"titleText\":\"Baked Paprika-Parmesan Chicken Recipe and Video\",\"id\":102235,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Roasted Brussels Sprouts\",\"reviewCount\":2934,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/812447.jpg\",\"description\":\"Brussels sprouts are simply seasoned with salt, pepper, and olive oil, then slow-roasted in a very hot oven until darkest brown. 
They are the perfect combination of sweet and salty, and make for perfect snack leftovers straight from the fridge the next day!\",\"videoId\":1324,\"stars\":{\"rating\":4.5900001525878906,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":788781,\"displayName\":\"JAQATAC\",\"thumbnail\":\"\",\"followersCount\":32,\"favoriteCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674168.jpg\",\"profileUrl\":\"/cook/788781/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/67952/roasted-brussels-sprouts/\",\"videoDetailUrl\":\"/video/1324/roasted-brussels-sprouts/\",\"altText\":\"Roasted Brussels Sprouts Recipe and Video - Brussels sprouts are simply seasoned with salt, pepper, and olive oil, then slow-roasted in a very hot oven until darkest brown. They are the perfect combination of sweet and salty, and make for perfect snack leftovers straight from the fridge the next day!\",\"titleText\":\"Roasted Brussels Sprouts Recipe and Video\",\"id\":67952,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=709c5835-789f-47fd-93fc-c734dd525e8a\",\"contentProviderId\":\"451\"},{\"title\":\"Absolutely Ultimate Potato Soup\",\"reviewCount\":2044,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/168555.jpg\",\"description\":\"This easy potato soup is made with chicken stock, bacon, onion, celery, and herbs, then finished with a little cream for extra richness.\",\"videoId\":4060,\"stars\":{\"rating\":4.6700000762939453,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24324916,\"displayName\":\"Karena\",\"thumbnail\":\"\",\"followersCount\":13,\"handle\":\"[email 
protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674167.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/13218/absolutely-ultimate-potato-soup/\",\"videoDetailUrl\":\"/video/4060/absolutely-ultimate-potato-soup/\",\"altText\":\"Absolutely Ultimate Potato Soup Recipe and Video - This easy potato soup is made with chicken stock, bacon, onion, celery, and herbs, then finished with a little cream for extra richness.\",\"titleText\":\"Absolutely Ultimate Potato Soup Recipe and Video\",\"id\":13218,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Cheddar Bay Biscuits\",\"reviewCount\":1308,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4536295.jpg\",\"description\":\"Buttermilk baking mix (e.g. Bisquick) does the trick in these savory biscuits, with the addition of Cheddar cheese, garlic, parsley, onion and water.\",\"videoId\":4859,\"stars\":{\"rating\":4.5799999237060547,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24322343,\"displayName\":\"Cookie\",\"thumbnail\":\"\",\"followersCount\":1,\"handle\":\"Cookie»Cookie\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674136.jpg\",\"profileUrl\":\"/cook/Cookie»Cookie/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/7146/cheddar-bay-biscuits/\",\"videoDetailUrl\":\"/video/4859/cheddar-bay-biscuits/\",\"altText\":\"Cheddar Bay Biscuits Recipe and Video - Buttermilk baking mix (e.g. 
Bisquick) does the trick in these savory biscuits, with the addition of Cheddar cheese, garlic, parsley, onion and water.\",\"titleText\":\"Cheddar Bay Biscuits Recipe and Video\",\"id\":7146,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Cabbage Jambalaya\",\"reviewCount\":316,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/900365.jpg\",\"description\":\"An easy recipe that will have the kids eating cabbage like it\\u0027s going out of style! Ground beef and sausage are simmered together with tomatoes, onion, celery, cabbage and rice.\",\"videoId\":7147,\"stars\":{\"rating\":4.429999828338623,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24328590,\"displayName\":\"Susan\",\"thumbnail\":\"\",\"followersCount\":2,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674146.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/36110/cabbage-jambalaya/\",\"videoDetailUrl\":\"/video/7147/cabbage-jambalaya/\",\"altText\":\"Cabbage Jambalaya Recipe and Video - An easy recipe that will have the kids eating cabbage like it\\u0027s going out of style! Ground beef and sausage are simmered together with tomatoes, onion, celery, cabbage and rice.\",\"titleText\":\"Cabbage Jambalaya Recipe and Video\",\"id\":36110,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Baked Teriyaki Chicken\",\"reviewCount\":5263,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4530047.jpg\",\"description\":\"A spicy, homemade teriyaki of soy sauce, cider vinegar, ginger and garlic enlivens chicken thighs or pieces. 
Easy to double for a large group.\",\"videoId\":1070,\"stars\":{\"rating\":4.6500000953674316,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":-1,\"displayName\":\"Marian Collins\",\"thumbnail\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614242.jpg\",\"profileUrl\":\"\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/9023/baked-teriyaki-chicken/\",\"videoDetailUrl\":\"/video/1070/baked-teriyaki-chicken/\",\"altText\":\"Baked Teriyaki Chicken Recipe and Video - A spicy, homemade teriyaki of soy sauce, cider vinegar, ginger and garlic enlivens chicken thighs or pieces. Easy to double for a large group.\",\"titleText\":\"Baked Teriyaki Chicken Recipe and Video\",\"id\":9023,\"analyticsType\":\"popular\",\"sourceId\":254,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3D\\u0026c=21e146e9-590f-45ae-b7bc-070094c9b874\",\"contentProviderId\":\"0\"},{\"title\":\"Chicken Pot Pie IX\",\"reviewCount\":8687,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4535759.jpg\",\"description\":\"A delicious chicken pot pie made from scratch with carrots, peas, and celery for a comfort food classic.\",\"videoId\":2567,\"stars\":{\"rating\":4.809999942779541,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":257438,\"displayName\":\"Robbie 
Rice\",\"thumbnail\":\"\",\"followersCount\":474,\"favoriteCount\":112,\"madeRecipesCount\":76,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674160.jpg\",\"profileUrl\":\"/cook/257438/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/26317/chicken-pot-pie-ix/\",\"videoDetailUrl\":\"/video/2567/chicken-pot-pie-ix/\",\"altText\":\"Chicken Pot Pie IX Recipe and Video - A delicious chicken pot pie made from scratch with carrots, peas, and celery for a comfort food classic.\",\"titleText\":\"Chicken Pot Pie IX Recipe and Video\",\"id\":26317,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=39f50b02-e492-413c-96f8-7882cc4312e9\",\"contentProviderId\":\"451\"},{\"title\":\"Vietnamese Spring Roll Pizza\",\"reviewCount\":5,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6979709.jpg\",\"description\":\"Cauliflower crust serves as the base for this better-for-you pizza showcasing all of the fresh flavors of a Vietnamese spring roll with chicken, broccoli slaw, and herbs.\",\"stars\":{\"rating\":4.75,\"starsCssClasses\":\"stars 
stars-5\"},\"cook\":{\"id\":3991009,\"displayName\":\"SunnyDaysNora\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/5451024.jpg\",\"followersCount\":420,\"favoriteCount\":1836,\"madeRecipesCount\":1030,\"handle\":\"greengirl24\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5451024.jpg\",\"profileUrl\":\"/cook/greengirl24/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/275384/vietnamese-spring-roll-pizza/\",\"videoDetailUrl\":\"\",\"altText\":\"Vietnamese Spring Roll Pizza Recipe - Cauliflower crust serves as the base for this better-for-you pizza showcasing all of the fresh flavors of a Vietnamese spring roll with chicken, broccoli slaw, and herbs.\",\"titleText\":\"Vietnamese Spring Roll Pizza Recipe\",\"id\":275384,\"analyticsType\":\"popular\",\"sourceId\":582,\"contentProviderId\":\"0\"},{\"title\":\"Easy Alice Springs Chicken\",\"reviewCount\":20,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4457593.jpg\",\"description\":\"In this quick and easy Alice Springs copycat recipe, chicken breasts are wrapped with hickory-smoked bacon, baked, and covered with cheese.\",\"stars\":{\"rating\":3.2899999618530273,\"starsCssClasses\":\"stars stars-3-5\"},\"cook\":{\"id\":2244087,\"displayName\":\"Lisa the Quik Learner\",\"thumbnail\":\"https://images.media-allrecipes.com/global/features/mini/54.jpg\",\"favoriteCount\":32,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/global/features/mini/54.jpg\",\"profileUrl\":\"/cook/2244087/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/254326/easy-alice-springs-chicken/\",\"videoDetailUrl\":\"\",\"altText\":\"Easy Alice Springs Chicken Recipe - In this quick and easy Alice 
Springs copycat recipe, chicken breasts are wrapped with hickory-smoked bacon, baked, and covered with cheese.\",\"titleText\":\"Easy Alice Springs Chicken Recipe\",\"id\":254326,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Italian Sausage Soup with Tortellini\",\"reviewCount\":1847,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7167485.jpg\",\"description\":\"Italian sausage, garlic, tomatoes, red wine, and tortellini - this soup combines favorite ingredients from an Italian kitchen. You can use sweet or hot sausage, depending on your tastes, and fresh herbs if you have them on hand.\",\"videoId\":3015,\"stars\":{\"rating\":4.8299999237060547,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":24326438,\"displayName\":\"Mary P\",\"thumbnail\":\"\",\"followersCount\":4,\"handle\":\"Mary P»Mary P\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614252.jpg\",\"profileUrl\":\"/cook/Mary P»Mary P/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/13436/italian-sausage-soup-with-tortellini/\",\"videoDetailUrl\":\"/video/3015/italian-sausage-soup-with-tortellini/\",\"altText\":\"Italian Sausage Soup with Tortellini Recipe and Video - Italian sausage, garlic, tomatoes, red wine, and tortellini - this soup combines favorite ingredients from an Italian kitchen. 
You can use sweet or hot sausage, depending on your tastes, and fresh herbs if you have them on hand.\",\"titleText\":\"Italian Sausage Soup with Tortellini Recipe and Video\",\"id\":13436,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=ad371031-f1bd-4556-b7ac-820fb6029e5f\",\"contentProviderId\":\"451\"},{\"title\":\"Perfect Crab-Stuffed Mushrooms\",\"reviewCount\":113,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/5939577.jpg\",\"description\":\"Button mushrooms stuffed with crab and Monterey Jack cheese make a delicious bite-size appetizer. Your guests are sure to be dazzled!\",\"stars\":{\"rating\":4.53000020980835,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":45493,\"displayName\":\"Lisa Felton Nash\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/2010072.jpg\",\"followersCount\":16,\"favoriteCount\":87,\"madeRecipesCount\":8,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/2010072.jpg\",\"profileUrl\":\"/cook/45493/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/229826/perfect-crab-stuffed-mushrooms/\",\"videoDetailUrl\":\"\",\"altText\":\"Perfect Crab-Stuffed Mushrooms Recipe - Button mushrooms stuffed with crab and Monterey Jack cheese make a delicious bite-size appetizer. 
Your guests are sure to be dazzled!\",\"titleText\":\"Perfect Crab-Stuffed Mushrooms Recipe\",\"id\":229826,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Easy Cabbage Roll Casserole\",\"reviewCount\":103,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/3350217.jpg\",\"description\":\"This time-saving casserole has all the flavors of the classic cabbage roll, but without all the work.\",\"videoId\":8787,\"stars\":{\"rating\":4.4800000190734863,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":48400,\"displayName\":\"Tracy\",\"thumbnail\":\"\",\"followersCount\":1,\"favoriteCount\":215,\"madeRecipesCount\":5,\"handle\":\"tracy1\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674162.jpg\",\"profileUrl\":\"/cook/tracy1/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/231094/easy-cabbage-roll-casserole/\",\"videoDetailUrl\":\"/video/8787/easy-cabbage-roll-casserole/\",\"altText\":\"Easy Cabbage Roll Casserole Recipe and Video - This time-saving casserole has all the flavors of the classic cabbage roll, but without all the work.\",\"titleText\":\"Easy Cabbage Roll Casserole Recipe and Video\",\"id\":231094,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Hamburger Steak with Onions and Gravy\",\"reviewCount\":2738,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/5938981.jpg\",\"description\":\"An easy-to-make classic featuring tasty hamburger \\u0027steaks\\u0027 smothered in gravy and onions. 
It\\u0027s a great way to dress up a pound of ground beef, and you probably have all the ingredients on hand!\",\"videoId\":4266,\"stars\":{\"rating\":4.5999999046325684,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1240035,\"displayName\":\"Anne Marie Sweden\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/407233.jpg\",\"followersCount\":64,\"favoriteCount\":82,\"madeRecipesCount\":23,\"handle\":\"annemariesweden\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/407233.jpg\",\"profileUrl\":\"/cook/annemariesweden/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/78370/hamburger-steak-with-onions-and-gravy/\",\"videoDetailUrl\":\"/video/4266/hamburger-steak-with-onions-and-gravy/\",\"altText\":\"Hamburger Steak with Onions and Gravy Recipe and Video - An easy-to-make classic featuring tasty hamburger \\u0027steaks\\u0027 smothered in gravy and onions. It\\u0027s a great way to dress up a pound of ground beef, and you probably have all the ingredients on hand!\",\"titleText\":\"Hamburger Steak with Onions and Gravy Recipe and Video\",\"id\":78370,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Banana Cake VI\",\"reviewCount\":2206,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/848894.jpg\",\"description\":\"This cake was first made for me by a friend while I was visiting her after she had delivered her 11th child. 
I told her, \\u0027I should have baked for you!\\u0027\",\"videoId\":4352,\"stars\":{\"rating\":4.7600002288818359,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":3340,\"displayName\":\"Cindy Carnes\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/68607.jpg\",\"followersCount\":196,\"favoriteCount\":143,\"madeRecipesCount\":60,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/68607.jpg\",\"profileUrl\":\"/cook/3340/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/8333/banana-cake-vi/\",\"videoDetailUrl\":\"/video/4352/banana-cake-vi/\",\"altText\":\"Banana Cake VI Recipe and Video - This cake was first made for me by a friend while I was visiting her after she had delivered her 11th child. I told her, \\u0027I should have baked for you!\\u0027\",\"titleText\":\"Banana Cake VI Recipe and Video\",\"id\":8333,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=57abd377-4fca-4dc4-9233-7bb416c2391a\",\"contentProviderId\":\"451\"},{\"title\":\"Boilermaker Tailgate Chili\",\"reviewCount\":5217,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/890638.jpg\",\"description\":\"Ground beef, Italian sausage, beans, and a tomato base come together with lots of flavor and spice in this popular chili recipe. 
It\\u0027s perfect for tailgating before football games or any time of year.\",\"videoId\":1230,\"stars\":{\"rating\":4.820000171661377,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":591623,\"displayName\":\"MIGHTYPURDUE22\",\"thumbnail\":\"\",\"followersCount\":50,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674136.jpg\",\"profileUrl\":\"/cook/591623/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/78299/boilermaker-tailgate-chili/\",\"videoDetailUrl\":\"/video/1230/boilermaker-tailgate-chili/\",\"altText\":\"Boilermaker Tailgate Chili Recipe and Video - Ground beef, Italian sausage, beans, and a tomato base come together with lots of flavor and spice in this popular chili recipe. It\\u0027s perfect for tailgating before football games or any time of year.\",\"titleText\":\"Boilermaker Tailgate Chili Recipe and Video\",\"id\":78299,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=aacce39b-241a-4341-ac67-2531dc96da4a\",\"contentProviderId\":\"451\"},{\"title\":\"Foolproof Rib Roast\",\"reviewCount\":1561,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/792951.jpg\",\"description\":\"I was looking for an easy way to make our Christmas Rib Roast. It turned out PERFECT. Rib Roast can be expensive, so this is a total splurge or special occasion dish. 
Enjoy.\",\"videoId\":1193,\"stars\":{\"rating\":4.75,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":775611,\"displayName\":\"SALSIEPIE\",\"thumbnail\":\"\",\"followersCount\":18,\"favoriteCount\":108,\"madeRecipesCount\":29,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674153.jpg\",\"profileUrl\":\"/cook/775611/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/97621/foolproof-rib-roast/\",\"videoDetailUrl\":\"/video/1193/foolproof-rib-roast/\",\"altText\":\"Foolproof Rib Roast Recipe and Video - I was looking for an easy way to make our Christmas Rib Roast. It turned out PERFECT. Rib Roast can be expensive, so this is a total splurge or special occasion dish. Enjoy.\",\"titleText\":\"Foolproof Rib Roast Recipe and Video\",\"id\":97621,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Best Brownies\",\"reviewCount\":9396,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/3850414.jpg\",\"description\":\"Cakey on the outside and fudgy in the middle, this easy brownie recipe really is the best! Done in an hour.\",\"videoId\":617,\"stars\":{\"rating\":4.5399999618530273,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24321025,\"displayName\":\"Angie\",\"thumbnail\":\"\",\"followersCount\":14,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614246.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/10549/best-brownies/\",\"videoDetailUrl\":\"/video/617/best-brownies/\",\"altText\":\"Best Brownies Recipe and Video - Cakey on the outside and fudgy in the middle, this easy brownie recipe really is the best! 
Done in an hour.\",\"titleText\":\"Best Brownies Recipe and Video\",\"id\":10549,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=f9800c9d-2ec5-4824-bf16-8e1b052a42b4\",\"contentProviderId\":\"451\"},{\"title\":\"Chicken Marsala\",\"reviewCount\":3890,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/5107151.jpg\",\"description\":\"Herbed chicken in a sweet Marsala and mushroom sauce -- sounds simple, and it is -- simply delicious.\",\"videoId\":913,\"stars\":{\"rating\":4.5399999618530273,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":-1,\"displayName\":\"Lisa\",\"thumbnail\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614242.jpg\",\"profileUrl\":\"\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/8887/chicken-marsala/\",\"videoDetailUrl\":\"/video/913/chicken-marsala/\",\"altText\":\"Chicken Marsala Recipe and Video - Herbed chicken in a sweet Marsala and mushroom sauce -- sounds simple, and it is -- simply delicious.\",\"titleText\":\"Chicken Marsala Recipe and Video\",\"id\":8887,\"analyticsType\":\"popular\",\"sourceId\":254,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3D\\u0026c=9fd6caab-9439-4531-b5ed-7bebcac5b045\",\"contentProviderId\":\"0\"},{\"title\":\"Grilled Bacon-Wrapped Chicken Tenders\",\"reviewCount\":8,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6819226.jpg\",\"description\":\"These grilled bacon-wrapped chicken tenders deliver the perfect combination of smoky and sweet flavors. 
Use center-cut bacon since it is leaner and less likely to cause flare-ups.\",\"stars\":{\"rating\":4.820000171661377,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":895249,\"displayName\":\"France C\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\",\"followersCount\":749,\"favoriteCount\":2073,\"madeRecipesCount\":511,\"handle\":\"francecevallos\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\",\"profileUrl\":\"/cook/francecevallos/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/274735/grilled-bacon-wrapped-chicken-tenders/\",\"videoDetailUrl\":\"\",\"altText\":\"Grilled Bacon-Wrapped Chicken Tenders Recipe - These grilled bacon-wrapped chicken tenders deliver the perfect combination of smoky and sweet flavors. Use center-cut bacon since it is leaner and less likely to cause flare-ups.\",\"titleText\":\"Grilled Bacon-Wrapped Chicken Tenders Recipe\",\"id\":274735,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Creamy Chicken and Wild Rice Soup\",\"reviewCount\":2094,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7335554.jpg\",\"description\":\"Instant wild rice is cooked in chicken broth with shredded chicken, then combined with thickened cream for a quick soup.\",\"videoId\":1149,\"stars\":{\"rating\":4.7699999809265137,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":24328413,\"displayName\":\"Stephanie G\",\"thumbnail\":\"\",\"followersCount\":4,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614250.jpg\",\"profileUrl\":\"/cook/[email 
protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/23852/creamy-chicken-and-wild-rice-soup/\",\"videoDetailUrl\":\"/video/1149/creamy-chicken-and-wild-rice-soup/\",\"altText\":\"Creamy Chicken and Wild Rice Soup Recipe and Video - Instant wild rice is cooked in chicken broth with shredded chicken, then combined with thickened cream for a quick soup.\",\"titleText\":\"Creamy Chicken and Wild Rice Soup Recipe and Video\",\"id\":23852,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Creamy Au Gratin Potatoes\",\"reviewCount\":4088,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/738814.jpg\",\"description\":\"Thinly sliced potatoes and onion are layered in a creamy cheese sauce creating the perfect au gratin potato recipe.\",\"videoId\":671,\"stars\":{\"rating\":4.5399999618530273,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":5910,\"displayName\":\"CathyM\",\"thumbnail\":\"\",\"followersCount\":38,\"madeRecipesCount\":2,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674156.jpg\",\"profileUrl\":\"/cook/5910/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/15925/creamy-au-gratin-potatoes/\",\"videoDetailUrl\":\"/video/671/creamy-au-gratin-potatoes/\",\"altText\":\"Creamy Au Gratin Potatoes Recipe and Video - Thinly sliced potatoes and onion are layered in a creamy cheese sauce creating the perfect au gratin potato recipe.\",\"titleText\":\"Creamy Au Gratin Potatoes Recipe and 
Video\",\"id\":15925,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=a6929a75-7be3-4cca-8443-926be9e78374\",\"contentProviderId\":\"451\"},{\"title\":\"Christmas Prime Rib\",\"reviewCount\":145,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1875698.jpg\",\"description\":\"This boneless prime rib roast spends a night in the refrigerator to dry before being rubbed with horseradish and mustard, sprinkled with seasonings, and roasted to perfection. Serve it with the thin, savory pan gravy called au jus.\",\"videoId\":7959,\"stars\":{\"rating\":4.8400001525878906,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":626033,\"displayName\":\"JUDY2RIVER\",\"thumbnail\":\"\",\"followersCount\":10,\"favoriteCount\":335,\"madeRecipesCount\":40,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614250.jpg\",\"profileUrl\":\"/cook/626033/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/219587/christmas-prime-rib/\",\"videoDetailUrl\":\"/video/7959/christmas-prime-rib/\",\"altText\":\"Christmas Prime Rib Recipe and Video - This boneless prime rib roast spends a night in the refrigerator to dry before being rubbed with horseradish and mustard, sprinkled with seasonings, and roasted to perfection. 
Serve it with the thin, savory pan gravy called au jus.\",\"titleText\":\"Christmas Prime Rib Recipe and Video\",\"id\":219587,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Fluffy Pancakes\",\"reviewCount\":11258,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/5079227.jpg\",\"description\":\"Tall, fluffy pancakes make the best breakfast, especially when there\\u0027s plenty of butter and syrup. Make it extra special with berries and whipped cream!\",\"videoId\":888,\"stars\":{\"rating\":4.8299999237060547,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":2846149,\"displayName\":\"kris\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/266396.jpg\",\"followersCount\":285,\"favoriteCount\":211,\"madeRecipesCount\":66,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/266396.jpg\",\"profileUrl\":\"/cook/2846149/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/162760/fluffy-pancakes/\",\"videoDetailUrl\":\"/video/888/fluffy-pancakes/\",\"altText\":\"Fluffy Pancakes Recipe and Video - Tall, fluffy pancakes make the best breakfast, especially when there\\u0027s plenty of butter and syrup. 
Make it extra special with berries and whipped cream!\",\"titleText\":\"Fluffy Pancakes Recipe and Video\",\"id\":162760,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=c74453e2-0e23-4351-b08a-802f0ac1b120\",\"contentProviderId\":\"451\"},{\"title\":\"Chicken Cordon Bleu II\",\"reviewCount\":4687,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4535588.jpg\",\"description\":\"This is a standard recipe for Cordon Bleu, featuring stuffed chicken swimming in a creamy wine sauce.\",\"videoId\":954,\"stars\":{\"rating\":4.78000020980835,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":61092,\"displayName\":\"Behr\",\"thumbnail\":\"\",\"followersCount\":49,\"favoriteCount\":19,\"madeRecipesCount\":3,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674167.jpg\",\"profileUrl\":\"/cook/61092/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/8669/chicken-cordon-bleu-ii/\",\"videoDetailUrl\":\"/video/954/chicken-cordon-bleu-ii/\",\"altText\":\"Chicken Cordon Bleu II Recipe and Video - This is a standard recipe for Cordon Bleu, featuring stuffed chicken swimming in a creamy wine sauce.\",\"titleText\":\"Chicken Cordon Bleu II Recipe and Video\",\"id\":8669,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Super-Delicious Zuppa Toscana\",\"reviewCount\":2192,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1115547.jpg\",\"description\":\"If you love the Zuppa Toscana at your local chain Italian restaurant, you will adore this soup. 
The rich soup is made with Italian sausage, potatoes, cream, and crushed red pepper.\",\"videoId\":4305,\"stars\":{\"rating\":4.820000171661377,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":2394219,\"displayName\":\"souporsweets\",\"thumbnail\":\"\",\"followersCount\":22,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674157.jpg\",\"profileUrl\":\"/cook/2394219/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/143069/super-delicious-zuppa-toscana/\",\"videoDetailUrl\":\"/video/4305/super-delicious-zuppa-toscana/\",\"altText\":\"Super-Delicious Zuppa Toscana Recipe and Video - If you love the Zuppa Toscana at your local chain Italian restaurant, you will adore this soup. The rich soup is made with Italian sausage, potatoes, cream, and crushed red pepper.\",\"titleText\":\"Super-Delicious Zuppa Toscana Recipe and Video\",\"id\":143069,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Quick and Easy Chicken Noodle Soup\",\"reviewCount\":2274,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4545057.jpg\",\"description\":\"Egg noodles, carrots, celery, and chicken are simmered in broth seasoned with basil and oregano. 
Chicken noodle soup in 30 minutes!\",\"videoId\":793,\"stars\":{\"rating\":4.71999979019165,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":152017,\"displayName\":\"MARYVM\",\"thumbnail\":\"\",\"followersCount\":27,\"favoriteCount\":10,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674138.jpg\",\"profileUrl\":\"/cook/152017/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/26460/quick-and-easy-chicken-noodle-soup/\",\"videoDetailUrl\":\"/video/793/quick-and-easy-chicken-noodle-soup/\",\"altText\":\"Quick and Easy Chicken Noodle Soup Recipe and Video - Egg noodles, carrots, celery, and chicken are simmered in broth seasoned with basil and oregano. Chicken noodle soup in 30 minutes!\",\"titleText\":\"Quick and Easy Chicken Noodle Soup Recipe and Video\",\"id\":26460,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=7d090a3b-f6c9-4a64-8d38-0aabca83c3d4\",\"contentProviderId\":\"451\"},{\"title\":\"Instant Pot(R) Vegan Cabbage Detox Soup\",\"reviewCount\":159,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4612405.jpg\",\"description\":\"This simple vegan cabbage soup is perfect for a detox diet. 
It\\u0027s a tasty no-fuss recipe that takes 30 minutes to make in your Instant Pot®.\",\"videoId\":8870,\"stars\":{\"rating\":4.5500001907348633,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":22212574,\"displayName\":\"Fioa\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/4540626.jpg\",\"followersCount\":110,\"favoriteCount\":7,\"madeRecipesCount\":18,\"handle\":\"fiorellasrecipes\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/4540626.jpg\",\"profileUrl\":\"/cook/fiorellasrecipes/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/261823/instant-pot-vegan-cabbage-detox-soup/\",\"videoDetailUrl\":\"/video/8870/instant-pot-vegan-cabbage-detox-soup/\",\"altText\":\"Instant Pot(R) Vegan Cabbage Detox Soup Recipe and Video - This simple vegan cabbage soup is perfect for a detox diet. It\\u0027s a tasty no-fuss recipe that takes 30 minutes to make in your Instant Pot®.\",\"titleText\":\"Instant Pot(R) Vegan Cabbage Detox Soup Recipe and Video\",\"id\":261823,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Buffalo Chicken Dip\",\"reviewCount\":3580,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/3095600.jpg\",\"description\":\"Five simple ingredients in your slow cooker make this creamy, cheesy, zesty hot dip that tastes just like Buffalo chicken wings.\",\"videoId\":679,\"stars\":{\"rating\":4.7100000381469727,\"starsCssClasses\":\"stars 
stars-4-5\"},\"cook\":{\"id\":613040,\"displayName\":\"NUNPUNCH\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/7971.jpg\",\"followersCount\":127,\"favoriteCount\":44,\"madeRecipesCount\":16,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/7971.jpg\",\"profileUrl\":\"/cook/613040/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/68461/buffalo-chicken-dip/\",\"videoDetailUrl\":\"/video/679/buffalo-chicken-dip/\",\"altText\":\"Buffalo Chicken Dip Recipe and Video - Five simple ingredients in your slow cooker make this creamy, cheesy, zesty hot dip that tastes just like Buffalo chicken wings.\",\"titleText\":\"Buffalo Chicken Dip Recipe and Video\",\"id\":68461,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"The Best Thai Coconut Soup\",\"reviewCount\":1064,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/2319909.jpg\",\"description\":\"This recipe uses a lot of ingredients common in Thai cooking to make a delicious and spicy soup featuring shrimp and shiitake mushrooms in a coconut milk flavored broth.\",\"videoId\":4893,\"stars\":{\"rating\":4.6100001335144043,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1317560,\"displayName\":\"Jessica\",\"thumbnail\":\"\",\"followersCount\":46,\"favoriteCount\":107,\"madeRecipesCount\":35,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674158.jpg\",\"profileUrl\":\"/cook/1317560/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/146035/the-best-thai-coconut-soup/\",\"videoDetailUrl\":\"/video/4893/the-best-thai-coconut-soup/\",\"altText\":\"The Best Thai Coconut Soup Recipe and Video - This recipe uses a lot of 
ingredients common in Thai cooking to make a delicious and spicy soup featuring shrimp and shiitake mushrooms in a coconut milk flavored broth.\",\"titleText\":\"The Best Thai Coconut Soup Recipe and Video\",\"id\":146035,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Keto Smothered Chicken Thighs\",\"reviewCount\":25,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6483083.jpg\",\"description\":\"Topped with bacon, mushrooms, green onions, and a creamy sauce, these chicken thighs are sure to become a favorite on your keto menu.\",\"stars\":{\"rating\":4.7899999618530273,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":895249,\"displayName\":\"France C\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\",\"followersCount\":749,\"favoriteCount\":2073,\"madeRecipesCount\":511,\"handle\":\"francecevallos\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/6886266.jpg\",\"profileUrl\":\"/cook/francecevallos/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/272437/keto-smothered-chicken-thighs/\",\"videoDetailUrl\":\"\",\"altText\":\"Keto Smothered Chicken Thighs Recipe - Topped with bacon, mushrooms, green onions, and a creamy sauce, these chicken thighs are sure to become a favorite on your keto menu.\",\"titleText\":\"Keto Smothered Chicken Thighs Recipe\",\"id\":272437,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Sweet and Sour Meatballs III\",\"reviewCount\":128,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/812267.jpg\",\"description\":\"Sauerkraut, cranberry sauce and spaghetti sauce bring their unique flavors to this rich sweet and sour sauce for meatballs.\",\"stars\":{\"rating\":4.5500001907348633,\"starsCssClasses\":\"stars 
stars-4-5\"},\"cook\":{\"id\":288145,\"displayName\":\"JOYJENSEN\",\"thumbnail\":\"\",\"followersCount\":2,\"favoriteCount\":194,\"madeRecipesCount\":25,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614262.jpg\",\"profileUrl\":\"/cook/288145/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/25527/sweet-and-sour-meatballs-iii/\",\"videoDetailUrl\":\"\",\"altText\":\"Sweet and Sour Meatballs III Recipe - Sauerkraut, cranberry sauce and spaghetti sauce bring their unique flavors to this rich sweet and sour sauce for meatballs.\",\"titleText\":\"Sweet and Sour Meatballs III Recipe\",\"id\":25527,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Mongolian Beef and Spring Onions\",\"reviewCount\":679,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6947892.jpg\",\"description\":\"Thin-sliced beef flank steak gets a quick fry in hot oil, then is simmered in a sweet soy-based sauce with fresh green onions for a dish that\\u0027s like eating out at home.\",\"videoId\":9210,\"stars\":{\"rating\":4.5999999046325684,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":3038453,\"displayName\":\"vkarlson\",\"thumbnail\":\"https://images.media-allrecipes.com/global/features/mini/3057.jpg\",\"followersCount\":15,\"favoriteCount\":50,\"madeRecipesCount\":21,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/global/features/mini/3057.jpg\",\"profileUrl\":\"/cook/3038453/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/201849/mongolian-beef-and-spring-onions/\",\"videoDetailUrl\":\"/video/9210/mongolian-beef-and-spring-onions/\",\"altText\":\"Mongolian Beef and Spring Onions Recipe and Video - Thin-sliced beef flank 
steak gets a quick fry in hot oil, then is simmered in a sweet soy-based sauce with fresh green onions for a dish that\\u0027s like eating out at home.\",\"titleText\":\"Mongolian Beef and Spring Onions Recipe and Video\",\"id\":201849,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Taco Seasoning I\",\"reviewCount\":4915,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4538211.jpg\",\"description\":\"Chili powder, cumin, paprika, and a few other easy-to-find spices make up this taco mix recipe. Cheaper than packaged versions!\",\"videoId\":622,\"stars\":{\"rating\":4.820000171661377,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":346676,\"displayName\":\"BILL ECHOLS\",\"thumbnail\":\"\",\"followersCount\":40,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674167.jpg\",\"profileUrl\":\"/cook/346676/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/46653/taco-seasoning-i/\",\"videoDetailUrl\":\"/video/622/taco-seasoning-i/\",\"altText\":\"Taco Seasoning I Recipe and Video - Chili powder, cumin, paprika, and a few other easy-to-find spices make up this taco mix recipe. Cheaper than packaged versions!\",\"titleText\":\"Taco Seasoning I Recipe and Video\",\"id\":46653,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Instant Pot(R) Lasagna Soup\",\"reviewCount\":24,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6287657.jpg\",\"description\":\"This soup has all of the flavors of lasagna without the hours in the kitchen. 
Thanks to the Instant Pot(R), it is made with minimal effort and fairly quickly.\",\"stars\":{\"rating\":4.7600002288818359,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":9292325,\"displayName\":\"Bren\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/3216287.jpg\",\"followersCount\":214,\"favoriteCount\":893,\"madeRecipesCount\":550,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/3216287.jpg\",\"profileUrl\":\"/cook/9292325/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/269946/instant-pot-lasagna-soup/\",\"videoDetailUrl\":\"\",\"altText\":\"Instant Pot(R) Lasagna Soup Recipe - This soup has all of the flavors of lasagna without the hours in the kitchen. Thanks to the Instant Pot(R), it is made with minimal effort and fairly quickly.\",\"titleText\":\"Instant Pot(R) Lasagna Soup Recipe\",\"id\":269946,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"To Die For Blueberry Muffins\",\"reviewCount\":10148,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/662790.jpg\",\"description\":\"Extra big blueberry muffins are topped with a sugary-cinnamon crumb mixture in this souped-up blueberry muffin recipe.\",\"videoId\":2654,\"stars\":{\"rating\":4.6399998664855957,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24322317,\"displayName\":\"Colleen\",\"thumbnail\":\"\",\"followersCount\":11,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674168.jpg\",\"profileUrl\":\"/cook/[email 
protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/6865/to-die-for-blueberry-muffins/\",\"videoDetailUrl\":\"/video/2654/to-die-for-blueberry-muffins/\",\"altText\":\"To Die For Blueberry Muffins Recipe and Video - Extra big blueberry muffins are topped with a sugary-cinnamon crumb mixture in this souped-up blueberry muffin recipe.\",\"titleText\":\"To Die For Blueberry Muffins Recipe and Video\",\"id\":6865,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=75d248af-999b-4550-9d73-268010039a96\",\"contentProviderId\":\"451\"},{\"title\":\"Alfredo Sauce\",\"reviewCount\":3127,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/672690.jpg\",\"description\":\"This creamy alfredo sauce turns a busy weeknight dinner into something special. Serve it with fettuccine or pour it over chicken breasts or steamed vegetables.\",\"videoId\":3808,\"stars\":{\"rating\":4.5500001907348633,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24327556,\"displayName\":\"Rebecca Swift\",\"thumbnail\":\"\",\"followersCount\":5,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674154.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/22831/alfredo-sauce/\",\"videoDetailUrl\":\"/video/3808/alfredo-sauce/\",\"altText\":\"Alfredo Sauce Recipe and Video - This creamy alfredo sauce turns a busy weeknight dinner into something special. 
Serve it with fettuccine or pour it over chicken breasts or steamed vegetables.\",\"titleText\":\"Alfredo Sauce Recipe and Video\",\"id\":22831,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=76e98f46-8773-464f-8b4f-411cdc587e34\",\"contentProviderId\":\"451\"},{\"title\":\"Mom Moak\\u0027s Chicken Noodle Soup\",\"reviewCount\":30,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/996489.jpg\",\"description\":\"This chicken noodle soup recipe saves time with the use of pre-cooked or leftover chicken, and gets its creaminess from evaporated milk.\",\"stars\":{\"rating\":4.6399998664855957,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":8172875,\"displayName\":\"carolann\",\"thumbnail\":\"\",\"followersCount\":3,\"favoriteCount\":72,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674157.jpg\",\"profileUrl\":\"/cook/8172875/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/219773/mom-moaks-chicken-noodle-soup/\",\"videoDetailUrl\":\"\",\"altText\":\"Mom Moak\\u0027s Chicken Noodle Soup Recipe - This chicken noodle soup recipe saves time with the use of pre-cooked or leftover chicken, and gets its creaminess from evaporated milk.\",\"titleText\":\"Mom Moak\\u0027s Chicken Noodle Soup Recipe\",\"id\":219773,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Fried Cabbage with Bacon, Onion, and Garlic\",\"reviewCount\":901,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/230003.jpg\",\"description\":\"This is a side dish where the title says it all. 
Cabbage is fried with bacon, onion, and garlic for a side dish you\\u0027ll want to eat again and again.\",\"videoId\":3493,\"stars\":{\"rating\":4.619999885559082,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":3362287,\"displayName\":\"Kathi Richards Smith\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/601249.jpg\",\"followersCount\":66,\"favoriteCount\":227,\"madeRecipesCount\":14,\"handle\":\"addictedtothekitchen\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/601249.jpg\",\"profileUrl\":\"/cook/addictedtothekitchen/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/196732/fried-cabbage-with-bacon-onion-and-garlic/\",\"videoDetailUrl\":\"/video/3493/fried-cabbage-with-bacon-onion-and-garlic/\",\"altText\":\"Fried Cabbage with Bacon, Onion, and Garlic Recipe and Video - This is a side dish where the title says it all. 
Cabbage is fried with bacon, onion, and garlic for a side dish you\\u0027ll want to eat again and again.\",\"titleText\":\"Fried Cabbage with Bacon, Onion, and Garlic Recipe and Video\",\"id\":196732,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Slow Cooker Pot Roast\",\"reviewCount\":1261,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/5860681.jpg\",\"description\":\"A few packaged mixes make it so easy to serve a tender beef pot roast and savory gravy, all done in the slow cooker while you do other things.\",\"videoId\":4195,\"stars\":{\"rating\":4.6999998092651367,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1828046,\"displayName\":\"keylimeone\",\"thumbnail\":\"https://images.media-allrecipes.com/global/features/mini/154.jpg\",\"followersCount\":40,\"favoriteCount\":106,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/global/features/mini/154.jpg\",\"profileUrl\":\"/cook/1828046/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/218547/slow-cooker-pot-roast/\",\"videoDetailUrl\":\"/video/4195/slow-cooker-pot-roast/\",\"altText\":\"Slow Cooker Pot Roast Recipe and Video - A few packaged mixes make it so easy to serve a tender beef pot roast and savory gravy, all done in the slow cooker while you do other things.\",\"titleText\":\"Slow Cooker Pot Roast Recipe and Video\",\"id\":218547,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Roasted Vegetables\",\"reviewCount\":1636,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1486.jpg\",\"description\":\"Butternut squash, sweet potato, red peppers, and Yukon Gold potatoes are roasted with olive oil, balsamic vinegar, and herbs in this easy side dish.\",\"videoId\":909,\"stars\":{\"rating\":4.630000114440918,\"starsCssClasses\":\"stars 
stars-4-5\"},\"cook\":{\"id\":-1,\"displayName\":\"Saundra\",\"thumbnail\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614242.jpg\",\"profileUrl\":\"\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/9377/roasted-vegetables/\",\"videoDetailUrl\":\"/video/909/roasted-vegetables/\",\"altText\":\"Roasted Vegetables Recipe and Video - Butternut squash, sweet potato, red peppers, and Yukon Gold potatoes are roasted with olive oil, balsamic vinegar, and herbs in this easy side dish.\",\"titleText\":\"Roasted Vegetables Recipe and Video\",\"id\":9377,\"analyticsType\":\"popular\",\"sourceId\":437,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3D\\u0026c=f1d666d1-f308-4383-a8b0-2e08355ea89a\",\"contentProviderId\":\"0\"},{\"title\":\"Slow Cooker Steak Fajitas\",\"reviewCount\":25,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7088033.jpg\",\"description\":\"Melt-in-your-mouth flank steak served right out of the slow cooker make a great family-style meal that lets guests build their own fajitas.\",\"stars\":{\"rating\":4.3000001907348633,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":12291729,\"displayName\":\"Miguel Ruiz\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/3160045.jpg\",\"followersCount\":2,\"favoriteCount\":2,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/3160045.jpg\",\"profileUrl\":\"/cook/12291729/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/273820/slow-cooker-steak-fajitas/\",\"videoDetailUrl\":\"\",\"altText\":\"Slow Cooker Steak Fajitas Recipe - Melt-in-your-mouth flank steak 
served right out of the slow cooker make a great family-style meal that lets guests build their own fajitas.\",\"titleText\":\"Slow Cooker Steak Fajitas Recipe\",\"id\":273820,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Beef Stew VI\",\"reviewCount\":2386,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/729310.jpg\",\"description\":\"Beef, carrots, potatoes, and celery are seasoned with rosemary and parsley in this simple stovetop beef stew recipe.\",\"videoId\":1151,\"stars\":{\"rating\":4.6100001335144043,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":90405,\"displayName\":\"Paula Antoniou\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/2897954.jpg\",\"followersCount\":63,\"favoriteCount\":106,\"madeRecipesCount\":17,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/2897954.jpg\",\"profileUrl\":\"/cook/90405/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/25678/beef-stew-vi/\",\"videoDetailUrl\":\"/video/1151/beef-stew-vi/\",\"altText\":\"Beef Stew VI Recipe and Video - Beef, carrots, potatoes, and celery are seasoned with rosemary and parsley in this simple stovetop beef stew recipe.\",\"titleText\":\"Beef Stew VI Recipe and Video\",\"id\":25678,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=d3866327-2a73-47b7-9bc5-da442ea89ad1\",\"contentProviderId\":\"451\"},{\"title\":\"Mouth-Watering Stuffed Mushrooms\",\"reviewCount\":3029,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/414837.jpg\",\"description\":\"A restaurant-worthy appetizer stuffed with cream cheese, garlic, Parmesan cheese, and a hint of 
heat.\",\"videoId\":629,\"stars\":{\"rating\":4.619999885559082,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":1218326,\"displayName\":\"Angie Gorkoff\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/707139.jpg\",\"followersCount\":193,\"favoriteCount\":19,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/707139.jpg\",\"profileUrl\":\"/cook/1218326/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/15184/mouth-watering-stuffed-mushrooms/\",\"videoDetailUrl\":\"/video/629/mouth-watering-stuffed-mushrooms/\",\"altText\":\"Mouth-Watering Stuffed Mushrooms Recipe and Video - A restaurant-worthy appetizer stuffed with cream cheese, garlic, Parmesan cheese, and a hint of heat.\",\"titleText\":\"Mouth-Watering Stuffed Mushrooms Recipe and Video\",\"id\":15184,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=45eaab2f-6e01-415e-b6e2-9c8f7c9d47d1\",\"contentProviderId\":\"451\"},{\"title\":\"Air Fryer Fried Pickles\",\"reviewCount\":7,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/5597592.jpg\",\"description\":\"This crispy and satisfying appetizer is a lower-guilt alternative to deep-fried pickle chips. 
Using pre-sliced pickle chips is a shortcut that makes this quick to prepare.\",\"stars\":{\"rating\":3.0,\"starsCssClasses\":\"stars stars-3\"},\"cook\":{\"id\":5118896,\"displayName\":\"fabeverydayblog\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/4270835.jpg\",\"followersCount\":148,\"favoriteCount\":294,\"madeRecipesCount\":464,\"handle\":\"fabeveryday\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/4270835.jpg\",\"profileUrl\":\"/cook/fabeveryday/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/267111/air-fryer-fried-pickles/\",\"videoDetailUrl\":\"\",\"altText\":\"Air Fryer Fried Pickles Recipe - This crispy and satisfying appetizer is a lower-guilt alternative to deep-fried pickle chips. Using pre-sliced pickle chips is a shortcut that makes this quick to prepare.\",\"titleText\":\"Air Fryer Fried Pickles Recipe\",\"id\":267111,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Best Green Bean Casserole\",\"reviewCount\":628,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1656932.jpg\",\"description\":\"This great variation of the traditional green bean casserole is topped with French fried onions and Cheddar cheese.\",\"videoId\":5083,\"stars\":{\"rating\":4.630000114440918,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24323957,\"displayName\":\"Jan\",\"thumbnail\":\"\",\"followersCount\":10,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674138.jpg\",\"profileUrl\":\"/cook/[email 
protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/18379/best-green-bean-casserole/\",\"videoDetailUrl\":\"/video/5083/best-green-bean-casserole/\",\"altText\":\"Best Green Bean Casserole Recipe and Video - This great variation of the traditional green bean casserole is topped with French fried onions and Cheddar cheese.\",\"titleText\":\"Best Green Bean Casserole Recipe and Video\",\"id\":18379,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Chef John\\u0027s Italian Meatballs\",\"reviewCount\":1344,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4531471.jpg\",\"description\":\"These Italian meatballs use a standard mix of ground beef and ground pork, with added flavor from parsley, garlic, and dried herbs. Bake up a batch, mix them with your favorite spaghetti dish, and dinner is served!\",\"videoId\":320,\"stars\":{\"rating\":4.820000171661377,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":8601924,\"displayName\":\"Chef John\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/2267470.jpg\",\"followersCount\":70108,\"favoriteCount\":636,\"madeRecipesCount\":309,\"handle\":\"foodwisheswithchefjohn\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/2267470.jpg\",\"profileUrl\":\"/cook/foodwisheswithchefjohn/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/220854/chef-johns-italian-meatballs/\",\"videoDetailUrl\":\"/video/320/chef-johns-italian-meatballs/\",\"altText\":\"Chef John\\u0027s Italian Meatballs Recipe and Video - These Italian meatballs use a standard mix of ground beef and ground pork, with added flavor from parsley, garlic, and dried herbs. 
Bake up a batch, mix them with your favorite spaghetti dish, and dinner is served!\",\"titleText\":\"Chef John\\u0027s Italian Meatballs Recipe and Video\",\"id\":220854,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=a10c2443-b003-4956-842b-a7de961da1f7\",\"contentProviderId\":\"451\"},{\"title\":\"Lentil Soup\",\"reviewCount\":2128,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/6236012.jpg\",\"description\":\"Lentils are coupled with vegetables for this family-friendly lentil soup. Topped with spinach and a splash of vinegar, this is the perfect weekday dinner.\",\"videoId\":2701,\"stars\":{\"rating\":4.46999979019165,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24321584,\"displayName\":\"Bob Cody\",\"thumbnail\":\"\",\"followersCount\":17,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674162.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/13978/lentil-soup/\",\"videoDetailUrl\":\"/video/2701/lentil-soup/\",\"altText\":\"Lentil Soup Recipe and Video - Lentils are coupled with vegetables for this family-friendly lentil soup. 
Topped with spinach and a splash of vinegar, this is the perfect weekday dinner.\",\"titleText\":\"Lentil Soup Recipe and Video\",\"id\":13978,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Baked Ham and Cheese Party Sandwiches\",\"reviewCount\":562,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4502246.jpg\",\"description\":\"Mini rolls stuffed with deli ham and melted Swiss cheese are baked with a savory poppyseed-mustard sauce for an easy, tasty little bite or appetizer.\",\"videoId\":4969,\"stars\":{\"rating\":4.8000001907348633,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":3078696,\"displayName\":\"LisaT\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/268283.jpg\",\"followersCount\":35,\"favoriteCount\":316,\"madeRecipesCount\":31,\"handle\":\"lisat\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/268283.jpg\",\"profileUrl\":\"/cook/lisat/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/216756/baked-ham-and-cheese-party-sandwiches/\",\"videoDetailUrl\":\"/video/4969/baked-ham-and-cheese-party-sandwiches/\",\"altText\":\"Baked Ham and Cheese Party Sandwiches Recipe and Video - Mini rolls stuffed with deli ham and melted Swiss cheese are baked with a savory poppyseed-mustard sauce for an easy, tasty little bite or appetizer.\",\"titleText\":\"Baked Ham and Cheese Party Sandwiches Recipe and Video\",\"id\":216756,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Mrs. Sigg\\u0027s Snickerdoodles\",\"reviewCount\":4834,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/3206419.jpg\",\"description\":\"This snickerdoodle cookie recipe makes treats that are perfectly soft in the middle with a bit of crunch around the edges. 
The sweet cinnamon-sugar coating makes them a sure crowd-pleaser!\",\"videoId\":8429,\"stars\":{\"rating\":4.7399997711181641,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":24321474,\"displayName\":\"Beth Sigworth\",\"thumbnail\":\"\",\"followersCount\":36,\"handle\":\"[email protected]\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674156.jpg\",\"profileUrl\":\"/cook/[email protected]/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/10687/mrs-siggs-snickerdoodles/\",\"videoDetailUrl\":\"/video/8429/mrs-siggs-snickerdoodles/\",\"altText\":\"Mrs. Sigg\\u0027s Snickerdoodles Recipe and Video - This snickerdoodle cookie recipe makes treats that are perfectly soft in the middle with a bit of crunch around the edges. The sweet cinnamon-sugar coating makes them a sure crowd-pleaser!\",\"titleText\":\"Mrs. Sigg\\u0027s Snickerdoodles Recipe and Video\",\"id\":10687,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Apple Pie by Grandma Ople\",\"reviewCount\":10911,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/736203.jpg\",\"description\":\"A unique and popular recipe. 
Sliced apples under a lattice crust get bathed with a sweet buttery sauce before baking.\",\"videoId\":816,\"stars\":{\"rating\":4.78000020980835,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":27143,\"displayName\":\"MOSHASMAMA\",\"thumbnail\":\"\",\"followersCount\":102,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5677177.jpg\",\"profileUrl\":\"/cook/27143/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/12682/apple-pie-by-grandma-ople/\",\"videoDetailUrl\":\"/video/816/apple-pie-by-grandma-ople/\",\"altText\":\"Apple Pie by Grandma Ople Recipe and Video - A unique and popular recipe. Sliced apples under a lattice crust get bathed with a sweet buttery sauce before baking.\",\"titleText\":\"Apple Pie by Grandma Ople Recipe and Video\",\"id\":12682,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=1dd5d927-f1f0-4a22-8438-9066d9958073\",\"contentProviderId\":\"451\"},{\"title\":\"Lighter Baked Shrimp Scampi\",\"reviewCount\":2,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7419043.jpg\",\"description\":\"Enjoy the same great, garlicky taste of shrimp scampi in a healthier baked version, with way less butter and low-carb cauliflower rice instead of pasta.\",\"stars\":{\"rating\":4.0,\"starsCssClasses\":\"stars 
stars-4\"},\"cook\":{\"id\":4976982,\"displayName\":\"lutzflcat\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/6445679.jpg\",\"followersCount\":5732,\"favoriteCount\":5972,\"madeRecipesCount\":2785,\"handle\":\"lutzflcat\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/6445679.jpg\",\"profileUrl\":\"/cook/lutzflcat/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/277593/lighter-baked-shrimp-scampi/\",\"videoDetailUrl\":\"\",\"altText\":\"Lighter Baked Shrimp Scampi Recipe - Enjoy the same great, garlicky taste of shrimp scampi in a healthier baked version, with way less butter and low-carb cauliflower rice instead of pasta.\",\"titleText\":\"Lighter Baked Shrimp Scampi Recipe\",\"id\":277593,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Slow Cooker Pork Loin Roast with Brown Sugar and Sweet Potatoes\",\"reviewCount\":44,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/7092749.jpg\",\"description\":\"Easily prepared in a slow cooker, this tender pork loin roast with sweet potatoes is cooked in a buttery brown sugar mixture for ultimate flavor.\",\"stars\":{\"rating\":4.5100002288818359,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":6639347,\"displayName\":\"galet09\",\"thumbnail\":\"\",\"followersCount\":2,\"favoriteCount\":1,\"madeRecipesCount\":2,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674161.jpg\",\"profileUrl\":\"/cook/6639347/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/275214/slow-cooker-pork-loin-roast-with-brown-sugar-and-sweet-potatoes/\",\"videoDetailUrl\":\"\",\"altText\":\"Slow Cooker Pork Loin Roast with Brown Sugar and Sweet Potatoes 
Recipe - Easily prepared in a slow cooker, this tender pork loin roast with sweet potatoes is cooked in a buttery brown sugar mixture for ultimate flavor.\",\"titleText\":\"Slow Cooker Pork Loin Roast with Brown Sugar and Sweet Potatoes Recipe\",\"id\":275214,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Slow Cooker Beef Stew I\",\"reviewCount\":3669,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/276397.jpg\",\"description\":\"This easy, comforting beef stew is cooked in a slow cooker with potatoes in a hearty broth. Garlic, Worcestershire sauce, and paprika add flair!\",\"videoId\":633,\"stars\":{\"rating\":4.440000057220459,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":55841,\"displayName\":\"BUCHKO\",\"thumbnail\":\"\",\"followersCount\":102,\"favoriteCount\":5,\"madeRecipesCount\":1,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5674168.jpg\",\"profileUrl\":\"/cook/55841/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/14685/slow-cooker-beef-stew-i/\",\"videoDetailUrl\":\"/video/633/slow-cooker-beef-stew-i/\",\"altText\":\"Slow Cooker Beef Stew I Recipe and Video - This easy, comforting beef stew is cooked in a slow cooker with potatoes in a hearty broth. 
Garlic, Worcestershire sauce, and paprika add flair!\",\"titleText\":\"Slow Cooker Beef Stew I Recipe and Video\",\"id\":14685,\"analyticsType\":\"popular\",\"sourceId\":461,\"trackingPixelUrl\":\"https://pubads.g.doubleclick.net/gampad/ad?iu=/3865/DFP_1x1_impression_tracker\\u0026sz=1x1\\u0026t=adpartner%3Dallrecipesmagazine_earned_impression\\u0026c=63a1a2c5-9900-4f5f-be67-9de1d803e5b2\",\"contentProviderId\":\"451\"},{\"title\":\"Cheesy Amish Breakfast Casserole\",\"reviewCount\":532,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/1334701.jpg\",\"description\":\"This hearty casserole has bacon, eggs, hash browns, and three different cheeses all baked into a comforting breakfast dish, perfect for feeding a crowd.\",\"videoId\":3894,\"stars\":{\"rating\":4.7699999809265137,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":2524546,\"displayName\":\"parothstein\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/548065.jpg\",\"followersCount\":34,\"favoriteCount\":627,\"madeRecipesCount\":25,\"handle\":\"parothstein\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/548065.jpg\",\"profileUrl\":\"/cook/parothstein/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/229150/cheesy-amish-breakfast-casserole/\",\"videoDetailUrl\":\"/video/3894/cheesy-amish-breakfast-casserole/\",\"altText\":\"Cheesy Amish Breakfast Casserole Recipe and Video - This hearty casserole has bacon, eggs, hash browns, and three different cheeses all baked into a comforting breakfast dish, perfect for feeding a crowd.\",\"titleText\":\"Cheesy Amish Breakfast Casserole Recipe and Video\",\"id\":229150,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Sandy\\u0027s Homemade Broccoli and Cheddar 
Soup\",\"reviewCount\":450,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/4539521.jpg\",\"description\":\"Serve fresh broccoli and Cheddar soup in under an hour! This quick recipe is easily adaptable to suit your tastes.\",\"videoId\":7988,\"stars\":{\"rating\":4.8299999237060547,\"starsCssClasses\":\"stars stars-5\"},\"cook\":{\"id\":3775085,\"displayName\":\"Sandy Lafleur\",\"thumbnail\":\"https://images.media-allrecipes.com/userphotos/50x50/1225305.jpg\",\"followersCount\":20,\"favoriteCount\":309,\"madeRecipesCount\":10,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/1225305.jpg\",\"profileUrl\":\"/cook/3775085/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/240331/sandys-homemade-broccoli-and-cheddar-soup/\",\"videoDetailUrl\":\"/video/7988/sandys-homemade-broccoli-and-cheddar-soup/\",\"altText\":\"Sandy\\u0027s Homemade Broccoli and Cheddar Soup Recipe and Video - Serve fresh broccoli and Cheddar soup in under an hour! 
This quick recipe is easily adaptable to suit your tastes.\",\"titleText\":\"Sandy\\u0027s Homemade Broccoli and Cheddar Soup Recipe and Video\",\"id\":240331,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"},{\"title\":\"Easy Taco Casserole\",\"reviewCount\":124,\"imageUrl\":\"https://images.media-allrecipes.com/userphotos/300x300/2210782.jpg\",\"description\":\"Ground beef mixed with salsa and onion is layered into a casserole with crushed tortilla chips and cheese in this Mexican-style dinner idea.\",\"videoId\":5042,\"stars\":{\"rating\":4.440000057220459,\"starsCssClasses\":\"stars stars-4-5\"},\"cook\":{\"id\":2784083,\"displayName\":\"charmed71\",\"thumbnail\":\"\",\"followersCount\":1,\"favoriteCount\":4,\"handle\":\"\",\"thumbnailUrl\":\"https://images.media-allrecipes.com/userphotos/50x50/5614248.jpg\",\"profileUrl\":\"/cook/2784083/\"},\"collectionId\":\"0\",\"feedItemViewModelType\":\"RecipeFeedItemViewModel\",\"feedSubItemType\":\"RecipeRecommendationPopular\",\"cardHeaderText\":\"\",\"detailUrl\":\"/recipe/228134/easy-taco-casserole/\",\"videoDetailUrl\":\"/video/5042/easy-taco-casserole/\",\"altText\":\"Easy Taco Casserole Recipe and Video - Ground beef mixed with salsa and onion is layered into a casserole with crushed tortilla chips and cheese in this Mexican-style dinner idea.\",\"titleText\":\"Easy Taco Casserole Recipe and Video\",\"id\":228134,\"analyticsType\":\"popular\",\"contentProviderId\":\"0\"}]);\r\n angular.module('allrecipes').value('tastePrefAbbreviations', {\"Dairy Free\":{\"Abbreviation\":\"df\"},\"Gluten Free\":{\"Abbreviation\":\"gf\"},\"Diabetic\":{\"Abbreviation\":\"db\"},\"Paleo\":{\"Abbreviation\":\"p\"},\"Vegan\":{\"Abbreviation\":\"vn\"},\"Vegetarian\":{\"Abbreviation\":\"vt\"},\"Heart Healthy\":{\"Abbreviation\":\"hh\"},\"Low Calorie\":{\"Abbreviation\":\"lo\"},\"Health\":{\"Abbreviation\":\"h\"},\"Entertaining\":{\"Abbreviation\":\"e\"},\"Family\":{\"Abbreviation\":\"f\"},\"Clean 
Eating\":{\"Abbreviation\":\"ce\"},\"Weight Loss\":{\"Abbreviation\":\"wl\"},\"Budget\":{\"Abbreviation\":\"bd\"},\"Quick Easy\":{\"Abbreviation\":\"qe\"},\"One Two\":{\"Abbreviation\":\"o2\"},\"Appetizer\":{\"Abbreviation\":\"ap\"},\"BBQ\":{\"Abbreviation\":\"bq\"},\"Beef\":{\"Abbreviation\":\"bf\"},\"Bread\":{\"Abbreviation\":\"br\"},\"Breakfast\":{\"Abbreviation\":\"bk\"},\"Casserole\":{\"Abbreviation\":\"cs\"},\"Chicken\":{\"Abbreviation\":\"ch\"},\"Dessert\":{\"Abbreviation\":\"d\"},\"Drink\":{\"Abbreviation\":\"dr\"},\"Main\":{\"Abbreviation\":\"g\"},\"Lowfat\":{\"Abbreviation\":\"lf\"},\"Pasta\":{\"Abbreviation\":\"ps\"},\"Pork\":{\"Abbreviation\":\"pk\"},\"Salad\":{\"Abbreviation\":\"sl\"},\"Seafood\":{\"Abbreviation\":\"sf\"},\"Side\":{\"Abbreviation\":\"sd\"},\"Slow\":{\"Abbreviation\":\"sc\"},\"Soup\":{\"Abbreviation\":\"ss\"},\"World\":{\"Abbreviation\":\"wo\"},\"Lowcarb\":{\"Abbreviation\":\"lc\"},\"Cookie\":{\"Abbreviation\":\"ck\"},\"Holiday\":{\"Abbreviation\":\"ho\"},\"Salmon\":{\"Abbreviation\":\"sm\"},\"Passover\":{\"Abbreviation\":\"pv\"},\"Easter\":{\"Abbreviation\":\"ea\"},\"Cinco\":{\"Abbreviation\":\"m5\"},\"Mother\":{\"Abbreviation\":\"md\"},\"Smoothie\":{\"Abbreviation\":\"sh\"},\"Cake\":{\"Abbreviation\":\"ca\"},\"Asian\":{\"Abbreviation\":\"ai\"},\"Indian\":{\"Abbreviation\":\"in\"},\"Italian\":{\"Abbreviation\":\"it\"},\"Mexican\":{\"Abbreviation\":\"mx\"},\"Southern\":{\"Abbreviation\":\"so\"},\"Dinner\":{\"Abbreviation\":\"di\"}});\r\n if (!userInformation) {\r\n var userInformation =\r\n {\r\n clientIp: '209.129.89.6'\r\n };\r\n };\r\n </script>\n<script>\r\n angular.module('allrecipes')\r\n .constant('Constant', {\r\n 'version': '1.185.0.5222'\r\n });\r\n </script>\n<!-- Begin comScore Tag - Part 1 -->\n<script id=\"script_comscore\">\r\n var _comscore = _comscore || [];\r\n _comscore.push({ c1: \"2\", c2: \"6036305\", cs_ucfr: \"1\" });\r\n (function () {\r\n var s = document.createElement(\"script\"), el = 
document.getElementsByTagName(\"script\")[0];\r\n s.async = true;\r\n s.src = (document.location.protocol == \"https:\" ? \"https://sb\" : \"http://b\") + \".scorecardresearch.com/beacon.js\";\r\n el.parentNode.insertBefore(s, el);\r\n })();\r\n </script>\n<!-- End comScore Tag Part 1-->\n<script>\r\n (function (d) {\r\n var e = d.createElement('script');\r\n e.src = d.location.protocol + '//tag.bounceexchange.com/2602/i.js';\r\n e.async = true;\r\n d.getElementsByTagName(\"head\")[0].appendChild(e);\r\n }(document));\r\n </script>\n<!-- script_facebookpixel -->\n<script>\r\n AR.FacebookPixel.init();\r\n </script>\n<!-- OneTrust Cookies Consent Notice start -->\n<script type=\"text/javascript\">\r\n // Arguments passed to this function:\r\n // OneTrust ID (required): The OneTrust-assigned ID for this site.\r\n // Environment (optional): Test environment is assumed. Specify 'prod' for production.\r\n (function (otid, env) {\r\n const d = window.document;\r\n const otscript = d.createElement('script');\r\n const firstscr = d.getElementsByTagName('script')[0];\r\n otscript.src = 'https://cdn.cookielaw.org/scripttemplates/otSDKStub.js';\r\n otscript.type = 'text/javascript';\r\n otscript.charset = 'UTF-8';\r\n otscript.async = true;\r\n otscript.setAttribute('data-domain-script', otid + (env != 'prod' ? 
'-test' : ''));\r\n firstscr.parentNode.insertBefore(otscript, firstscr);\r\n }('63a0b6bc-e912-4c8d-adfd-3b8a4b698c6c', 'prod'));\r\n </script>\n<script type=\"text/javascript\">\r\n function OptanonWrapper() { }\r\n </script>\n<!-- OneTrust Cookies Consent Notice end -->\n<!-- Scoby Telemetry snippet script_scobytelemetry -->\n<script src=\"https://moprd-cdnservice-uw1.azureedge.net/telemetryapi/1/telemetry.js\"></script>\n<!-- End Scoby Telemetry snippet -->\n<!-- script_adobetagmanager-->\n<script src=\"//assets.adobedtm.com/1c2ad567a53f27e563c4dc2c278a904b84dc5fde/satelliteLib-a07d47e4668bf3c3fa98aff5b2fc6d3f1d0981a3-staging.js\"></script>\n<script type=\"text/javascript\">_satellite.pageBottom(); // Initialize Adobe DTM</script>\n<div id=\"dsapp-is-tablet\"></div>\n<script type=\"text/javascript\">\r\n var testStringVersion = 'True';\r\n </script>\n<script type=\"text/javascript\">\r\n (function(b,r,a,n,c,h,_,s,d,k){if(!b[n]||!b[n]._q){for(;s<_.length;)c(h,_[s++]);d=r.createElement(a);d.async=1;d.src=\"https://cdn.branch.io/branch-latest.min.js\";k=r.getElementsByTagName(a)[0];k.parentNode.insertBefore(d,k);b[n]=h}})(window,document,\"script\",\"branch\",function(b,r){b[r]=function(){b._q.push([r,arguments])}},{_q:[],_v:1},\"addListener applyCode banner closeBanner creditHistory credits data deepview deepviewCta first getCode init link logout redeem referrals removeListener sendSMS setBranchViewData setIdentity track validateCode\".split(\" \"), 0);\r\n branch.init('key_live_dcvcpHkps9BjZy4HCivJjpdewCg0PjvK');\r\n branch.setBranchViewData({\r\n data: {\r\n '$deeplink_path': '/'\r\n }});\r\n\r\n branch.addListener('didShowJourney', function(event) {\r\n var journeysBanner = document.getElementById('branch-banner-iframe');\r\n if (!journeysBanner || !journeysBanner.style) { // don't run if the journey doesn't exist\r\n return;\r\n }\r\n var topPosition = journeysBanner.style.top;\r\n var position = 
window.getComputedStyle(journeysBanner).getPropertyValue('position');\r\n var bannerHeight = window.getComputedStyle(journeysBanner).getPropertyValue('height');\r\n if (topPosition === '0px' && position !== 'fixed') { // if its a top, inline journey\r\n journeysBanner.style.top = '-' + bannerHeight; // shift the banner upward by the height\r\n }\r\n }); // fires as soon as a journey is being shown\r\n </script>\n</body>\n</html>\n\n"
]
],
[
[
"### Finding the `<span>` that contains the username",
"_____no_output_____"
]
],
[
[
"username_found = logged_in_soup.find('span', attrs = {'class': 'username'})",
"_____no_output_____"
]
],
[
[
"### Printing the username",
"_____no_output_____"
]
],
[
[
"print(username_found.text)",
"Test\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.