Dataset schema (one notebook file per row; column name, dtype, observed range):

| Column | Type | Range |
| --- | --- | --- |
| hexsha | stringlengths | 40–40 |
| size | int64 | 6–14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6–260 |
| max_stars_repo_name | stringlengths | 6–119 |
| max_stars_repo_head_hexsha | stringlengths | 40–41 |
| max_stars_repo_licenses | sequence | |
| max_stars_count | int64 | 1–191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 |
| max_issues_repo_path | stringlengths | 6–260 |
| max_issues_repo_name | stringlengths | 6–119 |
| max_issues_repo_head_hexsha | stringlengths | 40–41 |
| max_issues_repo_licenses | sequence | |
| max_issues_count | int64 | 1–67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 |
| max_forks_repo_path | stringlengths | 6–260 |
| max_forks_repo_name | stringlengths | 6–119 |
| max_forks_repo_head_hexsha | stringlengths | 40–41 |
| max_forks_repo_licenses | sequence | |
| max_forks_count | int64 | 1–105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 |
| avg_line_length | float64 | 2–1.04M |
| max_line_length | int64 | 2–11.2M |
| alphanum_fraction | float64 | 0–1 |
| cells | sequence | |
| cell_types | sequence | |
| cell_type_groups | sequence | |
e716fd77caa7bdc1b7d8821a323152bd06c2beef
202,266
ipynb
Jupyter Notebook
notebooks/dissect_delta_logR.ipynb
fritzo/pyro-cov
bfc619cbde9403f20d89c854133855c1e823e3e5
[ "Apache-2.0" ]
null
null
null
notebooks/dissect_delta_logR.ipynb
fritzo/pyro-cov
bfc619cbde9403f20d89c854133855c1e823e3e5
[ "Apache-2.0" ]
null
null
null
notebooks/dissect_delta_logR.ipynb
fritzo/pyro-cov
bfc619cbde9403f20d89c854133855c1e823e3e5
[ "Apache-2.0" ]
null
null
null
346.34589
80,896
0.926755
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport datetime\nfrom collections import defaultdict\nimport torch\nfrom pyrocov import mutrans\nimport pickle", "_____no_output_____" ] ], [ [ "### Compute clade birthdays", "_____no_output_____" ] ], [ [ "results_dir = '../results/'\ncolumns = pickle.load(open(results_dir + 'columns.3000.pkl', 'rb'))\nassert len(set(columns['clade'])) == 2999\n\ndef estimate_clade_bdays(exclude_first=10, min_portion=0.0005, max_portion=0.1):\n clade_days = defaultdict(list)\n for clade, day in zip(columns[\"clade\"], columns[\"day\"]):\n clade_days[clade].append(day)\n clade_bday = {}\n for clade, days in list(clade_days.items()):\n days.sort()\n exclude = max(exclude_first, int(min_portion * len(days)))\n exclude = min(exclude, int(max_portion * len(days)))\n clade_bday[clade] = days[exclude]\n start_date = datetime.datetime.strptime(mutrans.START_DATE, \"%Y-%m-%d\")\n return {\n clade: (start_date + datetime.timedelta(days=day))\n for clade, day in clade_bday.items()\n }\n\nclade_bday = estimate_clade_bdays()\nprint(len(clade_bday))", "2999\n" ] ], [ [ "## Compare S-gene to RBD to non-S", "_____no_output_____" ] ], [ [ "filename = 'mutrans.svi.3000.1.50.coef_scale=0.05.reparam-localinit.full.10001.0.05.0.1.10.0.200.6.None..pt'\nfit = torch.load(results_dir + filename, map_location=torch.device('cpu'))\n\ncoef = fit['median']['coef'].data.cpu()\n\nfeatures = torch.load(results_dir + 'features.3000.1.pt', map_location=torch.device('cpu'))\n\nclades = features['clades']\nclade_to_lineage = features['clade_to_lineage']\nmutations = features['aa_mutations']\nfeatures = features['aa_features'].data.cpu().float()\n\nfull_pred = torch.mv(features, coef).data.numpy()\nprint(\"full_pred\", full_pred.shape)\n\nrbd = []\nS = []\nfor m in mutations:\n if m[:2] != 'S:':\n rbd.append(0)\n S.append(0)\n continue\n\n S.append(1)\n\n pos = int(m[3:-1])\n\n if pos >= 319 and pos <= 541:\n rbd.append(1)\n else:\n rbd.append(0)\n\nS = torch.tensor(S).bool()\nrbd = torch.tensor(rbd).bool()\n\nS_mutations = np.array(mutations)[S].tolist()\nrbd_mutations = np.array(mutations)[rbd].tolist()\n\nS_features = features[:, S]\nrbd_features = features[:, rbd]\nprint(\"S_features\", S_features.shape)\nprint(\"rbd_features\", rbd_features.shape)\n\nS_coef = coef[S]\nrbd_coef = coef[rbd]\nprint(\"S_coef\", S_coef.shape)\nprint(\"rbd_coef\", rbd_coef.shape)\n\nS_pred = torch.mv(S_features, S_coef).data.numpy()\nrbd_pred = torch.mv(rbd_features, rbd_coef).data.numpy()\nprint(\"S_pred\", S_pred.shape)\nprint(\"rbd_pred\", rbd_pred.shape)", "full_pred (3000,)\nS_features torch.Size([3000, 415])\nrbd_features torch.Size([3000, 68])\nS_coef torch.Size([415])\nrbd_coef torch.Size([68])\nS_pred (3000,)\nrbd_pred (3000,)\n" ], [ "full_pred = 0.01 * full_pred\nrbd_pred = 0.01 * rbd_pred\nS_pred = 0.01 * S_pred\n\nmissing_clade = 'fine.521.0...1284.0.32.219.3.50.116'\n\nmask = []\nfor clade in clades:\n if clade == missing_clade:\n mask.append(0)\n else:\n mask.append(1)\nmask = np.array(mask, dtype=bool)\n\ndates = []\n\nfor clade in clades:\n if clade in clade_bday:\n dates.append(clade_bday[clade])\n else:\n dates.append(None)\n \ndates = np.array(dates)[mask].tolist()\nfull_pred = full_pred[mask]\nrbd_pred = rbd_pred[mask]\nS_pred = S_pred[mask]\n \nassert len(dates) == len(full_pred) == len(rbd_pred) == len(S_pred) == 2999\n\nprint(dates[:2])", "[datetime.datetime(2019, 12, 30, 0, 0), datetime.datetime(2020, 4, 1, 0, 0)]\n" ], 
[ "from collections import OrderedDict\n\ndef aggregate_by_month(values, dates):\n agg = defaultdict(list)\n for v, d in zip(values, dates):\n d = datetime.datetime(d.year, d.month, 1)\n agg[d].append(v)\n \n mean = OrderedDict()\n for v, k in agg.items():\n mean[v] = np.median(k)\n \n print(len(mean.keys()), len(mean.values()))\n \n return list(mean.keys()), list(mean.values()) ", "_____no_output_____" ], [ "fig, (ax0, ax1, ax2) = plt.subplots(1, 3, sharey=True, figsize=(16, 6))\n\nprint(\"y-mins:\", min(S_pred), min(rbd_pred), min(full_pred - S_pred))\nprint(\"y-maxes:\", max(S_pred), max(rbd_pred), max(full_pred - S_pred))\n\nlabelfontsize = 22\n\nax0.plot(dates, S_pred, lw=0.0, ls=None, marker='.', alpha=0.5, markersize=4)\nmonths, values = aggregate_by_month(S_pred, dates)\nax0.plot(months, values, lw=0.0, ls=None, color='red', marker='s', markersize=8)\nax0.set_ylabel(\"S-gene $\\\\Delta$ Log R\", fontsize=labelfontsize)\n\nax1.plot(dates, rbd_pred, lw=0.0, ls=None, marker='.', alpha=0.5, markersize=4)\nmonths, values = aggregate_by_month(rbd_pred, dates)\nax1.plot(months, values, lw=0.0, ls=None, color='red', marker='s', markersize=8)\nax1.set_ylabel(\"RBD $\\\\Delta$ Log R\", fontsize=labelfontsize)\n\nax2.plot(dates, full_pred - S_pred, lw=0.0, ls=None, marker='.', alpha=0.5, markersize=4)\nmonths, values = aggregate_by_month(full_pred - S_pred, dates)\nax2.plot(months, values, lw=0.0, ls=None, color='red', marker='s', markersize=8)\nax2.set_ylabel(\"Non-S-gene $\\\\Delta$ Log R\", fontsize=labelfontsize)\n\nfor ax in (ax0, ax1, ax2):\n ax.xaxis.set_major_locator(mdates.MonthLocator())\n ax.xaxis.set_major_formatter(mdates.DateFormatter(\"%b %Y\"))\n ax.set_ylim(-0.2, 1.4)\n\n for label in ax.get_xticklabels():\n label.set_rotation(90)\n\nax0.tick_params(axis='y', which='major', labelsize=18)\n\nplt.tight_layout()\nplt.savefig('delta_logR_breakdown.png')\nplt.show()", "y-mins: -0.006676604 -0.00018717974 -0.20632032\ny-maxes: 1.3910034 0.90480256 0.8389287\n26 26\n26 26\n26 26\n" ], [ "fig, ax0 = plt.subplots(1, 1, sharey=True, figsize=(7, 5))\n\nlabelfontsize = 22\n\nax0.plot(dates, S_pred, lw=0.0, ls=None, marker='.', alpha=0.5, markersize=4)\nmonths, values = aggregate_by_month(S_pred, dates)\nax0.plot(months, values, lw=0.0, ls=None, color='red', marker='s', markersize=8)\nax0.set_ylabel(\"S-gene $\\\\Delta$ Log R\", fontsize=labelfontsize)\n\nax0.xaxis.set_major_locator(mdates.MonthLocator())\nax0.xaxis.set_major_formatter(mdates.DateFormatter(\"%b %Y\"))\nax0.set_ylim(-0.05, 1.5)\n\nfor label in ax0.get_xticklabels():\n label.set_rotation(90)\n\nax0.tick_params(axis='y', which='major', labelsize=18)\n\nplt.tight_layout()\nplt.savefig('delta_logR_Sgene_contribution.png')\nplt.show()", "26 26\n" ] ], [ [ "### examine b.1.1 heterogeneity", "_____no_output_____" ] ], [ [ "lineage_to_clades = defaultdict(list)\nfor k, v in clade_to_lineage.items():\n lineage_to_clades[v].append(k)\nclade_ids = {clade: i for i, clade in enumerate(clades)}", "_____no_output_____" ], [ "b11 = []\nfor clade in lineage_to_clades['B.1.1']:\n b11.append( (clade_bday[clade], full_pred[clade_ids[clade]] ) )", "_____no_output_____" ], [ "sorted(b11)", "_____no_output_____" ] ], [ [ "## examine omicron", "_____no_output_____" ] ], [ [ "lineage_to_clade = defaultdict(list)\n\nfor k, v in clade_to_lineage.items():\n lineage_to_clade[v].append(k)", "_____no_output_____" ], [ "ba2 = np.array([0 if clade not in lineage_to_clade['BA.2'] else 1 for clade in clades], dtype=bool)\nfeatures_ba2 = 
features.data.numpy()[ba2]\nassert np.abs(features_ba2[0] - features_ba2[1]).sum() == 0\nfeatures_ba2 = features_ba2[0]", "_____no_output_____" ], [ "data = torch.load('results/mutrans.data.single.3000.1.50.None.pt', map_location='cpu')\nweekly_clades = data['weekly_clades']\nfeature_counts = weekly_clades.sum(1).data.numpy() @ features.data.numpy()\nba2_feature_counts = feature_counts[:, np.array(features_ba2, dtype=bool)]\nba2_feature_counts.shape", "_____no_output_____" ], [ "fig, axes = plt.subplots(1, 2, sharey=False, figsize=(12, 6))\n\nfor featrange, ax in zip([0, 1], axes):\n #feats = ba2_feature_counts.T[:25] if featrange == 0 else ba2_feature_counts.T[25:]\n feats = ba2_feature_counts.T if featrange == 0 else np.cumsum(ba2_feature_counts, axis=0).T\n if featrange == 0:\n ax.set_ylabel(\"Biweekly amino acid count\", fontsize=16)\n else:\n ax.set_ylabel(\"Cumulative amino acid count\", fontsize=16)\n for feat in feats:\n ax.plot(np.arange(56), feat, alpha=0.5)\n ax.set_xlabel(\"Time (fortnights)\", fontsize=16)\n ax.set_xticks(np.arange(0, 60, 4))\n ax.tick_params(axis='both', which='major', labelsize=14)", "_____no_output_____" ], [ "cs = np.cumsum(ba2_feature_counts, axis=0).T\nnp.sort(cs[:, 48])", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e717085a6b72cfb66077517dba6513c6d5d485e7
42,137
ipynb
Jupyter Notebook
bronze/B42_Rotations_Solutions.ipynb
QRussia/basics-of-quantum-computing-translate
2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
bronze/B42_Rotations_Solutions.ipynb
QRussia/basics-of-quantum-computing-translate
2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
bronze/B42_Rotations_Solutions.ipynb
QRussia/basics-of-quantum-computing-translate
2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
80.108365
24,084
0.782234
[ [ [ "<table> <tr>\n <td style=\"background-color:#ffffff;\">\n <a href=\"http://qworld.lu.lv\" target=\"_blank\"><img src=\"..\\images\\qworld.jpg\" width=\"25%\" align=\"left\"> </a></td>\n <td style=\"background-color:#ffffff;vertical-align:bottom;text-align:right;\">\n prepared by <a href=\"http://abu.lu.lv\" target=\"_blank\">Abuzer Yakaryilmaz</a> (<a href=\"http://qworld.lu.lv/index.php/qlatvia/\" target=\"_blank\">QLatvia</a>)\n </td> \n</tr></table>", "_____no_output_____" ], [ "<table width=\"100%\"><tr><td style=\"color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;\">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $", "_____no_output_____" ], [ "<h2> <font color=\"blue\"> Solutions for </font>Rotations</h2>", "_____no_output_____" ], [ "<a id=\"task2\"></a>\n<h3> Task 2 </h3>\n\nStart with state $ \\ket{0} $. 
\n\nApply $ R(\\pi/4) $ 7 times, and draw each state on the unit circle.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "%run qlatvia.py\n\ndraw_qubit()\n\n[x,y]=[1,0]\n\ndraw_quantum_state(x,y,\"v0\")\n\nsqrttwo = 2**0.5\noversqrttwo = 1/sqrttwo\n\nR = [ [oversqrttwo, -1*oversqrttwo], [oversqrttwo,oversqrttwo] ]\n\n# function for rotation R\ndef rotate(px,py):\n newx = R[0][0]*px + R[0][1]*py\n newy = R[1][0]*px + R[1][1]*py\n return [newx,newy]\n\n# apply rotation R 7 times\nfor i in range(1,8):\n [x,y] = rotate(x,y)\n draw_quantum_state(x,y,\"|v\"+str(i)+\">\")\n", "_____no_output_____" ] ], [ [ "<a id=\"task3\"></a>\n<h3> Task 3 </h3>\n\nWe implement Task 2 by using \"ry-gate\" and \"statevector_simulator\".\n\nDefine a quantum circuit with one qubit. \n\nIterate seven times\n- Apply the rotation with angle $ \\pi \\over 4 $ by using ry-gate\n- Read the current quantum state\n- Print the current quantum state\n- Draw the quantum state on the unit circle by using the values of the current quantum state", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "%run qlatvia.py\n\ndraw_qubit()\n\n[x,y]=[1,0]\n\ndraw_quantum_state(x,y,\"v0\")\n\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\n\n# we define a quantum circuit with one qubit and one bit\nq = QuantumRegister(1) # quantum register with a single qubit\nc = ClassicalRegister(1) # classical register with a single bit\nqc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers\n\nrotation_angle = pi/4\n\nfor i in range(1,9):\n # rotate the qubit with angle pi/4\n qc.ry(2*rotation_angle,q[0]) \n \n # read the current quantum state\n job = execute(qc,Aer.get_backend('statevector_simulator'),optimization_level=0)\n current_quantum_state=job.result().get_statevector(qc) \n \n # print the current quantum state \n x_value = current_quantum_state[0].real # get the amplitude of |0>\n y_value = current_quantum_state[1].real # get the amplitude of |1>\n print(\"iteration\",i,\": the quantum state is (\",round(x_value,3),\") |0>\",\"+(\",round(y_value,3),\") |1>\")\n \n # draw the current quantum state\n draw_quantum_state(x_value,y_value,\"|v\"+str(i)+\">\")\n", "_____no_output_____" ] ], [ [ "<a id=\"task4\"></a>\n<h3> Task 4 </h3>\n\nRepeat Task 3 for the angle $ \\pi/6 $ by applying the rotation 12 times. \n\nRepeat Task 3 for the angle $ 3\\pi/8 $ by applying the rotation 16 times. \n\nRepeat Task 3 for the angle $ \\sqrt{2}\\pi $ by applying the rotation 20 times. 
", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ], [ "Repeat Task 3 for the angle $ \\pi/6 $ by applying the rotation 12 times.", "_____no_output_____" ] ], [ [ "%run qlatvia.py\n\ndraw_qubit()\n\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\n\n# we define a quantum circuit with one qubit and one bit\nq = QuantumRegister(1) # quantum register with a single qubit\nc = ClassicalRegister(1) # classical register with a single bit\nqc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers\n\nrotation_angle = pi/6\n\nfor i in range(1,13):\n # rotate the qubit with angle pi/4\n qc.ry(2*rotation_angle,q[0]) \n \n # read the current quantum state\n job = execute(qc,Aer.get_backend('statevector_simulator'),optimization_level=0)\n current_quantum_state=job.result().get_statevector(qc) \n \n # print the current quantum state \n x_value = current_quantum_state[0].real # get the amplitude of |0>\n y_value = current_quantum_state[1].real # get the amplitude of |1>\n print(\"iteration\",i,\": the quantum state is (\",round(x_value,3),\") |0>\",\"+(\",round(y_value,3),\") |1>\")\n \n # draw the current quantum state\n draw_quantum_state(x_value,y_value,\"|v\"+str(i)+\">\")\n", "_____no_output_____" ] ], [ [ "Repeat Task 3 for the angle $ 3\\pi/8 $ by applying the rotation 16 times.", "_____no_output_____" ] ], [ [ "%run qlatvia.py\n\ndraw_qubit()\n\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\n\n# we define a quantum circuit with one qubit and one bit\nq = QuantumRegister(1) # quantum register with a single qubit\nc = ClassicalRegister(1) # classical register with a single bit\nqc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers\n\nrotation_angle = 3*pi/8\n\nfor i in range(1,17):\n # rotate the qubit with angle pi/4\n qc.ry(2*rotation_angle,q[0]) \n \n # read the current quantum state\n job = execute(qc,Aer.get_backend('statevector_simulator'),optimization_level=0)\n current_quantum_state=job.result().get_statevector(qc) \n \n # print the current quantum state \n x_value = current_quantum_state[0].real # get the amplitude of |0>\n y_value = current_quantum_state[1].real # get the amplitude of |1>\n print(\"iteration\",i,\": the quantum state is (\",round(x_value,3),\") |0>\",\"+(\",round(y_value,3),\") |1>\")\n \n # draw the current quantum state\n draw_quantum_state(x_value,y_value,\"|v\"+str(i)+\">\")\n", "_____no_output_____" ] ], [ [ "Repeat Task 3 for the angle $ \\sqrt{2}\\pi $ by applying the rotation 20 times. 
", "_____no_output_____" ] ], [ [ "%run qlatvia.py\n\ndraw_qubit()\n\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\n\n# we define a quantum circuit with one qubit and one bit\nq = QuantumRegister(1) # quantum register with a single qubit\nc = ClassicalRegister(1) # classical register with a single bit\nqc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers\n\nrotation_angle = 2**(0.5)\n\nfor i in range(1,21):\n # rotate the qubit with angle pi/4\n qc.ry(2*rotation_angle,q[0]) \n \n # read the current quantum state\n job = execute(qc,Aer.get_backend('statevector_simulator'),optimization_level=0)\n current_quantum_state=job.result().get_statevector(qc) \n \n # print the current quantum state \n x_value = current_quantum_state[0].real # get the amplitude of |0>\n y_value = current_quantum_state[1].real # get the amplitude of |1>\n print(\"iteration\",i,\": the quantum state is (\",round(x_value,3),\") |0>\",\"+(\",round(y_value,3),\") |1>\")\n \n # draw the current quantum state\n draw_quantum_state(x_value,y_value,\"|v\"+str(i)+\">\")\n", "_____no_output_____" ] ], [ [ "<a id=\"task6\"></a>\n<h3> Task 6 </h3>\n\nWe randomly pick an angle $ \\theta \\in [0,2\\pi) $.\n\nWe have two separate qubits initially set to state $ \\ket{0} $. \n\nThe first qubit is rotated by the angle $ \\theta $ and the second qubit is rotated by the angle $ \\theta + \\pi/2 $.\n\nImplement each qubit and its rotation as a separate quantum circuit.\n\nThen, read both quantum states and calculate their dot product.\n\nCheck the result of the dot product for different random angles.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "from random import randrange\nfrom math import pi\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\n\n# implement the experiment 10 times\nfor i in range(10):\n # pick a random angle\n random_angle = randrange(3600)/10\n\n # specify the angles\n rotation_angle1 = random_angle/360*2*pi\n rotation_angle2 = rotation_angle1 + pi/2\n \n #\n # first qubit\n #\n q1 = QuantumRegister(1) \n c1 = ClassicalRegister(1) \n qc1 = QuantumCircuit(q1,c1)\n \n # rotate the qubit\n qc1.ry(2 * rotation_angle1,q1[0])\n \n # read the quantum state\n job = execute(qc1,Aer.get_backend('statevector_simulator'),optimization_level=0)\n current_quantum_state1=job.result().get_statevector(qc1) \n [x1,y1]=[current_quantum_state1[0].real,current_quantum_state1[1].real]\n \n #\n # second qubit \n #\n q2 = QuantumRegister(1) \n c2 = ClassicalRegister(1) \n qc2 = QuantumCircuit(q2,c2)\n \n # rotate the qubit \n qc2.ry(2 * rotation_angle2,q2[0])\n \n # read the quantum state\n job = execute(qc2,Aer.get_backend('statevector_simulator'),optimization_level=0)\n current_quantum_state2=job.result().get_statevector(qc2) \n [x2,y2]=[current_quantum_state2[0].real,current_quantum_state2[1].real]\n\n \n #\n # dot product\n #\n print(i,\"- the result of dot product is \",round(x1*x2+y1*y2,5))\n print(\"random angle is\",random_angle)\n print(\"x1 , y1 =\",round(x1,5),round(y1,5))\n print(\"x2 , y2 =\",round(x2,5),round(y2,5))\n print()", "_____no_output_____" ] ], [ [ "<a id=\"task7\"></a>\n<h3> Task 7 </h3>\n\nWe randomly pick an angle $ \\theta \\in [0,2\\pi) $.\n\nWe have a single qubit initially set to state $ \\ket{0} $. 
\n\nThe qubit is rotated by the angle either $ \\theta_1 = \\theta $ or $ \\theta_2 = \\theta-\\pi/2 $.\n\nYou are allowed to do one more rotation $ \\theta' $ and then make a measurement.\n\nCan you determine the angle of the first rotation angle by looking/using the measurement result? Is it $ \\theta_1 $ or $ \\theta_2 $?\n\nCheck your solution for different random angles.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "from random import randrange\nfrom math import pi\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n\nrandom_angle = randrange(3600)/10\nrotation_angle1 = random_angle/360*2*pi\nrotation_angle2 = rotation_angle1 - pi/2\n\n# we define a quantum circuit with one qubit and one bit\nq = QuantumRegister(1) # quantum register with a single qubit\nc = ClassicalRegister(1) # classical register with a single bit\nqc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers\n\nif randrange(2)==0:\n qc.ry(2 * rotation_angle1,q[0])\n picked_angle = \"theta1\"\nelse:\n qc.ry(2 * rotation_angle2,q[0])\n picked_angle = \"theta2\"\n\n#\n# your code is here\n#\nyour_guess = \"\"\n\nqc.ry(-2 * rotation_angle1,q[0]) # the new state will be either |0> or -|1>\n\nqc.measure(q,c)\n\njob = execute(qc,Aer.get_backend('qasm_simulator'),shots=100)\ncounts = job.result().get_counts(qc)\nprint(counts)\n\nif '0' in counts:\n your_guess = \"theta1\"\nelse:\n your_guess = \"theta2\"\n\n\n######################\nprint(\"your guess is\",your_guess)\nprint(\"picked_angle is\",picked_angle)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
e7171185795d37f8440b5ba333dd18c6de3b331e
132,923
ipynb
Jupyter Notebook
examples/Face Recognition Example.ipynb
uGokalp/FaceVerification
bfc40ea24c7ec4f0a3c9e003e9760014fbd36349
[ "MIT" ]
null
null
null
examples/Face Recognition Example.ipynb
uGokalp/FaceVerification
bfc40ea24c7ec4f0a3c9e003e9760014fbd36349
[ "MIT" ]
23
2021-05-01T16:56:02.000Z
2022-03-08T05:39:41.000Z
examples/Face Recognition Example.ipynb
uGokalp/FaceVerification
bfc40ea24c7ec4f0a3c9e003e9760014fbd36349
[ "MIT" ]
null
null
null
228.783133
57,332
0.900205
[ [ [ "import sys\nsys.path.append('../recognition')", "_____no_output_____" ], [ "import recognition\nfrom typing import Union, Tuple\nimport torch\nimport torchvision.transforms as T\nimport numpy as np\nfrom torch import nn\nfrom facenet_pytorch import MTCNN, InceptionResnetV1\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\nimport utils", "_____no_output_____" ], [ "recognition.get_model", "_____no_output_____" ], [ "def get_model(model_name: str = \"vgg\") -> torch.nn.Module:\n \"\"\"Return the most up to date model\n\n Returns:\n nn.Module: Pytorch model set to eval\n \"\"\"\n types = (str,)\n available_model_names = (\"vgg\", \"inception\")\n utils.type_check(model_name, types)\n\n model_name = model_name.lower()\n if model_name not in available_model_names:\n raise ValueError(f\"Expected {model_name} to be one of {available_model_names}\")\n if model_name == \"vgg\":\n model = VGG_16()\n model.load_weights()\n model.eval()\n elif model_name == \"inception\":\n model = InceptionResnetV1(pretrained=\"casia-webface\").eval()\n return model\n", "_____no_output_____" ], [ "model = get_model()", "_____no_output_____" ], [ "VGG_16 = recognition.VGG_16", "_____no_output_____" ], [ "class DefaultMTCNN(MTCNN):\n def __init__(self):\n super().__init__(image_size=224,\n margin=0,\n min_face_size=20,\n thresholds=[0.6, 0.7, 0.7],\n factor=0.709,\n post_process=True,\n device=\"cpu\")", "_____no_output_____" ], [ "from typing import Union, get_type_hints, Optional\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "import PIL.PngImagePlugin\nimport PIL.JpegImagePlugin", "_____no_output_____" ], [ "def check_image_channels(img: Union[np.ndarray, Image.Image], num_channels=3) -> None:\n \"\"\"Checks image channels to be equal to num_channels\"\"\"\n types = (np.ndarray, Image.Image, PIL.PngImagePlugin.PngImageFile, PIL.JpegImagePlugin.JpegImageFile) # Last option is a workaround.\n utils.type_check(img, types)\n \n if isinstance(img, np.ndarray):\n assert img.ndim == num_channels, f\"Expected image to have ndim <= {num_channels}, but got {img.ndim}\"\n if isinstance(img, Image.Image):\n if type(img) != Image.Image:\n img = img.convert(\"RGB\")\n img_array = np.array(img)\n check_image_channels(img_array)\n ", "_____no_output_____" ], [ "check_image_channels(5)", "_____no_output_____" ], [ "def load_image(image: Union[str, np.ndarray, Image.Image, PIL.PngImagePlugin.PngImageFile, PIL.JpegImagePlugin.JpegImageFile]) -> np.ndarray:\n types = (str, np.ndarray, Image.Image, PIL.PngImagePlugin.PngImageFile, PIL.JpegImagePlugin.JpegImageFile)\n utils.type_check(image, types)\n img = image\n if type(image) == str:\n img = Image.open(image)\n if not isinstance(img, np.ndarray):\n img = img.convert('RGB')\n img = np.array(img)\n return img\n ", "_____no_output_____" ], [ "img = np.array(Image.open(\"../images/1.PNG\").convert(\"RGB\"))", "_____no_output_____" ], [ "load_image(5)", "_____no_output_____" ], [ "mtcnn = DefaultMTCNN()", "_____no_output_____" ], [ "boxes, _ = mtcnn.detect(img)", "_____no_output_____" ], [ "boxes", "_____no_output_____" ], [ "def extract_face(img: Union[np.ndarray, Image.Image], mtcnn) -> Image.Image:\n \"\"\"Extracts the bounding box containing only the first face and return a crop of only that face.\n\n Returns:\n PIL Image: shape 160,160,3\n \"\"\"\n types = (np.ndarray, Image.Image)\n utils.type_check(img, types)\n \n check_image_channels(img)\n boxes, _ = mtcnn.detect(img)\n\n 
if len(boxes) >= 1 and len(boxes) != 4:\n box = boxes[0]\n \n cropped = img.crop(box) if type(img) == Image.Image else Image.fromarray(img).crop(box)\n\n return cropped", "_____no_output_____" ], [ "def display_image(img: Union[Image.Image, np.ndarray], ax:Optional=None ) -> None:\n \"\"\"Displays PIL image, optionally can be used to stack images together\n\n Args:\n img (PIL Image): Image to be displayed.\n ax ([Axes], optional): Axes from the previous plt call to stack together. Defaults to None.\n \"\"\"\n types = (Image.Image, np.ndarray)\n \n utils.type_check(img, types)\n check_image_channels(img)\n if isinstance(img, Image.Image):\n img = np.array(img)\n if ax:\n ax.imshow(img)\n else:\n plt.imshow(img)\n", "_____no_output_____" ], [ "fig, ax = plt.subplots()\ndisplay_image(img, ax)\ndisplay_image(img, ax)\nplt.show()", "_____no_output_____" ], [ "default_mtcnn = DefaultMTCNN()", "_____no_output_____" ], [ "def torch_to_np(array: torch.Tensor) -> np.ndarray:\n \"\"\"Coverts torch tensor to numpy array, handles the case when torch tensor is\n stuck in cuda.\n\n Args:\n array (torch.Tensor): Pytorch Tensor\n\n Returns:\n np.ndarray\n \"\"\"\n types = (torch.Tensor)\n utils.type_check(array, types)\n \n if array.is_cuda:\n array = array.cpu()\n return array.detach().numpy()", "_____no_output_____" ], [ "def get_embedding(image: Union[np.ndarray, Image.Image] , model, mtcnn=default_mtcnn) -> torch.Tensor:\n \"\"\"Runs the PIL image though the model after preprocessing\n\n Args:\n image (Image.Image): PIL image\n model (nn.Module): Pytorch model\n mtcnn (optional): Defaults to None.\n\n Returns:\n [type]: [description]\n \"\"\"\n types = (np.ndarray, Image.Image)\n utils.type_check(image, types)\n\n resize = T.Resize([mtcnn.image_size] * 2)\n to_tensor = T.ToTensor()\n\n with torch.no_grad():\n\n Img1 = to_tensor(resize(extract_face(image, mtcnn)))\n embedding = model(Img1.unsqueeze(0))\n\n return embedding", "_____no_output_____" ], [ "get_embedding(img, model)", "_____no_output_____" ], [ "def verify(\n img1: Union[Image.Image, str],\n img2: Union[Image.Image, str],\n model: nn.Module,\n mtcnn: MTCNN = default_mtcnn,\n dist_fn: str = \"euclidean\",\n plot: bool = True,\n decision: bool = True,\n) -> Union[Tuple[bool, float], float]:\n \"\"\"Compares two images by the distance between their embeddings\n\n Args:\n img1 (Union[Image.Image, str]): Either PIL or path\n img2 (Union[Image.Image, str]): Either PIL or path\n model (nn.Module): A pytorch model.\n mtcnn (MTCNN, optional): User defined MTCNN. Defaults to None.\n dist_fn (str, optional): Current opetions are euclidean or cosine. Defaults to \"euclidean\".\n plot (bool, optional): Matplotlib plot. Defaults to True.\n decision (bool, optional): [description]. 
Defaults to True.\n\n Returns:\n Union[Tuple[bool, float]]: Distance between the embeddings\n \"\"\"\n img_types = (str, Image.Image, np.ndarray)\n utils.type_check(img1, img_types)\n utils.type_check(img2, img_types)\n\n img1 = load_image(img1)\n img2 = load_image(img2)\n\n img1_embedding = get_embedding(img1, model, mtcnn)\n img2_embedding = get_embedding(img2, model, mtcnn)\n\n img1_embedding_np = torch_to_np(img1_embedding)\n img2_embedding_np = torch_to_np(img2_embedding)\n\n if plot:\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n display_image(img1, ax1)\n ax2 = fig.add_subplot(122)\n display_image(img2, ax2)\n plt.show()\n\n if dist_fn == \"euclidean\":\n dist = distance.euclidean(img1_embedding_np, img2_embedding_np)\n\n elif dist_fn == \"cosine\":\n dist = distance.cosine(img1_embedding_np, img2_embedding_np)\n\n elif dist_fn == \"both\":\n dist1 = distance.euclidean(img1_embedding_np, img2_embedding_np)\n dist2 = distance.cosine(img1_embedding_np, img2_embedding_np)\n dist = np.array([dist1, dist2]).mean()\n\n if decision:\n return dist < 0.56, dist\n\n return dist", "_____no_output_____" ], [ "verify(img, img, model)", "_____no_output_____" ], [ "Union[IMAGE_TYPES]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e71712fe99013f499cdb4d077dc7df41072ce383
89,778
ipynb
Jupyter Notebook
code/hiv_model.ipynb
mpatil99/ModSimPy
973812dfb871d83314f37dd37d7d4ebf86adc79b
[ "MIT" ]
null
null
null
code/hiv_model.ipynb
mpatil99/ModSimPy
973812dfb871d83314f37dd37d7d4ebf86adc79b
[ "MIT" ]
null
null
null
code/hiv_model.ipynb
mpatil99/ModSimPy
973812dfb871d83314f37dd37d7d4ebf86adc79b
[ "MIT" ]
null
null
null
226.712121
24,156
0.903551
[ [ [ "### Practice Notebook\n\nSuper Deadly dont touch\n\nBy Manu Patil", "_____no_output_____" ] ], [ [ "# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import functions from the modsim.py module\nfrom modsim import *", "_____no_output_____" ], [ "def make_system(Gamma, mu, tau, beta, rho, alpha, sigma, delta, pi,dt):\n \"\"\"Make a system object for the SIR model.\n \n beta: contact rate in days\n gamma: recovery rate in days\n \n returns: System object\n \"\"\"\n init = State(R = 200, L = 0, E = 0, V = 4e-7);\n\n t0 = 0\n t_end = 300\n \n return System(init=init, t0=t0, t_end=t_end,Gamma = Gamma, \n mu = mu, \n tau = tau, \n beta = beta, \n rho = rho, \n alpha = alpha, \n sigma = sigma, \n delta = delta, \n pi = pi, dt=dt)", "_____no_output_____" ], [ "def update_func(state, t, system):\n \"\"\"Update the RLEV model.\n \n state: State (R, L, E, V)\n t: time\n system: System object\n \n returns: State (RLEV)\n \"\"\"\n unpack(system)\n r,l,e,v = state;\n dr = ((Gamma * tau - mu * r - beta * r * v)*dt)\n dl = ((rho * beta * r * v- mu * l - alpha * l)*dt)\n de = (((1 - rho)*beta*r*v + alpha*l - delta*e)*dt)\n dv = ((pi*e - sigma *v)*dt)\n r += dr\n l += dl\n e += de\n v += dv \n\n return State(R = r,L = l,E = e,V = v);", "_____no_output_____" ], [ "def run_simulation(system, update_func):\n \"\"\"Runs a simulation of the system.\n \n system: System object\n update_func: function that updates state\n \n returns: TimeFrame\n \"\"\"\n unpack(system)\n frame = TimeFrame(columns=init.index)\n frame.row[0] = init\n \n for t in linrange(t0, t_end, dt):\n frame.row[t+dt] = update_func(frame.row[t], t, system)\n \n return frame", "_____no_output_____" ], [ "system = make_system(1.36, .00136, .2, .00027, .1, .036 , 2, .33, 100,1/24) #Given Parameters and dt set to 1/24 such that time is represented in hours\nframe = run_simulation(system, update_func);\nplot(frame.R, label = \"Lymphocytes\");\nplot(frame.L, label = \"Latently Infected\");\nplot(frame.E, label = \"Actively Infected\");\n\ndecorate(xlabel = \"time (hours)\", ylabel = \"Cells\");\n", "_____no_output_____" ], [ "plot(frame.V, label = \"virons\");\ndecorate(xlabel = \"time (hours)\", ylabel = \"Free Virons\");", "_____no_output_____" ], [ "# system = make_system(1.36, .00136, .2, .00027, .1, .036 , 2, .33, 100,1/24)\n# frame = run_simulation(system, update_func);\n# #print(frame)\n# fig1 = plt.figure()\n# ax1 = fig1.add_subplot(111)\n# ax1.set_yscale('log')\n# ax1.plot(frame.V, 'r')\n# ax1.set_ylabel(\"R(r)\")\n# ax1.set_xlabel(\"time (hours)\")\n# decorate();\n\n# \"\"\"\n# ax2 = ax1.twinx()\n# ax2.set_yscale('linear')\n# ax2.plot(frame.L, 'g')\n# ax2.plot(frame.E, 'b')\n# ax2.set_ylabel('L (g), E (b)')\n# decorate();\n# \"\"\"", "_____no_output_____" ], [ "def slope_func(state, t, system):\n \"\"\"Return slope of each of the state variables\n \n state: State (R, L, E, V)\n t: time\n system: System object\n \n returns: dr,dl,de,dv\n \"\"\"\n unpack(system);\n r,l,e,v = state;\n dr = ((Gamma * tau - mu * r - beta * r * v))\n dl = ((rho * beta * r * v- mu * l - alpha * l))\n de = (((1 - rho)*beta*r*v + alpha*l - delta*e))\n dv =(pi*e - sigma *v)\n \n\n return dr, dl,de,dv;", "_____no_output_____" ], [ "slope_func(init, 0, system)", "_____no_output_____" ], [ "results, details = run_ode_solver(system, slope_func)\ndetails", "_____no_output_____" ], [ "plot(frame.R, 
label = \"Lymphocytes\");\nplot(frame.L, label = \"Latently Infected\");\nplot(frame.E, label = \"Actively Infected\");\n\ndecorate(xlabel = \"time (hours)\", ylabel = \"Cells\");", "_____no_output_____" ], [ "plot(frame.V, label = \"virons\", );\ndecorate(xlabel = \"time (hours)\", ylabel = \"Free Virons\");", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7171d7cd64f936a8dbc9df9a4fe00c68393fa8d
20,293
ipynb
Jupyter Notebook
fe588/FE588 Take Home Exam 2016.ipynb
bkoyuncu/notes
0e660f46b7d17fdfddc2cad1bb60dcf847f5d1e4
[ "MIT" ]
191
2016-01-21T19:44:23.000Z
2022-03-25T20:50:50.000Z
fe588/FE588 Take Home Exam 2016.ipynb
onurboyar/notes
2ec14820af044c2cfbc99bc989338346572a5e24
[ "MIT" ]
2
2018-02-18T03:41:04.000Z
2018-11-21T11:08:49.000Z
fe588/FE588 Take Home Exam 2016.ipynb
onurboyar/notes
2ec14820af044c2cfbc99bc989338346572a5e24
[ "MIT" ]
138
2015-10-04T21:57:21.000Z
2021-06-15T19:35:55.000Z
35.048359
496
0.457448
[ [ [ "## Task\n\nIn this assignment, you will implement simple algorithmic trading policies and modify a very simple backtester.\n\nIn the following, you will find a Backtester1 function that gets as an input the historical price series of a single stock.\n\nAt each time interval, the backtester calls for a new order (by calling the placeOrder function), along with the current opening price. Depending upon the current position of the customer (number of owned stocks and current cash), the customer decides upon a new investment. This is currently a random decision, merely deciding the percentage of capital to leave on a stocks. Consequently, the PlaceOrder function will return one of the following orders, to realize the decided position:\n\n* Buy <number of stocks>\n* Sell <number of stocks>\n\nPlaceOrder assumes that stocks can be traded only as integer multiples.\n\n### Task 1\nModify the program to create plots of the total wealth over time, along with the value of the stocks and cash at each time point. \n\n### Task 2\n\nAdd the following type of orders that a customer can issue\n* AddCapital <amount>\n* WithdrawCapital <amount>\n\nThe backtester should always keep track of the position of the trader and make appropriate checks, such as only allowing buying stocks allowed by the current capital.\n\n### Task 3\nAdd an interest_rate such that at the beginning of each trading day, the cash earns a fixed interest. Also, include transaction costs (a fixed percentage of the trade) to be deduced from each transaction.\n\n### Task 4\n\nModify the system such that it allows for open selling, that is selling without actually owning any stock. At the end of the trading day, any open positions should be cleared by the closing price.\n\n### Task 5\nThink and implement a trading policy of your imagination, such as estimating the trend in the last few days, and coming up with a smarter decision than random. Compare your policy with the random policy in terms of earnings or losses.\n\n### Task 6\nModify the program such that you allow for pairs trading. Modify the backtester such that you input a pair of stocks. Now the generated investment decisions must be portfolio. 
Repeat task 5 for the pairs case.\n", "_____no_output_____" ], [ "## Read some example Data", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport pandas.io.data as web\n\nimport numpy as np\nimport datetime\n\nmsft = pd.read_csv(\"msft.csv\", index_col=0, parse_dates=True)\naapl = pd.read_csv(\"aapl.csv\", index_col=0, parse_dates=True)", "_____no_output_____" ], [ "msft['2012-01']", "_____no_output_____" ] ], [ [ "## A reference implementation", "_____no_output_____" ] ], [ [ "\nInitialCash = 1000\nPosition = {'Cash': InitialCash, 'Stocks': 0.0} \n\ndef DecideTargetPosition():\n ## Randomly decide a portfolio output percentage of capital to put into stocks\n return np.random.choice([0.0, 0.5, 1.0]) \n \ndef Capital(price):\n return Position['Cash'] + Position['Stocks']*price\n \ndef PlaceOrder(price):\n p = DecideTargetPosition()\n capital = Capital(price)\n \n numLots = np.floor(capital*p/price)\n \n TargetPosition = {'Cash': capital-numLots*price, 'Stocks': numLots}\n \n if TargetPosition['Stocks'] > Position['Stocks']:\n # Buy \n order = ('Buy', TargetPosition['Stocks']-Position['Stocks'])\n return order\n elif TargetPosition['Stocks'] < Position['Stocks']:\n # Sell\n order = ('Sell', -TargetPosition['Stocks']+Position['Stocks'])\n return order\n else:\n # Do nothing\n None\n \ndef UpdatePosition(deltaCash, deltaStock):\n Position['Cash'] += deltaCash\n Position['Stocks'] += deltaStock\n return\n\n\ndef BackTester1(series, interest_rate):\n \n openPrice = series['Open']\n closePrice = series['Close']\n \n for k in openPrice.keys():\n price = openPrice[k]\n order = PlaceOrder(price) \n \n if order is None:\n continue\n \n print order\n \n if order[0]=='Buy':\n deltaCash = -price*order[1]\n deltaStock = order[1]\n UpdatePosition(deltaCash, deltaStock)\n elif order[0]=='Sell':\n deltaCash = price*order[1]\n deltaStock = -order[1] \n UpdatePosition(deltaCash, deltaStock)\n else:\n None\n \n price = closePrice[k]\n print k, Capital(price)\n \nInitialCash = 1000\nPosition = {'Cash': InitialCash, 'Stocks': 0.0} \n\nBackTester1(msft['2012-01'], 0.05)", "('Buy', 18.0)\n2012-01-03 00:00:00 1003.960018\n('Buy', 19.0)\n2012-01-04 00:00:00 1026.320018\n('Sell', 19.0)\n2012-01-05 00:00:00 1030.979999\n('Buy', 19.0)\n2012-01-06 00:00:00 1049.740017\n('Sell', 37.0)\n2012-01-12 00:00:00 1040.860017\n('Buy', 18.0)\n2012-01-20 00:00:00 1056.879999\n('Sell', 18.0)\n2012-01-23 00:00:00 1053.999999\n('Buy', 17.0)\n2012-01-26 00:00:00 1052.129982\n('Sell', 17.0)\n2012-01-30 00:00:00 1043.119965\n('Buy', 35.0)\n2012-01-31 00:00:00 1038.57\n" ] ], [ [ "## A cleaner implementation with the use of class constructs", "_____no_output_____" ] ], [ [ "\nclass Customer:\n def __init__(self, cash=10000):\n self.Position = {'Cash': cash, 'Stocks': 0.0} \n\n def DecideTargetPosition(self):\n ## Randomly decide a portfolio output percentage of capital to put into stocks\n return np.random.choice([0.0, 0.5, 1.0]) \n \n def Capital(self, price):\n return self.Position['Cash'] + self.Position['Stocks']*price\n \n def PlaceOrder(self, price):\n p = self.DecideTargetPosition()\n capital = self.Capital(price)\n \n numLots = np.floor(capital*p/price)\n \n TargetPosition = {'Cash': capital-numLots*price, 'Stocks': numLots}\n \n if TargetPosition['Stocks']>self.Position['Stocks']:\n # Buy \n return ('Buy', TargetPosition['Stocks']-self.Position['Stocks'])\n elif TargetPosition['Stocks']<self.Position['Stocks']:\n # Sell\n return ('Sell', -TargetPosition['Stocks']+self.Position['Stocks'])\n else:\n # Do nothing\n None\n \n 
def GetPosition(self):\n return self.Position\n def UpdatePosition(self, deltaCash, deltaStock):\n self.Position['Cash'] += deltaCash\n self.Position['Stocks'] += deltaStock\n return\n\ndef BackTester(series, customer, interest_rate):\n \n openPrice = series['Open']\n closePrice = series['Close']\n \n for k in openPrice.keys():\n price = openPrice[k]\n order = customer.PlaceOrder(price) \n \n if order is None:\n continue\n \n print order\n \n if order[0]=='Buy':\n deltaCash = -price*order[1]\n deltaStock = order[1]\n customer.UpdatePosition(deltaCash, deltaStock)\n elif order[0]=='Sell':\n deltaCash = price*order[1]\n deltaStock = -order[1] \n customer.UpdatePosition(deltaCash, deltaStock)\n else:\n None\n \n price = closePrice[k]\n print k, customer.Capital(price)\n \nCash = 1000\ncust = Customer(cash=Cash)\n\nBackTester(msft['2012-01'], cust, 0.05)", "('Buy', 37.0)\n2012-01-03 00:00:00 1008.140037\n('Sell', 19.0)\n2012-01-04 00:00:00 1020.430037\n('Buy', 19.0)\n2012-01-05 00:00:00 1031.170056\n('Sell', 37.0)\n2012-01-06 00:00:00 1025.620093\n('Buy', 37.0)\n2012-01-11 00:00:00 1036.350056\n('Sell', 37.0)\n2012-01-12 00:00:00 1041.90013\n('Buy', 37.0)\n2012-01-13 00:00:00 1053.74013\n('Sell', 37.0)\n2012-01-17 00:00:00 1059.29013\n('Buy', 18.0)\n2012-01-18 00:00:00 1057.850148\n('Buy', 19.0)\n2012-01-20 00:00:00 1101.400111\n('Sell', 19.0)\n2012-01-23 00:00:00 1098.720129\n('Buy', 19.0)\n2012-01-25 00:00:00 1104.970092\n('Sell', 37.0)\n2012-01-26 00:00:00 1106.820166\n('Buy', 18.0)\n2012-01-27 00:00:00 1102.860148\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7173457839a8255b19a5791fb812925b8ee4e10
166,117
ipynb
Jupyter Notebook
ComparingSuboptimalPerformance.ipynb
andytaylor823/euchre-ML
691d5dba9a72af201e004308782c9c429dbeba51
[ "MIT" ]
null
null
null
ComparingSuboptimalPerformance.ipynb
andytaylor823/euchre-ML
691d5dba9a72af201e004308782c9c429dbeba51
[ "MIT" ]
null
null
null
ComparingSuboptimalPerformance.ipynb
andytaylor823/euchre-ML
691d5dba9a72af201e004308782c9c429dbeba51
[ "MIT" ]
null
null
null
43.41793
158
0.558642
[ [ [ "# Imports", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport pickle\nimport pandas as pd\npd.set_option('display.max_columns', 200)\n\nfrom card import Card\nfrom board import Board\nfrom player import Player\nfrom rule import Rule\nimport optimal_strategy as opt\nimport suboptimal_strategy as subopt\n\nfrom functools import partial\nimport multiprocessing\nimport time\nfrom multiprocessing.pool import ThreadPool\nimport threading\nfrom tqdm import notebook\nnotebook.tqdm.pandas()\n\nfrom euchre_lib import *\nfrom search_lib import *\nimport data_formatting as dtf", "_____no_output_____" ], [ "notebook.tqdm.get_lock().locks = []", "_____no_output_____" ], [ "from board import Board\nimport optimal_strategy as opt\nimport suboptimal_strategy as subopt\np0, p2 = opt.make_optimal_player(0), opt.make_optimal_player(2)\np1, p3 = subopt.make_BASIC_player(1), subopt.make_BASIC_player(3)\nboard = Board(p0=p0, p1=p1, p2=p2, p3=p3)\nboard.play_hand(show=True)", "_____no_output_____" ] ], [ [ "# Create optimal-vs-basic dataset", "_____no_output_____" ] ], [ [ "!rm -r /staging/fast/taylora/euchre/opt_vs_basic/*", "rm: cannot remove '/staging/fast/taylora/euchre/opt_vs_basic/*': No such file or directory\n" ], [ "if os.path.exists('/staging/'):\n outfolder = '/staging/fast/taylora/euchre/opt_vs_basic'\nelse:\n outfolder = 'opt_vs_basic'\nif not os.path.exists(outfolder): os.mkdir(outfolder)\np0, p2 = opt.make_optimal_player(0), opt.make_optimal_player(2)\np1, p3 = subopt.make_BASIC_player(1), subopt.make_BASIC_player(3)", "_____no_output_____" ], [ "def performance(id, n_epochs, n_hands, p0=None, p1=None, p2=None, p3=None, use_tqdm=False):\n iterable = range(n_epochs)\n if use_tqdm:\n print(' ', end='', flush=True)\n iterable = notebook.tqdm(iterable, desc='Worker '+str(id))\n \n for i in iterable:\n if not use_tqdm:\n print('Worker %i, epoch %i...' 
%(id, i))\n        board = Board(p0=p0, p1=p1, p2=p2, p3=p3)\n        for hand in range(n_hands):\n            board.play_hand()\n        board.writeout(folder=outfolder, keep_results=False, ROOT_DIR='/'.join(outfolder.split('/')[:-1]) if '/staging' in outfolder else os.getcwd())", "_____no_output_____" ], [ "n_epochs, n_hands = int(1e3), int(1e4)\nAVAILABLE_CPUS = 16-1\nparticular_performance_function = partial(performance, n_epochs=int(n_epochs/AVAILABLE_CPUS), n_hands=n_hands, p0=p0, p1=p1, p2=p2, p3=p3)", "_____no_output_____" ], [ "%%time\npool = multiprocessing.Pool(AVAILABLE_CPUS)\nfor worker in range(AVAILABLE_CPUS):\n    pool.apply_async(particular_performance_function, args=(worker,))\npool.close()\npool.join()\nprint('Done!')", "Worker 0, epoch 0...Worker 1, epoch 0... [interleaved 'Worker i, epoch j...' progress output elided; the log is truncated in the source]
47...\nWorker 2, epoch 51...\nWorker 12, epoch 49...\nWorker 0, epoch 49...\nWorker 6, epoch 49...\nWorker 5, epoch 49...\nWorker 11, epoch 48...\nWorker 9, epoch 49...\nWorker 13, epoch 52...\nWorker 3, epoch 48...\nWorker 4, epoch 49...\nWorker 7, epoch 48...\nWorker 10, epoch 49...\nWorker 14, epoch 48...\nWorker 8, epoch 50...\nWorker 12, epoch 50...\nWorker 2, epoch 52...\nWorker 5, epoch 50...\nWorker 9, epoch 50...\nWorker 3, epoch 49...\nWorker 11, epoch 49...\nWorker 13, epoch 53...\nWorker 1, epoch 48...\nWorker 6, epoch 50...\nWorker 0, epoch 50...\nWorker 4, epoch 50...\nWorker 7, epoch 49...\nWorker 10, epoch 50...\nWorker 14, epoch 49...Worker 8, epoch 51...\n\nWorker 2, epoch 53...\nWorker 12, epoch 51...\nWorker 5, epoch 51...\nWorker 9, epoch 51...\nWorker 3, epoch 50...\nWorker 6, epoch 51...\nWorker 13, epoch 54...\nWorker 0, epoch 51...\nWorker 4, epoch 51...\nWorker 10, epoch 51...\nWorker 8, epoch 52...\nWorker 12, epoch 52...\nWorker 11, epoch 50...\nWorker 14, epoch 50...\nWorker 1, epoch 49...\nWorker 2, epoch 54...\nWorker 7, epoch 50...\nWorker 9, epoch 52...\nWorker 3, epoch 51...\nWorker 6, epoch 52...\nWorker 5, epoch 52...\nWorker 0, epoch 52...\nWorker 4, epoch 52...\nWorker 10, epoch 52...\nWorker 8, epoch 53...\nWorker 12, epoch 53...\nWorker 11, epoch 51...Worker 2, epoch 55...\n\nWorker 1, epoch 50...\nWorker 13, epoch 55...\nWorker 9, epoch 53...\nWorker 3, epoch 52...\nWorker 6, epoch 53...\nWorker 14, epoch 51...\nWorker 7, epoch 51...\nWorker 5, epoch 53...\nWorker 0, epoch 53...\nWorker 4, epoch 53...\nWorker 10, epoch 53...\nWorker 8, epoch 54...\nWorker 12, epoch 54...\nWorker 11, epoch 52...\nWorker 2, epoch 56...\nWorker 6, epoch 54...\nWorker 3, epoch 53...\nWorker 9, epoch 54...\nWorker 13, epoch 56...\nWorker 4, epoch 54...\nWorker 2, epoch 57...\nWorker 0, epoch 54...\nWorker 1, epoch 51...\nWorker 8, epoch 55...\nWorker 12, epoch 55...\nWorker 14, epoch 52...\nWorker 5, epoch 54...\nWorker 7, epoch 52...\nWorker 6, epoch 55...\nWorker 10, epoch 54...\nWorker 3, epoch 54...\nWorker 9, epoch 55...\nWorker 11, epoch 53...\nWorker 13, epoch 57...\nWorker 4, epoch 55...\nWorker 0, epoch 55...\nWorker 2, epoch 58...\nWorker 8, epoch 56...\nWorker 12, epoch 56...\nWorker 6, epoch 56...\nWorker 7, epoch 53...\nWorker 5, epoch 55...\nWorker 3, epoch 55...\nWorker 10, epoch 55...\nWorker 9, epoch 56...\nWorker 13, epoch 58...\nWorker 1, epoch 52...\nWorker 4, epoch 56...\nWorker 0, epoch 56...\nWorker 14, epoch 53...\nWorker 2, epoch 59...\nWorker 8, epoch 57...\nWorker 12, epoch 57...\nWorker 11, epoch 54...\nWorker 6, epoch 57...\nWorker 7, epoch 54...\nWorker 3, epoch 56...\nWorker 5, epoch 56...\nWorker 13, epoch 59...\nWorker 4, epoch 57...\nWorker 9, epoch 57...\nWorker 14, epoch 54...\nWorker 0, epoch 57...\nWorker 2, epoch 60...\nWorker 8, epoch 58...\nWorker 12, epoch 58...\nWorker 7, epoch 55...\nWorker 10, epoch 56...\nWorker 1, epoch 53...\nWorker 6, epoch 58...\nWorker 3, epoch 57...\nWorker 5, epoch 57...\nWorker 13, epoch 60...\nWorker 4, epoch 58...\nWorker 14, epoch 55...\nWorker 0, epoch 58...\nWorker 9, epoch 58...\nWorker 11, epoch 55...\nWorker 2, epoch 61...\nWorker 8, epoch 59...\nWorker 12, epoch 59...\nWorker 7, epoch 56...\nWorker 6, epoch 59...\nWorker 3, epoch 58...\nWorker 13, epoch 61...\nWorker 4, epoch 59...\nWorker 10, epoch 57...\nWorker 14, epoch 56...\nWorker 1, epoch 54...\nWorker 0, epoch 59...\nWorker 2, epoch 62...\nWorker 8, epoch 60...\nWorker 12, epoch 60...\nWorker 5, epoch 58...\nWorker 9, epoch 
59...\nWorker 6, epoch 60...\nWorker 3, epoch 59...\nWorker 7, epoch 57...\nWorker 11, epoch 56...\nWorker 13, epoch 62...\nWorker 4, epoch 60...\nWorker 1, epoch 55...\nWorker 0, epoch 60...\nWorker 14, epoch 57...\nWorker 2, epoch 63...\nWorker 8, epoch 61...\nWorker 12, epoch 61...\nWorker 10, epoch 58...\nWorker 9, epoch 60...\nWorker 6, epoch 61...\nWorker 7, epoch 58...\nWorker 5, epoch 59...\nWorker 13, epoch 63...\nWorker 3, epoch 60...\nWorker 4, epoch 61...\nWorker 11, epoch 57...\nWorker 1, epoch 56...\nWorker 0, epoch 61...\nWorker 2, epoch 64...Worker 14, epoch 58...\n\nWorker 8, epoch 62...\nWorker 12, epoch 62...\nWorker 9, epoch 61...\nWorker 10, epoch 59...\nWorker 6, epoch 62...\nWorker 7, epoch 59...\nWorker 13, epoch 64...\nWorker 5, epoch 60...\nWorker 4, epoch 62...\nWorker 3, epoch 61...\nWorker 11, epoch 58...\nWorker 1, epoch 57...\nWorker 0, epoch 62...\nWorker 2, epoch 65...\nWorker 14, epoch 59...\nWorker 8, epoch 63...\nWorker 9, epoch 62...\nWorker 12, epoch 63...\nWorker 6, epoch 63...\nWorker 7, epoch 60...\nWorker 10, epoch 60...\nWorker 13, epoch 65...\nWorker 4, epoch 63...\nWorker 5, epoch 61...\nWorker 3, epoch 62...\nWorker 11, epoch 59...\nWorker 1, epoch 58...\nWorker 0, epoch 63...\nWorker 14, epoch 60...\nWorker 9, epoch 63...\nWorker 8, epoch 64...\nWorker 6, epoch 64...\nWorker 12, epoch 64...\nWorker 4, epoch 64...\nWorker 10, epoch 61...\nWorker 7, epoch 61...\nWorker 3, epoch 63...\nWorker 5, epoch 62...\nWorker 9, epoch 64...\nWorker 6, epoch 65...\nWorker 0, epoch 64...\nWorker 4, epoch 65...\nWorker 8, epoch 65...\nWorker 1, epoch 59...\nWorker 12, epoch 65...Worker 11, epoch 60...\n\nWorker 14, epoch 61...\nWorker 10, epoch 62...\nWorker 9, epoch 65...\nWorker 3, epoch 64...\nWorker 7, epoch 62...\nWorker 0, epoch 65...\nWorker 5, epoch 63...\nWorker 1, epoch 60...\nWorker 10, epoch 63...\nWorker 14, epoch 62...\nWorker 11, epoch 61...\nWorker 3, epoch 65...\nWorker 7, epoch 63...\nWorker 1, epoch 61...\nWorker 10, epoch 64...\nWorker 14, epoch 63...\nWorker 7, epoch 64...\nWorker 5, epoch 64...\nWorker 11, epoch 62...\nWorker 1, epoch 62...\nWorker 10, epoch 65...\nWorker 14, epoch 64...\nWorker 7, epoch 65...\nWorker 5, epoch 65...\nWorker 1, epoch 63...\nWorker 14, epoch 65...\nWorker 11, epoch 63...\nWorker 1, epoch 64...\nWorker 11, epoch 64...\nWorker 1, epoch 65...\nWorker 11, epoch 65...\nDone!\nCPU times: user 2.02 s, sys: 466 ms, total: 2.49 s\nWall time: 36min 41s\n" ], [ "print('Done!')", "Done!\n" ], [ "!ls /staging/fast/taylora/euchre", "RLdataset old_thresholds opt_vs_basic tfrecords\n" ], [ "!ls /staging/fast/taylora/euchre/opt_vs_basic", "10000_hands_000.csv 10000_hands_329.csv 10000_hands_658.csv\n10000_hands_001.csv 10000_hands_330.csv 10000_hands_659.csv\n10000_hands_002.csv 10000_hands_331.csv 10000_hands_660.csv\n10000_hands_003.csv 10000_hands_332.csv 10000_hands_661.csv\n10000_hands_004.csv 10000_hands_333.csv 10000_hands_662.csv\n10000_hands_005.csv 10000_hands_334.csv 10000_hands_663.csv\n10000_hands_006.csv 10000_hands_335.csv 10000_hands_664.csv\n10000_hands_007.csv 10000_hands_336.csv 10000_hands_665.csv\n10000_hands_008.csv 10000_hands_337.csv 10000_hands_666.csv\n10000_hands_009.csv 10000_hands_338.csv 10000_hands_667.csv\n10000_hands_010.csv 10000_hands_339.csv 10000_hands_668.csv\n10000_hands_011.csv 10000_hands_340.csv 10000_hands_669.csv\n10000_hands_012.csv 10000_hands_341.csv 10000_hands_670.csv\n10000_hands_013.csv 10000_hands_342.csv 10000_hands_671.csv\n10000_hands_014.csv 10000_hands_343.csv 
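] ], [ [ "As a quick sanity check on the shards listed above, the sketch below just counts the per-worker CSVs. The staging path is taken from the ls output and should be adjusted if running elsewhere.", "_____no_output_____" ] ], [ [ "# quick sanity check -- assumes the staging layout shown in the listing above;\n# adjust shard_dir if running outside the staging filesystem\nimport os\nshard_dir = '/staging/fast/taylora/euchre/opt_vs_basic'\nshards = sorted(f for f in os.listdir(shard_dir) if f.endswith('.csv'))\nprint('%i shards: %s ... %s' % (len(shards), shards[0], shards[-1]))", "_____no_output_____"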
] ], [ [ "# Create optimal-vs-intermediate dataset", "_____no_output_____" ] ], [ [ "if os.path.exists('/staging/'):\n    outfolder = '/staging/fast/taylora/euchre/opt_vs_inter'\nelse:\n    outfolder = 'opt_vs_inter'\nif not os.path.exists(outfolder): os.mkdir(outfolder)\np0, p2 = opt.make_optimal_player(0), opt.make_optimal_player(2)\np1, p3 = subopt.make_INTERMEDIATE_player(1), subopt.make_INTERMEDIATE_player(3)", "_____no_output_____" ], [ "n_epochs, n_hands = int(1e3), int(1e5)\nAVAILABLE_CPUS = 16-1  # leave one of the 16 cores free for the parent process\nparticular_performance_function = partial(performance, n_epochs=int(n_epochs/AVAILABLE_CPUS), n_hands=n_hands, p0=p0, p1=p1, p2=p2, p3=p3)", "_____no_output_____" ], [ "%%time\n# NOTE: the pool launch is wrapped in a throwaway string so that re-running the\n# notebook does not kick off the long simulation again; unwrap it to regenerate.\nz=\"\"\"pool = multiprocessing.Pool(AVAILABLE_CPUS)\nfor worker in range(AVAILABLE_CPUS):\n    pool.apply_async(particular_performance_function, args=(worker,))\npool.close()\npool.join()\nprint('Done!')\"\"\"", "CPU times: user 0 ns, sys: 2 µs, total: 2 µs\nWall time: 4.05 µs\n" ], [ "!ls /staging/fast/taylora/euchre", "RLdataset old_thresholds opt_vs_basic opt_vs_inter tfrecords\n" ], [ "!ls /staging/fast/taylora/euchre/opt_vs_inter", "100000_hands_000.csv 100000_hands_330.csv 100000_hands_660.csv\n[ ... 990 shard files (100000_hands_000.csv through 100000_hands_989.csv); listing truncated ... ]\n100000_hands_329.csv 100000_hands_659.csv 100000_hands_989.csv\n"
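] ], [ [ "A hedged spot-check on one opt_vs_inter shard: read it with pandas and confirm each row parses into the 124 fields that clean_csv in the Synthesize performance section below expects. The shard name comes from the listing above, and header=None is an assumption about how the simulator writes rows.", "_____no_output_____" ] ], [ [ "# spot-check one shard; the filename comes from the listing above and\n# header=None is an assumption about the shard format\nimport pandas as pd\ndf = pd.read_csv('/staging/fast/taylora/euchre/opt_vs_inter/100000_hands_000.csv', header=None)\nprint(df.shape)  # expect 124 columns per row\ndf.head()", "_____no_output_____"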
] ], [ [ "# Create intermediate-vs-basic dataset", "_____no_output_____" ] ], [ [ "if os.path.exists('/staging/'):\n    outfolder = '/staging/fast/taylora/euchre/inter_vs_basic'\nelse:\n    outfolder = 'inter_vs_basic'\nif not os.path.exists(outfolder): os.mkdir(outfolder)\np0, p2 = subopt.make_BASIC_player(0), subopt.make_BASIC_player(2)\np1, p3 = subopt.make_INTERMEDIATE_player(1), subopt.make_INTERMEDIATE_player(3)", "_____no_output_____" ], [ "n_epochs, n_hands = int(1e3), int(1e5)\nAVAILABLE_CPUS = 16-1\nparticular_performance_function = partial(performance, n_epochs=int(n_epochs/AVAILABLE_CPUS), n_hands=n_hands, p0=p0, p1=p1, p2=p2, p3=p3)", "_____no_output_____" ], [ "%%time\nz=\"\"\"\npool = multiprocessing.Pool(AVAILABLE_CPUS)\nfor worker in range(AVAILABLE_CPUS):\n    pool.apply_async(particular_performance_function, args=(worker,))\npool.close()\npool.join()\nprint('Done!')\"\"\"", "CPU times: user 0 ns, sys: 3 µs, total: 3 µs\nWall time: 79.9 µs\n" ], [ 
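"# Sketch (not the original run) of how the disabled pool launch above could be\n# executed with the context-manager form of multiprocessing.Pool, so workers are\n# reaped even if a task raises; flip RUN_SIMULATION to True to regenerate the shards.\nimport multiprocessing\nRUN_SIMULATION = False\nif RUN_SIMULATION:\n    with multiprocessing.Pool(AVAILABLE_CPUS) as pool:\n        for worker in range(AVAILABLE_CPUS):\n            pool.apply_async(particular_performance_function, args=(worker,))\n        pool.close()\n        pool.join()\n    print('Done!')", "_____no_output_____" ], [ 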
"!ls /staging/fast/taylora/euchre/inter_vs_basic", "_____no_output_____" ] ], [ [ "# Synthesize performance", "_____no_output_____" ] ], [ [ "CORRECT_COMMA_COUNT = 124\ndef clean_csv(folder):\n for file in os.listdir(folder):\n if '.csv' not in file: continue\n with open(os.path.join(folder, file), 'r') as f:\n lines = f.read().split('\\n')\n good_lines = [l for l in lines if len(l.split(',')) == CORRECT_COMMA_COUNT]\n if len(good_lines) < 0.5*len(lines):\n print('BIG ERROR')\n continue\n if len(good_lines)+1 < len(lines):\n print('%s has %i bad lines' %(file, len(lines)-len(good_lines)))\n with open(os.path.join(folder, file), 'w') as f:\n f.write('\\n'.join(good_lines))", "_____no_output_____" ], [ "!rm -r /staging/fast/taylora/euchre/opt_vs_basic/*", "_____no_output_____" ], [ "!ls /staging/fast/taylora/euchre/opt_vs_basic", "10000_hands_000.csv 10000_hands_330.csv 10000_hands_660.csv\n10000_hands_001.csv 10000_hands_331.csv 10000_hands_661.csv\n10000_hands_002.csv 10000_hands_332.csv 10000_hands_662.csv\n10000_hands_003.csv 10000_hands_333.csv 10000_hands_663.csv\n10000_hands_004.csv 10000_hands_334.csv 10000_hands_664.csv\n10000_hands_005.csv 10000_hands_335.csv 10000_hands_665.csv\n10000_hands_006.csv 10000_hands_336.csv 10000_hands_666.csv\n10000_hands_007.csv 10000_hands_337.csv 10000_hands_667.csv\n10000_hands_008.csv 10000_hands_338.csv 10000_hands_668.csv\n10000_hands_009.csv 10000_hands_339.csv 10000_hands_669.csv\n10000_hands_010.csv 10000_hands_340.csv 10000_hands_670.csv\n10000_hands_011.csv 10000_hands_341.csv 10000_hands_671.csv\n10000_hands_012.csv 10000_hands_342.csv 10000_hands_672.csv\n10000_hands_013.csv 10000_hands_343.csv 10000_hands_673.csv\n10000_hands_014.csv 10000_hands_344.csv 10000_hands_674.csv\n10000_hands_015.csv 10000_hands_345.csv 10000_hands_675.csv\n10000_hands_016.csv 10000_hands_346.csv 10000_hands_676.csv\n10000_hands_017.csv 10000_hands_347.csv 10000_hands_677.csv\n10000_hands_018.csv 10000_hands_348.csv 10000_hands_678.csv\n10000_hands_019.csv 10000_hands_349.csv 10000_hands_679.csv\n10000_hands_020.csv 10000_hands_350.csv 10000_hands_680.csv\n10000_hands_021.csv 10000_hands_351.csv 10000_hands_681.csv\n10000_hands_022.csv 10000_hands_352.csv 10000_hands_682.csv\n10000_hands_023.csv 10000_hands_353.csv 10000_hands_683.csv\n10000_hands_024.csv 10000_hands_354.csv 10000_hands_684.csv\n10000_hands_025.csv 10000_hands_355.csv 10000_hands_685.csv\n10000_hands_026.csv 10000_hands_356.csv 10000_hands_686.csv\n10000_hands_027.csv 10000_hands_357.csv 10000_hands_687.csv\n10000_hands_028.csv 10000_hands_358.csv 10000_hands_688.csv\n10000_hands_029.csv 10000_hands_359.csv 10000_hands_689.csv\n10000_hands_030.csv 10000_hands_360.csv 10000_hands_690.csv\n10000_hands_031.csv 10000_hands_361.csv 10000_hands_691.csv\n10000_hands_032.csv 10000_hands_362.csv 10000_hands_692.csv\n10000_hands_033.csv 10000_hands_363.csv 10000_hands_693.csv\n10000_hands_034.csv 10000_hands_364.csv 10000_hands_694.csv\n10000_hands_035.csv 10000_hands_365.csv 10000_hands_695.csv\n10000_hands_036.csv 10000_hands_366.csv 10000_hands_696.csv\n10000_hands_037.csv 10000_hands_367.csv 10000_hands_697.csv\n10000_hands_038.csv 10000_hands_368.csv 10000_hands_698.csv\n10000_hands_039.csv 10000_hands_369.csv 10000_hands_699.csv\n10000_hands_040.csv 10000_hands_370.csv 10000_hands_700.csv\n10000_hands_041.csv 10000_hands_371.csv 10000_hands_701.csv\n10000_hands_042.csv 10000_hands_372.csv 10000_hands_702.csv\n10000_hands_043.csv 10000_hands_373.csv 10000_hands_703.csv\n10000_hands_044.csv 
10000_hands_374.csv 10000_hands_704.csv\n10000_hands_045.csv 10000_hands_375.csv 10000_hands_705.csv\n10000_hands_046.csv 10000_hands_376.csv 10000_hands_706.csv\n10000_hands_047.csv 10000_hands_377.csv 10000_hands_707.csv\n10000_hands_048.csv 10000_hands_378.csv 10000_hands_708.csv\n10000_hands_049.csv 10000_hands_379.csv 10000_hands_709.csv\n10000_hands_050.csv 10000_hands_380.csv 10000_hands_710.csv\n10000_hands_051.csv 10000_hands_381.csv 10000_hands_711.csv\n10000_hands_052.csv 10000_hands_382.csv 10000_hands_712.csv\n10000_hands_053.csv 10000_hands_383.csv 10000_hands_713.csv\n10000_hands_054.csv 10000_hands_384.csv 10000_hands_714.csv\n10000_hands_055.csv 10000_hands_385.csv 10000_hands_715.csv\n10000_hands_056.csv 10000_hands_386.csv 10000_hands_716.csv\n10000_hands_057.csv 10000_hands_387.csv 10000_hands_717.csv\n10000_hands_058.csv 10000_hands_388.csv 10000_hands_718.csv\n10000_hands_059.csv 10000_hands_389.csv 10000_hands_719.csv\n10000_hands_060.csv 10000_hands_390.csv 10000_hands_720.csv\n10000_hands_061.csv 10000_hands_391.csv 10000_hands_721.csv\n10000_hands_062.csv 10000_hands_392.csv 10000_hands_722.csv\n10000_hands_063.csv 10000_hands_393.csv 10000_hands_723.csv\n10000_hands_064.csv 10000_hands_394.csv 10000_hands_724.csv\n10000_hands_065.csv 10000_hands_395.csv 10000_hands_725.csv\n10000_hands_066.csv 10000_hands_396.csv 10000_hands_726.csv\n10000_hands_067.csv 10000_hands_397.csv 10000_hands_727.csv\n10000_hands_068.csv 10000_hands_398.csv 10000_hands_728.csv\n10000_hands_069.csv 10000_hands_399.csv 10000_hands_729.csv\n10000_hands_070.csv 10000_hands_400.csv 10000_hands_730.csv\n10000_hands_071.csv 10000_hands_401.csv 10000_hands_731.csv\n10000_hands_072.csv 10000_hands_402.csv 10000_hands_732.csv\n10000_hands_073.csv 10000_hands_403.csv 10000_hands_733.csv\n10000_hands_074.csv 10000_hands_404.csv 10000_hands_734.csv\n10000_hands_075.csv 10000_hands_405.csv 10000_hands_735.csv\n10000_hands_076.csv 10000_hands_406.csv 10000_hands_736.csv\n10000_hands_077.csv 10000_hands_407.csv 10000_hands_737.csv\n10000_hands_078.csv 10000_hands_408.csv 10000_hands_738.csv\n10000_hands_079.csv 10000_hands_409.csv 10000_hands_739.csv\n10000_hands_080.csv 10000_hands_410.csv 10000_hands_740.csv\n10000_hands_081.csv 10000_hands_411.csv 10000_hands_741.csv\n10000_hands_082.csv 10000_hands_412.csv 10000_hands_742.csv\n10000_hands_083.csv 10000_hands_413.csv 10000_hands_743.csv\n10000_hands_084.csv 10000_hands_414.csv 10000_hands_744.csv\n10000_hands_085.csv 10000_hands_415.csv 10000_hands_745.csv\n10000_hands_086.csv 10000_hands_416.csv 10000_hands_746.csv\n10000_hands_087.csv 10000_hands_417.csv 10000_hands_747.csv\n10000_hands_088.csv 10000_hands_418.csv 10000_hands_748.csv\n10000_hands_089.csv 10000_hands_419.csv 10000_hands_749.csv\n10000_hands_090.csv 10000_hands_420.csv 10000_hands_750.csv\n10000_hands_091.csv 10000_hands_421.csv 10000_hands_751.csv\n10000_hands_092.csv 10000_hands_422.csv 10000_hands_752.csv\n10000_hands_093.csv 10000_hands_423.csv 10000_hands_753.csv\n10000_hands_094.csv 10000_hands_424.csv 10000_hands_754.csv\n10000_hands_095.csv 10000_hands_425.csv 10000_hands_755.csv\n10000_hands_096.csv 10000_hands_426.csv 10000_hands_756.csv\n10000_hands_097.csv 10000_hands_427.csv 10000_hands_757.csv\n10000_hands_098.csv 10000_hands_428.csv 10000_hands_758.csv\n10000_hands_099.csv 10000_hands_429.csv 10000_hands_759.csv\n10000_hands_100.csv 10000_hands_430.csv 10000_hands_760.csv\n10000_hands_101.csv 10000_hands_431.csv 10000_hands_761.csv\n10000_hands_102.csv 
10000_hands_432.csv 10000_hands_762.csv\n10000_hands_103.csv 10000_hands_433.csv 10000_hands_763.csv\n10000_hands_104.csv 10000_hands_434.csv 10000_hands_764.csv\n10000_hands_105.csv 10000_hands_435.csv 10000_hands_765.csv\n10000_hands_106.csv 10000_hands_436.csv 10000_hands_766.csv\n10000_hands_107.csv 10000_hands_437.csv 10000_hands_767.csv\n10000_hands_108.csv 10000_hands_438.csv 10000_hands_768.csv\n10000_hands_109.csv 10000_hands_439.csv 10000_hands_769.csv\n10000_hands_110.csv 10000_hands_440.csv 10000_hands_770.csv\n10000_hands_111.csv 10000_hands_441.csv 10000_hands_771.csv\n10000_hands_112.csv 10000_hands_442.csv 10000_hands_772.csv\n10000_hands_113.csv 10000_hands_443.csv 10000_hands_773.csv\n10000_hands_114.csv 10000_hands_444.csv 10000_hands_774.csv\n10000_hands_115.csv 10000_hands_445.csv 10000_hands_775.csv\n10000_hands_116.csv 10000_hands_446.csv 10000_hands_776.csv\n10000_hands_117.csv 10000_hands_447.csv 10000_hands_777.csv\n10000_hands_118.csv 10000_hands_448.csv 10000_hands_778.csv\n10000_hands_119.csv 10000_hands_449.csv 10000_hands_779.csv\n10000_hands_120.csv 10000_hands_450.csv 10000_hands_780.csv\n10000_hands_121.csv 10000_hands_451.csv 10000_hands_781.csv\n10000_hands_122.csv 10000_hands_452.csv 10000_hands_782.csv\n10000_hands_123.csv 10000_hands_453.csv 10000_hands_783.csv\n10000_hands_124.csv 10000_hands_454.csv 10000_hands_784.csv\n10000_hands_125.csv 10000_hands_455.csv 10000_hands_785.csv\n10000_hands_126.csv 10000_hands_456.csv 10000_hands_786.csv\n10000_hands_127.csv 10000_hands_457.csv 10000_hands_787.csv\n10000_hands_128.csv 10000_hands_458.csv 10000_hands_788.csv\n10000_hands_129.csv 10000_hands_459.csv 10000_hands_789.csv\n10000_hands_130.csv 10000_hands_460.csv 10000_hands_790.csv\n10000_hands_131.csv 10000_hands_461.csv 10000_hands_791.csv\n10000_hands_132.csv 10000_hands_462.csv 10000_hands_792.csv\n10000_hands_133.csv 10000_hands_463.csv 10000_hands_793.csv\n10000_hands_134.csv 10000_hands_464.csv 10000_hands_794.csv\n10000_hands_135.csv 10000_hands_465.csv 10000_hands_795.csv\n10000_hands_136.csv 10000_hands_466.csv 10000_hands_796.csv\n10000_hands_137.csv 10000_hands_467.csv 10000_hands_797.csv\n10000_hands_138.csv 10000_hands_468.csv 10000_hands_798.csv\n10000_hands_139.csv 10000_hands_469.csv 10000_hands_799.csv\n10000_hands_140.csv 10000_hands_470.csv 10000_hands_800.csv\n10000_hands_141.csv 10000_hands_471.csv 10000_hands_801.csv\n10000_hands_142.csv 10000_hands_472.csv 10000_hands_802.csv\n10000_hands_143.csv 10000_hands_473.csv 10000_hands_803.csv\n10000_hands_144.csv 10000_hands_474.csv 10000_hands_804.csv\n10000_hands_145.csv 10000_hands_475.csv 10000_hands_805.csv\n10000_hands_146.csv 10000_hands_476.csv 10000_hands_806.csv\n10000_hands_147.csv 10000_hands_477.csv 10000_hands_807.csv\n10000_hands_148.csv 10000_hands_478.csv 10000_hands_808.csv\n10000_hands_149.csv 10000_hands_479.csv 10000_hands_809.csv\n10000_hands_150.csv 10000_hands_480.csv 10000_hands_810.csv\n10000_hands_151.csv 10000_hands_481.csv 10000_hands_811.csv\n10000_hands_152.csv 10000_hands_482.csv 10000_hands_812.csv\n10000_hands_153.csv 10000_hands_483.csv 10000_hands_813.csv\n10000_hands_154.csv 10000_hands_484.csv 10000_hands_814.csv\n10000_hands_155.csv 10000_hands_485.csv 10000_hands_815.csv\n10000_hands_156.csv 10000_hands_486.csv 10000_hands_816.csv\n10000_hands_157.csv 10000_hands_487.csv 10000_hands_817.csv\n10000_hands_158.csv 10000_hands_488.csv 10000_hands_818.csv\n10000_hands_159.csv 10000_hands_489.csv 10000_hands_819.csv\n10000_hands_160.csv 
10000_hands_490.csv 10000_hands_820.csv\n10000_hands_161.csv 10000_hands_491.csv 10000_hands_821.csv\n10000_hands_162.csv 10000_hands_492.csv 10000_hands_822.csv\n10000_hands_163.csv 10000_hands_493.csv 10000_hands_823.csv\n10000_hands_164.csv 10000_hands_494.csv 10000_hands_824.csv\n10000_hands_165.csv 10000_hands_495.csv 10000_hands_825.csv\n10000_hands_166.csv 10000_hands_496.csv 10000_hands_826.csv\n10000_hands_167.csv 10000_hands_497.csv 10000_hands_827.csv\n10000_hands_168.csv 10000_hands_498.csv 10000_hands_828.csv\n10000_hands_169.csv 10000_hands_499.csv 10000_hands_829.csv\n10000_hands_170.csv 10000_hands_500.csv 10000_hands_830.csv\n10000_hands_171.csv 10000_hands_501.csv 10000_hands_831.csv\n10000_hands_172.csv 10000_hands_502.csv 10000_hands_832.csv\n10000_hands_173.csv 10000_hands_503.csv 10000_hands_833.csv\n10000_hands_174.csv 10000_hands_504.csv 10000_hands_834.csv\n10000_hands_175.csv 10000_hands_505.csv 10000_hands_835.csv\n10000_hands_176.csv 10000_hands_506.csv 10000_hands_836.csv\n10000_hands_177.csv 10000_hands_507.csv 10000_hands_837.csv\n10000_hands_178.csv 10000_hands_508.csv 10000_hands_838.csv\n10000_hands_179.csv 10000_hands_509.csv 10000_hands_839.csv\n10000_hands_180.csv 10000_hands_510.csv 10000_hands_840.csv\n10000_hands_181.csv 10000_hands_511.csv 10000_hands_841.csv\n10000_hands_182.csv 10000_hands_512.csv 10000_hands_842.csv\n10000_hands_183.csv 10000_hands_513.csv 10000_hands_843.csv\n10000_hands_184.csv 10000_hands_514.csv 10000_hands_844.csv\n10000_hands_185.csv 10000_hands_515.csv 10000_hands_845.csv\n10000_hands_186.csv 10000_hands_516.csv 10000_hands_846.csv\n10000_hands_187.csv 10000_hands_517.csv 10000_hands_847.csv\n10000_hands_188.csv 10000_hands_518.csv 10000_hands_848.csv\n10000_hands_189.csv 10000_hands_519.csv 10000_hands_849.csv\n10000_hands_190.csv 10000_hands_520.csv 10000_hands_850.csv\n10000_hands_191.csv 10000_hands_521.csv 10000_hands_851.csv\n10000_hands_192.csv 10000_hands_522.csv 10000_hands_852.csv\n10000_hands_193.csv 10000_hands_523.csv 10000_hands_853.csv\n10000_hands_194.csv 10000_hands_524.csv 10000_hands_854.csv\n10000_hands_195.csv 10000_hands_525.csv 10000_hands_855.csv\n10000_hands_196.csv 10000_hands_526.csv 10000_hands_856.csv\n10000_hands_197.csv 10000_hands_527.csv 10000_hands_857.csv\n10000_hands_198.csv 10000_hands_528.csv 10000_hands_858.csv\n10000_hands_199.csv 10000_hands_529.csv 10000_hands_859.csv\n10000_hands_200.csv 10000_hands_530.csv 10000_hands_860.csv\n10000_hands_201.csv 10000_hands_531.csv 10000_hands_861.csv\n10000_hands_202.csv 10000_hands_532.csv 10000_hands_862.csv\n10000_hands_203.csv 10000_hands_533.csv 10000_hands_863.csv\n10000_hands_204.csv 10000_hands_534.csv 10000_hands_864.csv\n10000_hands_205.csv 10000_hands_535.csv 10000_hands_865.csv\n10000_hands_206.csv 10000_hands_536.csv 10000_hands_866.csv\n10000_hands_207.csv 10000_hands_537.csv 10000_hands_867.csv\n10000_hands_208.csv 10000_hands_538.csv 10000_hands_868.csv\n10000_hands_209.csv 10000_hands_539.csv 10000_hands_869.csv\n10000_hands_210.csv 10000_hands_540.csv 10000_hands_870.csv\n10000_hands_211.csv 10000_hands_541.csv 10000_hands_871.csv\n10000_hands_212.csv 10000_hands_542.csv 10000_hands_872.csv\n10000_hands_213.csv 10000_hands_543.csv 10000_hands_873.csv\n10000_hands_214.csv 10000_hands_544.csv 10000_hands_874.csv\n10000_hands_215.csv 10000_hands_545.csv 10000_hands_875.csv\n10000_hands_216.csv 10000_hands_546.csv 10000_hands_876.csv\n10000_hands_217.csv 10000_hands_547.csv 10000_hands_877.csv\n10000_hands_218.csv 
10000_hands_548.csv 10000_hands_878.csv\n10000_hands_219.csv 10000_hands_549.csv 10000_hands_879.csv\n10000_hands_220.csv 10000_hands_550.csv 10000_hands_880.csv\n10000_hands_221.csv 10000_hands_551.csv 10000_hands_881.csv\n10000_hands_222.csv 10000_hands_552.csv 10000_hands_882.csv\n10000_hands_223.csv 10000_hands_553.csv 10000_hands_883.csv\n10000_hands_224.csv 10000_hands_554.csv 10000_hands_884.csv\n10000_hands_225.csv 10000_hands_555.csv 10000_hands_885.csv\n10000_hands_226.csv 10000_hands_556.csv 10000_hands_886.csv\n10000_hands_227.csv 10000_hands_557.csv 10000_hands_887.csv\n10000_hands_228.csv 10000_hands_558.csv 10000_hands_888.csv\n10000_hands_229.csv 10000_hands_559.csv 10000_hands_889.csv\n10000_hands_230.csv 10000_hands_560.csv 10000_hands_890.csv\n10000_hands_231.csv 10000_hands_561.csv 10000_hands_891.csv\n10000_hands_232.csv 10000_hands_562.csv 10000_hands_892.csv\n10000_hands_233.csv 10000_hands_563.csv 10000_hands_893.csv\n10000_hands_234.csv 10000_hands_564.csv 10000_hands_894.csv\n10000_hands_235.csv 10000_hands_565.csv 10000_hands_895.csv\n10000_hands_236.csv 10000_hands_566.csv 10000_hands_896.csv\n10000_hands_237.csv 10000_hands_567.csv 10000_hands_897.csv\n10000_hands_238.csv 10000_hands_568.csv 10000_hands_898.csv\n10000_hands_239.csv 10000_hands_569.csv 10000_hands_899.csv\n10000_hands_240.csv 10000_hands_570.csv 10000_hands_900.csv\n10000_hands_241.csv 10000_hands_571.csv 10000_hands_901.csv\n10000_hands_242.csv 10000_hands_572.csv 10000_hands_902.csv\n10000_hands_243.csv 10000_hands_573.csv 10000_hands_903.csv\n10000_hands_244.csv 10000_hands_574.csv 10000_hands_904.csv\n10000_hands_245.csv 10000_hands_575.csv 10000_hands_905.csv\n10000_hands_246.csv 10000_hands_576.csv 10000_hands_906.csv\n10000_hands_247.csv 10000_hands_577.csv 10000_hands_907.csv\n10000_hands_248.csv 10000_hands_578.csv 10000_hands_908.csv\n10000_hands_249.csv 10000_hands_579.csv 10000_hands_909.csv\n10000_hands_250.csv 10000_hands_580.csv 10000_hands_910.csv\n10000_hands_251.csv 10000_hands_581.csv 10000_hands_911.csv\n10000_hands_252.csv 10000_hands_582.csv 10000_hands_912.csv\n10000_hands_253.csv 10000_hands_583.csv 10000_hands_913.csv\n10000_hands_254.csv 10000_hands_584.csv 10000_hands_914.csv\n10000_hands_255.csv 10000_hands_585.csv 10000_hands_915.csv\n10000_hands_256.csv 10000_hands_586.csv 10000_hands_916.csv\n10000_hands_257.csv 10000_hands_587.csv 10000_hands_917.csv\n10000_hands_258.csv 10000_hands_588.csv 10000_hands_918.csv\n10000_hands_259.csv 10000_hands_589.csv 10000_hands_919.csv\n10000_hands_260.csv 10000_hands_590.csv 10000_hands_920.csv\n10000_hands_261.csv 10000_hands_591.csv 10000_hands_921.csv\n10000_hands_262.csv 10000_hands_592.csv 10000_hands_922.csv\n10000_hands_263.csv 10000_hands_593.csv 10000_hands_923.csv\n10000_hands_264.csv 10000_hands_594.csv 10000_hands_924.csv\n10000_hands_265.csv 10000_hands_595.csv 10000_hands_925.csv\n10000_hands_266.csv 10000_hands_596.csv 10000_hands_926.csv\n10000_hands_267.csv 10000_hands_597.csv 10000_hands_927.csv\n10000_hands_268.csv 10000_hands_598.csv 10000_hands_928.csv\n10000_hands_269.csv 10000_hands_599.csv 10000_hands_929.csv\n10000_hands_270.csv 10000_hands_600.csv 10000_hands_930.csv\n10000_hands_271.csv 10000_hands_601.csv 10000_hands_931.csv\n10000_hands_272.csv 10000_hands_602.csv 10000_hands_932.csv\n10000_hands_273.csv 10000_hands_603.csv 10000_hands_933.csv\n10000_hands_274.csv 10000_hands_604.csv 10000_hands_934.csv\n10000_hands_275.csv 10000_hands_605.csv 10000_hands_935.csv\n10000_hands_276.csv 
10000_hands_606.csv 10000_hands_936.csv\n10000_hands_277.csv 10000_hands_607.csv 10000_hands_937.csv\n10000_hands_278.csv 10000_hands_608.csv 10000_hands_938.csv\n10000_hands_279.csv 10000_hands_609.csv 10000_hands_939.csv\n10000_hands_280.csv 10000_hands_610.csv 10000_hands_940.csv\n10000_hands_281.csv 10000_hands_611.csv 10000_hands_941.csv\n10000_hands_282.csv 10000_hands_612.csv 10000_hands_942.csv\n10000_hands_283.csv 10000_hands_613.csv 10000_hands_943.csv\n10000_hands_284.csv 10000_hands_614.csv 10000_hands_944.csv\n10000_hands_285.csv 10000_hands_615.csv 10000_hands_945.csv\n10000_hands_286.csv 10000_hands_616.csv 10000_hands_946.csv\n10000_hands_287.csv 10000_hands_617.csv 10000_hands_947.csv\n10000_hands_288.csv 10000_hands_618.csv 10000_hands_948.csv\n10000_hands_289.csv 10000_hands_619.csv 10000_hands_949.csv\n10000_hands_290.csv 10000_hands_620.csv 10000_hands_950.csv\n10000_hands_291.csv 10000_hands_621.csv 10000_hands_951.csv\n10000_hands_292.csv 10000_hands_622.csv 10000_hands_952.csv\n10000_hands_293.csv 10000_hands_623.csv 10000_hands_953.csv\n10000_hands_294.csv 10000_hands_624.csv 10000_hands_954.csv\n10000_hands_295.csv 10000_hands_625.csv 10000_hands_955.csv\n10000_hands_296.csv 10000_hands_626.csv 10000_hands_956.csv\n10000_hands_297.csv 10000_hands_627.csv 10000_hands_957.csv\n10000_hands_298.csv 10000_hands_628.csv 10000_hands_958.csv\n10000_hands_299.csv 10000_hands_629.csv 10000_hands_959.csv\n10000_hands_300.csv 10000_hands_630.csv 10000_hands_960.csv\n10000_hands_301.csv 10000_hands_631.csv 10000_hands_961.csv\n10000_hands_302.csv 10000_hands_632.csv 10000_hands_962.csv\n10000_hands_303.csv 10000_hands_633.csv 10000_hands_963.csv\n10000_hands_304.csv 10000_hands_634.csv 10000_hands_964.csv\n10000_hands_305.csv 10000_hands_635.csv 10000_hands_965.csv\n10000_hands_306.csv 10000_hands_636.csv 10000_hands_966.csv\n10000_hands_307.csv 10000_hands_637.csv 10000_hands_967.csv\n10000_hands_308.csv 10000_hands_638.csv 10000_hands_968.csv\n10000_hands_309.csv 10000_hands_639.csv 10000_hands_969.csv\n10000_hands_310.csv 10000_hands_640.csv 10000_hands_970.csv\n10000_hands_311.csv 10000_hands_641.csv 10000_hands_971.csv\n10000_hands_312.csv 10000_hands_642.csv 10000_hands_972.csv\n10000_hands_313.csv 10000_hands_643.csv 10000_hands_973.csv\n10000_hands_314.csv 10000_hands_644.csv 10000_hands_974.csv\n10000_hands_315.csv 10000_hands_645.csv 10000_hands_975.csv\n10000_hands_316.csv 10000_hands_646.csv 10000_hands_976.csv\n10000_hands_317.csv 10000_hands_647.csv 10000_hands_977.csv\n10000_hands_318.csv 10000_hands_648.csv 10000_hands_978.csv\n10000_hands_319.csv 10000_hands_649.csv 10000_hands_979.csv\n10000_hands_320.csv 10000_hands_650.csv 10000_hands_980.csv\n10000_hands_321.csv 10000_hands_651.csv 10000_hands_981.csv\n10000_hands_322.csv 10000_hands_652.csv 10000_hands_982.csv\n10000_hands_323.csv 10000_hands_653.csv 10000_hands_983.csv\n10000_hands_324.csv 10000_hands_654.csv 10000_hands_984.csv\n10000_hands_325.csv 10000_hands_655.csv 10000_hands_985.csv\n10000_hands_326.csv 10000_hands_656.csv 10000_hands_986.csv\n10000_hands_327.csv 10000_hands_657.csv 10000_hands_987.csv\n10000_hands_328.csv 10000_hands_658.csv 10000_hands_988.csv\n10000_hands_329.csv 10000_hands_659.csv\n" ], [ "clean_csv('/staging/fast/taylora/euchre/opt_vs_basic')", "10000_hands_712.csv has 10 bad lines\n" ], [ "clean_csv('/staging/fast/taylora/euchre/opt_vs_inter')", "_____no_output_____" ], [ "clean_csv('/staging/fast/taylora/euchre/inter_vs_basic')", "_____no_output_____" ], [ "def 
get_performance(folder, p02_level, p13_level):\n print('Loading files into memory...')\n if folder[-1] != '/': folder += '/'\n df = pd.concat([pd.read_csv(folder+file) for file in os.listdir(folder) if '.csv' in file]).reset_index(drop=True)\n if len(p02_level) < len(p13_level): p02_level += ' '*(len(p13_level)-len(p02_level))\n else: p13_level += ' '*(len(p02_level)-len(p13_level))\n \n print('Applying caller trueid / points...')\n df['caller_trueid'] = df.apply(lambda row: row['p' + str(row['caller']) + 'trueid'], axis=1)\n df['caller_points'] = df.apply(lambda x: 4*(x['result']=='Loner') + 2*(x['result']=='Sweep') +\\\n 1*(x['result']=='Single') - 2*(x['result']=='EUCHRE'), axis=1)\n calls = [df[df['caller_trueid']==i] for i in range(4)]\n \n \"\"\"\n What do I want to return? What useful statistics can I show to prove that Optimal > Intermediate > Basic?\n \n 1.) Avg points per EVERY hand\n 2.) Avg points per call\n 3.) Rate of euchres / singles / sweeps per call\n 4.) Rate of loner successes\n \"\"\"\n mask_02 = (df['caller_trueid'] == 0) | (df['caller_trueid'] == 2)\n mask_13 = (df['caller_trueid'] == 1) | (df['caller_trueid'] == 3)\n callcount_02 = len(calls[0]) + len(calls[2])\n callcount_13 = len(calls[1]) + len(calls[3])\n \n # Statistic 1: Avg points (by team) per every hand\n points_02 = sum(df[(df['caller_points'] > 0) & (mask_02)]['caller_points']) + sum(df[(df['caller_points'] < 0) & (mask_13)]['caller_points'])\n points_13 = sum(df[(df['caller_points'] > 0) & (mask_13)]['caller_points']) + sum(df[(df['caller_points'] < 0) & (mask_02)]['caller_points'])\n print('Non-negative PPH Team 0/2 (%s): %.2f' %(p02_level, points_02/len(df)))\n print('Non-negative PPH Team 1/3 (%s): %.2f' %(p13_level, points_13/len(df)))\n print()\n \n # Statistic 2: Avg points (by team) per call\n print('PPC Team 0/2 (%s): %.2f' %(p02_level, (sum(calls[0]['caller_points'])+sum(calls[2]['caller_points']))/callcount_02))\n print('PPC Team 1/3 (%s): %.2f' %(p13_level, (sum(calls[1]['caller_points'])+sum(calls[3]['caller_points']))/callcount_13))\n print()\n \n # Statistic 3: Rate of euchres / singles / sweeps (by team) per call\n print('Euchre / single / sweep rate Team 0/2 (%s): %.3f / %.3f / %.3f' %(p02_level,\n (sum(calls[0]['caller_points']==-2) + sum(calls[2]['caller_points']==-2)) / callcount_02,\n (sum((calls[0]['caller_points']==1)&(calls[0]['alone']==0)) + sum((calls[2]['caller_points']==1)&(calls[2]['alone']==0))) / callcount_02,\n (sum(calls[0]['caller_points']==2) + sum(calls[2]['caller_points']==2)) / callcount_02\n ))\n print('Euchre / single / sweep rate Team 1/3 (%s): %.3f / %.3f / %.3f' %(p13_level,\n (sum(calls[1]['caller_points']==-2) + sum(calls[3]['caller_points']==-2)) / callcount_13,\n (sum((calls[1]['caller_points']==1)&(calls[1]['alone']==0)) + sum((calls[3]['caller_points']==1)&(calls[3]['alone']==0))) / callcount_13,\n (sum(calls[1]['caller_points']==2) + sum(calls[3]['caller_points']==2)) / callcount_13\n ))\n \n # Statistic 4: Rate of loner successes (by team)\n loners = [c[c['alone']==1] for c in calls] # keep only the hands where the caller went alone\n print('Loner success rate Team 0/2 (%s): %.2f' %(p02_level, (sum(loners[0]['caller_points']==4)+sum(loners[2]['caller_points']==4))\\\n /(len(loners[0])+len(loners[2]))))\n print('Loner success rate Team 1/3 (%s): %.2f' %(p13_level, (sum(loners[1]['caller_points']==4)+sum(loners[3]['caller_points']==4))\\\n /(len(loners[1])+len(loners[3]))))", "_____no_output_____" ], [ "def get_performance_incremental(folder, p02_level, p13_level, prnt=True, every=20, max_count=None):\n
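 \"\"\"Accumulate the same four statistics file by file, so the whole dataset never has to sit in memory at once (concatenating everything, as get_performance does above, takes roughly 9 GB for these runs).\"\"\"\n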
totalcounts, callcounts, totallonercounts, successfullonercounts, euchrecounts, singlecounts, sweepcounts = 0,[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]\n totalpositivepoints, totalpoints = [0,0],[0,0]\n \n for i, file in enumerate(os.listdir(folder)):\n if '.csv' not in file: continue\n if max_count is not None:\n if i > max_count: break\n if prnt and i%every == 0: print('Starting file %i / %i...' %(i, len(os.listdir(folder))))\n df = pd.read_csv(os.path.join(folder, file))\n if len(p02_level) < len(p13_level): p02_level += ' '*(len(p13_level)-len(p02_level))\n else: p13_level += ' '*(len(p02_level)-len(p13_level))\n\n df['caller_trueid'] = df.apply(lambda row: row['p' + str(row['caller']) + 'trueid'], axis=1)\n df['caller_points'] = 4*(df['result']=='Loner') + 2*(df['result']=='Sweep') + 1*(df['result']=='Single') - 2*(df['result']=='EUCHRE')\n calls = [df[df['caller_trueid']==i] for i in range(4)]\n loners = [c[c['alone']==1] for c in calls]\n masks = [(df['caller_trueid'] == 0) | (df['caller_trueid'] == 2), (df['caller_trueid'] == 1) | (df['caller_trueid'] == 3)]\n \n totalcounts += len(df)\n for i in range(2):\n callcounts[i] += len(calls[i])+len(calls[i+2])\n totallonercounts[i] += len(loners[i])+len(loners[i+2])\n successfullonercounts[i] += sum(loners[i]['caller_points']==4)+sum(loners[i+2]['caller_points']==4)\n euchrecounts[i] += sum(calls[i]['caller_points']==-2)+sum(calls[i+2]['caller_points']==-2)\n singlecounts[i] += sum((calls[i]['caller_points']==1) & (calls[i]['alone']==0))+\\\n sum((calls[i+2]['caller_points']==1) & (calls[i+2]['alone']==0))\n sweepcounts[i] += sum(calls[i]['caller_points']==2)+sum(calls[i+2]['caller_points']==2)\n \n totalpositivepoints[i] += sum(df[(masks[i]) & (df['caller_points']>0)]['caller_points'])\n totalpoints[i] += sum(calls[i]['caller_points'])+sum(calls[i+2]['caller_points'])\n \n \n # Statistic 1: Avg positive points (by team) per every hand\n print()\n print('Non-negative PPH Team 0/2 (%s): %.3f' %(p02_level, totalpositivepoints[0]/totalcounts))\n print('Non-negative PPH Team 1/3 (%s): %.3f' %(p13_level, totalpositivepoints[1]/totalcounts))\n print()\n\n # Statistic 2: Avg points (by team) per call\n print('PPC Team 0/2 (%s): %.3f' %(p02_level, totalpoints[0]/callcounts[0]))\n print('PPC Team 1/3 (%s): %.3f' %(p13_level, totalpoints[1]/callcounts[1]))\n print()\n\n # Statistic 3: Rate of euchres / singles / sweeps (by team) per call\n print('Euchre / single / sweep rate Team 0/2 (%s): %.3f / %.3f / %.3f' %(p02_level,\n euchrecounts[0]/callcounts[0], singlecounts[0]/callcounts[0], sweepcounts[0]/callcounts[0]))\n print('Euchre / single / sweep rate Team 1/3 (%s): %.3f / %.3f / %.3f' %(p13_level,\n euchrecounts[1]/callcounts[1], singlecounts[1]/callcounts[1], sweepcounts[1]/callcounts[1]))\n print()\n\n # Statistic 4: Rate of loner successes (by team)\n print('Loner success rate Team 0/2 (%s): %.2f' %(p02_level, successfullonercounts[0]/totallonercounts[0]))\n print('Loner success rate Team 1/3 (%s): %.2f' %(p13_level, successfullonercounts[1]/totallonercounts[1]))\n print()", "_____no_output_____" ], [ "%%time\nget_performance_incremental('/staging/fast/taylora/euchre/opt_vs_basic', 'optimal', 'basic')", "Starting file 0 / 989...\nStarting file 20 / 989...\nStarting file 40 / 989...\nStarting file 60 / 989...\nStarting file 80 / 989...\nStarting file 100 / 989...\nStarting file 120 / 989...\nStarting file 140 / 989...\nStarting file 160 / 989...\nStarting file 180 / 989...\nStarting file 200 / 989...\nStarting file 220 / 989...\nStarting file 240 / 
989...\nStarting file 260 / 989...\nStarting file 280 / 989...\nStarting file 300 / 989...\nStarting file 320 / 989...\nStarting file 340 / 989...\nStarting file 360 / 989...\nStarting file 380 / 989...\nStarting file 400 / 989...\nStarting file 420 / 989...\nStarting file 440 / 989...\nStarting file 460 / 989...\nStarting file 480 / 989...\nStarting file 500 / 989...\nStarting file 520 / 989...\nStarting file 540 / 989...\nStarting file 560 / 989...\nStarting file 580 / 989...\nStarting file 600 / 989...\nStarting file 620 / 989...\nStarting file 640 / 989...\nStarting file 660 / 989...\nStarting file 680 / 989...\nStarting file 700 / 989...\nStarting file 720 / 989...\nStarting file 740 / 989...\nStarting file 760 / 989...\nStarting file 780 / 989...\nStarting file 800 / 989...\nStarting file 820 / 989...\nStarting file 840 / 989...\nStarting file 860 / 989...\nStarting file 880 / 989...\nStarting file 900 / 989...\nStarting file 920 / 989...\nStarting file 940 / 989...\nStarting file 960 / 989...\nStarting file 980 / 989...\n\nNon-negative PPH Team 0/2 (optimal): 0.503\nNon-negative PPH Team 1/3 (basic ): 0.507\n\nPPC Team 0/2 (optimal): 0.600\nPPC Team 1/3 (basic ): 0.608\n\nEuchre / single / sweep rate Team 0/2 (optimal): 0.203 / 0.610 / 0.147\nEuchre / single / sweep rate Team 1/3 (basic ): 0.202 / 0.609 / 0.150\n\nLoner success rate Team 0/2 (optimal): 0.54\nLoner success rate Team 1/3 (basic ): 0.54\n\nCPU times: user 4min, sys: 3.25 s, total: 4min 4s\nWall time: 4min 3s\n" ], [ "%%time\nfolder = '/staging/fast/taylora/euchre/opt_vs_basic'\nprint('Loading files into memory...')\nif folder[-1] != '/': folder += '/'\np02_level, p13_level = 'optimal', 'basic'\ndf = pd.concat([pd.read_csv(folder+file) for file in os.listdir(folder) if '.csv' in file]).reset_index(drop=True)\nif len(p02_level) < len(p13_level): p02_level += ' '*(len(p13_level)-len(p02_level))\nelse: p13_level += ' '*(len(p02_level)-len(p13_level))", "Loading files into memory...\nCPU times: user 56.1 s, sys: 6.46 s, total: 1min 2s\nWall time: 1min 2s\n" ], [ "%%time\nprint('Applying caller trueid / points...')\ndf['caller_trueid'] = df.apply(lambda row: row['p' + str(row['caller']) + 'trueid'], axis=1)\ndf['caller_points'] = df.apply(lambda x: 4*(x['result']=='Loner') + 2*(x['result']=='Sweep') +\\\n 1*(x['result']=='Single') - 2*(x['result']=='EUCHRE'), axis=1)\nprint('Done!')", "Applying caller trueid / points...\nDone!\nCPU times: user 7min 14s, sys: 6.34 s, total: 7min 20s\nWall time: 7min 20s\n" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 9869980 entries, 0 to 9869979\nColumns: 124 entries, p0c1 to caller_points\ndtypes: int64(103), object(21)\nmemory usage: 9.1+ GB\n" ], [ "mask_02 = (df['caller_trueid'] == 0) | (df['caller_trueid'] == 2)\nmask_13 = (df['caller_trueid'] == 1) | (df['caller_trueid'] == 3)", "_____no_output_____" ], [ "sum(df[(df['caller_points'] > 0) & (mask_02)]['caller_points'])", "_____no_output_____" ], [ "df = pd.read_csv('/staging/fast/taylora/euchre/opt_vs_basic/10000_hands_000.csv')", "_____no_output_____" ], [ "%%time\ndf['caller_points'] = df.apply(lambda x: 4*(x['result']=='Loner') + 2*(x['result']=='Sweep') +\\\n 1*(x['result']=='Single') - 2*(x['result']=='EUCHRE'), axis=1)", "CPU times: user 279 ms, sys: 7.41 ms, total: 286 ms\nWall time: 285 ms\n" ], [ "%%time\ndf['caller_points'] = 4*(df['result']=='Loner') + 2*(df['result']=='Sweep') + 1*(df['result']=='Single') - 2*(df['result']=='EUCHRE')", "CPU times: user 4.1 ms, sys: 45 µs, total: 4.14 ms\nWall time: 
3.73 ms\n" ], [ "df.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7173aea32fe459938e0c8967b439f1ff2dacea0
7,254
ipynb
Jupyter Notebook
Validation_and_Classification_Maps.ipynb
kotfic/Classification-of-Hyperspectral-Image
cb38a7986045f3189b0feb8df794732f0d86e3bf
[ "MIT" ]
null
null
null
Validation_and_Classification_Maps.ipynb
kotfic/Classification-of-Hyperspectral-Image
cb38a7986045f3189b0feb8df794732f0d86e3bf
[ "MIT" ]
null
null
null
Validation_and_Classification_Maps.ipynb
kotfic/Classification-of-Hyperspectral-Image
cb38a7986045f3189b0feb8df794732f0d86e3bf
[ "MIT" ]
null
null
null
29.975207
174
0.547698
[ [ [ "# Define Dependencies", "_____no_output_____" ] ], [ [ "# Import the necessary libraries\nfrom sklearn.decomposition import PCA\nimport os\nimport scipy.io as sio\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.utils import np_utils\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport itertools\nimport spectral", "_____no_output_____" ], [ "# Global Variables\nwindowSize = 5\nnumComponents = numPCAcomponents = 30\ntestRatio = 0.25", "_____no_output_____" ] ], [ [ "# Define the neccesary functions for later use", "_____no_output_____" ] ], [ [ "def loadIndianPinesData():\n data_path = os.path.join(os.getcwd(),'data')\n data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']\n labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']\n \n return data, labels\n\ndef reports (X_test,y_test):\n Y_pred = model.predict(X_test)\n y_pred = np.argmax(Y_pred, axis=1)\n target_names = ['Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn'\n ,'Grass-pasture', 'Grass-trees', 'Grass-pasture-mowed', \n 'Hay-windrowed', 'Oats', 'Soybean-notill', 'Soybean-mintill',\n 'Soybean-clean', 'Wheat', 'Woods', 'Buildings-Grass-Trees-Drives',\n 'Stone-Steel-Towers']\n\n \n classification = classification_report(np.argmax(y_test, axis=1), y_pred, target_names=target_names)\n confusion = confusion_matrix(np.argmax(y_test, axis=1), y_pred)\n score = model.evaluate(X_test, y_test, batch_size=32)\n Test_Loss = score[0]*100\n Test_accuracy = score[1]*100\n \n return classification, confusion, Test_Loss, Test_accuracy\n\n\ndef applyPCA(X, numComponents=75):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = PCA(n_components=numComponents, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents))\n return newX, pca\n\ndef Patch(data,height_index,width_index):\n #transpose_array = data.transpose((2,0,1))\n #print transpose_array.shape\n height_slice = slice(height_index, height_index+PATCH_SIZE)\n width_slice = slice(width_index, width_index+PATCH_SIZE)\n patch = data[height_slice, width_slice, :]\n \n return patch", "_____no_output_____" ], [ "X_test = np.load(\"GITHUB/XtestWindowSize\" \n + str(windowSize) + \"PCA\" + str(numPCAcomponents) + \"testRatio\" + str(testRatio) + \".npy\")\ny_test = np.load(\"GITHUB/ytestWindowSize\" \n + str(windowSize) + \"PCA\" + str(numPCAcomponents) + \"testRatio\" + str(testRatio) + \".npy\")", "_____no_output_____" ], [ "X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[3], X_test.shape[1], X_test.shape[2]))\ny_test = np_utils.to_categorical(y_test)", "_____no_output_____" ], [ "# load the model architecture and weights\nmodel = load_model('my_model.h5')", "_____no_output_____" ], [ "classification, confusion, Test_loss, Test_accuracy = reports(X_test,y_test)\nclassification = str(classification)\nconfusion = str(confusion)\nfile_name = 'report' + \"WindowSize\" + str(windowSize) + \"PCA\" + str(numComponents) + \"testRatio\" + str(testRatio) +\".txt\"\nwith open(file_name, 'w') as x_file:\n x_file.write('{} Test loss (%)'.format(Test_loss))\n x_file.write('\\n')\n x_file.write('{} Test accuracy (%)'.format(Test_accuracy))\n x_file.write('\\n')\n x_file.write('\\n')\n x_file.write('{}'.format(classification))\n x_file.write('\\n')\n x_file.write('{}'.format(confusion))", "_____no_output_____" ], [ "# load the original image\nX, y = loadIndianPinesData()", "_____no_output_____" ], [ "X,pca = 
applyPCA(X,numComponents=numComponents)", "_____no_output_____" ], [ "height = y.shape[0]\nwidth = y.shape[1]\nPATCH_SIZE = 5\nnumComponents = 30", "_____no_output_____" ], [ "# calculate the predicted image\noutputs = np.zeros((height,width))\nfor i in range(height-PATCH_SIZE+1):\n for j in range(width-PATCH_SIZE+1):\n target = int(y[int(i+PATCH_SIZE/2), int(j+PATCH_SIZE/2)])\n if target == 0 :\n continue\n else :\n image_patch=Patch(X,i,j)\n #print (image_patch.shape)\n X_test_image = image_patch.reshape(1,image_patch.shape[2],image_patch.shape[0],image_patch.shape[1]).astype('float32') \n prediction = (model.predict_classes(X_test_image)) \n outputs[int(i+PATCH_SIZE/2)][int(j+PATCH_SIZE/2)] = prediction+1", "_____no_output_____" ], [ "ground_truth = spectral.imshow(classes = y,figsize =(5,5))", "_____no_output_____" ], [ "predict_image = spectral.imshow(classes = outputs.astype(int),figsize =(5,5))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7174dfae6553a576f1a36bceab1c08eaa5604ce
2,144
ipynb
Jupyter Notebook
test/py/Vernalizationprogress.ipynb
cyrillemidingoyi/SQ_Wheat_Phenology
9f145e34eb837a7aadfb861f2c632d21b2e679f3
[ "MIT" ]
3
2018-12-06T07:54:25.000Z
2022-02-03T16:31:33.000Z
test/py/Vernalizationprogress.ipynb
cyrillemidingoyi/SQ_Wheat_Phenology
9f145e34eb837a7aadfb861f2c632d21b2e679f3
[ "MIT" ]
2
2018-12-06T07:51:42.000Z
2020-11-14T18:03:12.000Z
test/py/Vernalizationprogress.ipynb
cyrillemidingoyi/SQ_Wheat_Phenology
9f145e34eb837a7aadfb861f2c632d21b2e679f3
[ "MIT" ]
5
2018-12-10T12:11:46.000Z
2022-03-04T10:52:03.000Z
28.210526
80
0.586287
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7174e4c69df95a3bb4ffae7ca22e50f7a590791
18,876
ipynb
Jupyter Notebook
grades.ipynb
rogerfitz/nfl_graders
d2696b5a884e6edd67b78c06d96d1f5f2635e9d5
[ "MIT" ]
null
null
null
grades.ipynb
rogerfitz/nfl_graders
d2696b5a884e6edd67b78c06d96d1f5f2635e9d5
[ "MIT" ]
null
null
null
grades.ipynb
rogerfitz/nfl_graders
d2696b5a884e6edd67b78c06d96d1f5f2635e9d5
[ "MIT" ]
null
null
null
28.818321
208
0.395582
[ [ [ "## Bucky Brooks \nhttp://www.nfl.com/news/author?id=09000d5d80f97bfd \nScrape all his articles above and look for draft grades", "_____no_output_____" ] ], [ [ "from bs4 import BeautifulSoup\nimport pandas as pd\nimport urllib\nfrom collections import defaultdict\nimport datetime\nimport string", "_____no_output_____" ], [ "writer_id=\"09000d5d80f97bfd\"\nbase_url = 'http://www.nfl.com/news/author?id='+writer_id\nhtml_week = urllib.urlopen(base_url).read()\nsoup = BeautifulSoup(html_week, 'html.parser')", "_____no_output_____" ], [ "articles=soup.select('div[class=news-author-blurb-container]')", "_____no_output_____" ], [ "def gradesArticles(writer_id):\n writer_id=\"09000d5d80f97bfd\"\n base_url = 'http://www.nfl.com/news/author?id='+writer_id\n html_week = urllib.urlopen(base_url).read()\n soup = BeautifulSoup(html_week, 'html.parser')\n articles=soup.select('div[class=news-author-blurb-container]')\n gradeArticles=[]\n for article in articles:\n a=article.select(\"a\")[0]\n title=a.text\n url=a.get(\"href\")\n if 'draft grades' in title:\n gradeArticles.append({'url': url, 'title':title})\n return gradeArticles\ngrades=pd.DataFrame(gradesArticles(articles))", "_____no_output_____" ], [ "grades.tail()", "_____no_output_____" ] ], [ [ "## Team Grades", "_____no_output_____" ] ], [ [ "def procLinks(paragraph):\n #can also return quote in entity graph form\n team=''\n players=[]\n for link in paragraph.select('a'):\n if 'team' in str(link):\n if link.text[0].isupper():\n team=link.text\n elif 'player' in str(link):\n players.append({'url': link.get('href'), 'name': link.text})\n return {'team': team, 'players': players}\n \n \ndef procBold(paragraph):\n grade=None\n for b in paragraph.select('b'):\n if 'GRADE: ' in b.text:\n grade=b.text.split('GRADE: ')[1]\n return {'grade': grade}\n \ndef getMeta(paragraph):\n linkData=procLinks(paragraph)\n boldData=procBold(paragraph)\n meta=dict(linkData.items()+boldData.items())\n return meta", "_____no_output_____" ], [ "rows=[]\nfor url in grades['url']:\n html = urllib.urlopen('http://www.nfl.com/'+url).read()\n soup=BeautifulSoup(html, 'html.parser')\n rawTime=soup.find(id='article-time').get('title')[0:10]\n date=datetime.datetime.strptime(rawTime, '%Y-%m-%d')\n for paragraph in soup.select('p'):\n if 'GRADE: ' in str(paragraph):\n row = getMeta(paragraph)\n row['published']=date\n rows.append(row)\nbucky=pd.DataFrame(rows)", "_____no_output_____" ], [ "bucky=bucky.dropna(subset=['team', 'grade'])", "_____no_output_____" ], [ "gradeVals={'A': 4., 'B': 3., 'C': 2., 'D': 1., 'F': 0.}\ndef gradeToNum(grade):\n offset=0\n if grade[-1]=='+':\n offset=.33\n elif grade[-1]=='-':\n offset=-.33\n return offset+gradeVals[grade[0]]\nbucky['gradeVal']=bucky['grade'].apply(gradeToNum)", "_____no_output_____" ], [ "bucky['year']=bucky['published'].dt.year", "_____no_output_____" ] ], [ [ "iterate through players and see if still playing or starting and then team record", "_____no_output_____" ] ], [ [ "bucky['gradeVals'].mean()", "_____no_output_____" ], [ "time_series=[]\nfor idx, t in bucky.groupby('team', axis=0):\n sum_=0\n for year in [2015,2014,2013]:\n row=t[t['published'].dt.year==year]\n gradVal=0\n if len(row)==0:\n gradeVal=3.05\n else:\n gradeVal=row.iloc[0]['gradeVal']\n score=.8**(-2013+year)*gradeVal\n sum_+=score\n \n time_series.append((t['team'].iloc[0], sum_, len(t)))", "_____no_output_____" ], [ "teamGrades=pd.DataFrame(time_series)\nteamGrades.sort(1, ascending=False)", 
"C:\\Users\\Roger\\Anaconda2\\envs\\py2\\lib\\site-packages\\ipykernel\\__main__.py:2: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....)\n from ipykernel import kernelapp as app\n" ] ], [ [ "Better teams are ranked worse. panthers had strong draft classes. Same with patriots. At least moreso than chargers and especially browns", "_____no_output_____" ], [ "for team grades need to compare this discounted score to records. Plot scatter plot of win percent and score. No coaching built in so perhaps poor correlation corresponds to coach skill or other factors", "_____no_output_____" ], [ "Ironically, http://www.nfl.com/news/story/0ap3000000664439/article/seahawks-steelers-panthers-headline-10-most-talented-teams", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
e7175357b9a927d71b2ce310a7d4f4efdbd612e8
29,872
ipynb
Jupyter Notebook
dMRI Reconstruction Project.ipynb
brainhack-school2020/BHS_Project_dMRI
58131a577153dd85ab2b446f304d3b4c1528d020
[ "CC0-1.0" ]
null
null
null
dMRI Reconstruction Project.ipynb
brainhack-school2020/BHS_Project_dMRI
58131a577153dd85ab2b446f304d3b4c1528d020
[ "CC0-1.0" ]
9
2020-05-22T18:26:55.000Z
2022-03-12T00:34:59.000Z
dMRI Reconstruction Project.ipynb
brainhack-school2020/BHS_Project_dMRI
58131a577153dd85ab2b446f304d3b4c1528d020
[ "CC0-1.0" ]
1
2021-01-15T00:20:59.000Z
2021-01-15T00:20:59.000Z
33.984073
739
0.618003
[ [ [ "# dMRI Data Reconstruction\n\n**Contributor:** Erjun Zhang\n\n**Update time:** June 12, 2020\n\n**Status:** Complete first phase, and still working on this project untill the end of 2020 year\n\nWelcome anyone, who has questions about this project, to contact the contributor through [email]([email protected]).\n\n", "_____no_output_____" ], [ "This project is about diffusion magnetic resonance (MR) data processing and analysis. It mainly consists of three parts: brain diffusion MR data preprocessing, diffusion MRI images reconstruction, data visualization and left and right hemispherical preprocessed MR images classification. The whole procedures can be found in [this Jupyter Notebook file](https://github.com/brainhack-school2020/BHS_Project_dMRI/blob/master/dMRI%20Reconstruction%20Project.ipynb). Explanations about procedures results and other details are given in it.\n\nWith reproducibility being a primary concern, this project was completed by using open-source softwares/tools (Python, FSL, DIPYPE...) and dataset (dHCP and PRIME). It can be as a simple tutorial/example for new students in neuroscience (diffusion MR brain imaging) to familiar/start with their further neuroscience study. \n\nGoals:\n\n* **Data Preprocesssing:** Get preprocessed diffusion MR images from raw MRI data;\n* **Image Reconstruction:** Reconstruct diffusion tensor images from the preprocessed data;\n* **Machine Learning Classsification:** By using machine learning, randomly choose left or right hemispheres on each slice of MRI images preprocessed above and find method to give results that which part it is belonging to (right or left?). ", "_____no_output_____" ], [ "## Data Preprocessing", "_____no_output_____" ], [ "Data preprocessing is quit important for dMRI reconstruction. Different data preprocessing may lead to different reconstruction image qualities, which will make the comparation of different reconstruct methods unreliable. 
", "_____no_output_____" ], [ "### __Import python libraries__", "_____no_output_____" ] ], [ [ "import os # To control directories\nimport nibabel as nib # read and save medical images\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n%matplotlib inline\nimport plotly\nimport plotly.express as px\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\nfrom skimage import io # image/video read and show\n\nimport timeit # compute time, usage: timeit.timeit()\nimport math\nimport time\nimport warnings\nimport numpy as np\nimport pandas as pd\n\nimport nipype.interfaces.fsl as fsl # topup\nfrom nipype.interfaces.fsl import TOPUP\nfrom nipype.interfaces.fsl import ApplyTOPUP\nfrom nipype.interfaces.fsl import Eddy\nfrom nipype.testing import anatfile\n\nfrom dipy.denoise.localpca import mppca # denoising\nfrom dipy.io import read_bvals_bvecs\nfrom dipy.core.gradients import gradient_table\nfrom dipy.reconst.dti import TensorModel\nfrom dipy.reconst.dti import fractional_anisotropy\nfrom dipy.reconst.dti import color_fa\nimport dipy.reconst.dki as dki\n\n\n# Self-defined functions\nfrom Extract_b0_Image import Extract_b0_Image\nfrom nib_rdshow_img import nib_rdshow_img\nfrom nib_read_img import nib_read_img\nfrom nib_show_img import nib_show_img\nfrom vol_plot import vol_plot\nfrom interact_vol_plot import interact_vol_plot\n\n# Import libraries used for data analysis\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report,confusion_matrix\nfrom sklearn.neighbors import KNeighborsClassifier", "_____no_output_____" ] ], [ [ "### Set data path and load data\n\nThis project used data from online datasets offered by:\n1. [The Developing Human Connectome Project](http://www.developingconnectome.org/second-data-release/). It consists of over 800 neonatal scans and over 250 fetal scans, and can be used for data analysis after image reconstruction. \n2. [PRIME](http://fcon_1000.projects.nitrc.org/indi/PRIMEdownloads.html): this dataset was used to reconstruct the diffusion images. It can also be downloaded directly [here](https://drive.google.com/file/d/1zgxynxjUCETBC6MAl4rfh0sL0WhFtKA9/view?usp=sharing). \n3. Since, during preprocessing, we used EPI data with two opposite phase-encoding directions to correct distortions, other data can also be used as the source data if they meet this requirement.\n4. The data used for analysis are generated from the image data after preprocessing. The final data can be found in this [github project folder](https://github.com/brainhack-school2020/BHS_Project_dMRI/blob/master/brain_water.csv).", "_____no_output_____" ], [ "### Data type (NII.GZ)\n\n- NII (NIfTI, a format by the Neuroimaging Informatics Technology Initiative) is a commonly used format for multi-dimensional (up to 7-dimensional) neuroimaging data. The first four dimensions are the three spatial dimensions and time. \n\n- GZ means gzip-compressed NII files.\n* nib.nifti1.Nifti1Image: consists of three parts, namely an image data array, an affine array and image metadata.\n* image metadata: machine info, voxel size and slices\n\nThus, in order to know the exact position of each voxel, we have to combine the image data array and the affine array.
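\n\nFor example, a minimal sketch (the file name is just one of the b0 volumes produced later in this notebook, and the voxel index is arbitrary):\n\n```python\nimport numpy as np\nimport nibabel as nib\n\nimg = nib.load('Extract_AP_b0.nii.gz')\ndata = img.get_fdata() # the image data array\nijk = np.array([10, 20, 15, 1]) # a voxel index in homogeneous form\nxyz = img.affine.dot(ijk)[:3] # the same point in scanner (mm) coordinates\n```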
For more information, please check [fMRI Processing based on python](https://ff120.github.io/2016/06/12/%E8%AE%A4%E7%9F%A5%E7%A5%9E%E7%BB%8F%E7%A7%91%E5%AD%A6%E4%B8%93%E9%A2%98/%E4%BD%BF%E7%94%A8Python%E5%A4%84%E7%90%86fMRI%E6%95%B0%E6%8D%AE/).", "_____no_output_____" ], [ "### __Denoising__", "_____no_output_____" ], [ "Here we use the [Marchenko-Pastur PCA algorithm](https://dipy.org/documentation/1.0.0./examples_built/denoise_mppca/) to denoise images. This algorithm has been shown to provide an optimal compromise between noise suppression and loss of anatomical information for different techniques such as DTI.\n\nDuring the denoising, mppca uses a 3D sliding window (set by the denoising radius, patch_radius) to denoise. Basically, the number of voxels in this 3D sliding window should be larger than the number of DWI volumes.\n\n* Input: AP_file and PA_file, patch radius\n* Output: 'Denoised_AP_dwi.nii.gz' and 'Denoised_PA_dwi.nii.gz'", "_____no_output_____" ], [ "#### Extract b0 images\n\n* Input file: ap_file, or pa_file, or denoised_AP_file, or denoised_PA_file\n* Output: the corresponding extracted 3D file", "_____no_output_____" ], [ "### Merge images\n\n* Input files: 'Extract_denoised_AP_b0.nii.gz' + 'Extract_denoised_PA_b0.nii.gz'\n* Output files: 'Extract_denoised_AP_b0_merged.nii.gz', 'topup_encoding.txt'\n\nWhile editing the topup encoding file, the phase-encoding direction can be found in the .json file. The fourth parameter is the [time duration](file:///home/erjun/Downloads/topup(2f)ExampleTopupFollowedByApplytopup.html) between the readout of the centre of the first echo and the centre of the last echo. It can also be found in the .json file, or calculated from parameters in the .json file. ", "_____no_output_____" ], [ "### TOPUP\n* Input files: 'Extract_denoised_AP_b0_merged.nii.gz' + 'topup_encoding.txt'\n* Output files: 'Extract_denoised_AP_b0_merged_base_fieldcoef.nii.gz', 'Extract_denoised_AP_b0_merged_base_movpar.txt', 'Extract_denoised_AP_b0_merged_corrected.nii.gz', Extract_denoised_AP_b0_merged_field.nii.gz, Extract_denoised_AP_b0_merged_topup.log, jac_01.nii.gz, jac_02.nii.gz, warpfield_01.nii.gz, warpfield_02.nii.gz, xfm_01.mat, xfm_02.mat", "_____no_output_____" ], [ "### ApplyTOPUP\n* Input files: 'Extract_denoised_AP_b0.nii.gz', 'Extract_denoised_PA_b0.nii.gz'\n* Output files: Extract_denoised_AP_b0_merged_corrected.nii.gz", "_____no_output_____" ], [ "### bet\n\n* Input files: 'Extract_denoised_AP_b0_corrected.nii.gz'\n* Output files: bet_brain.nii.gz, bet_brain_mask.nii.gz, index.txt", "_____no_output_____" ], [ "### EDDY\n\nDuring MRI scanning, subject movements and eddy current-induced distortions may occur. These distortions can be corrected by using FSL Eddy. \n* Input files: denoised_AP_file, bet_brain_mask.nii.gz, index.txt\n* Output files: eddy_corrected_AP.nii.gz", "_____no_output_____" ], [ "## DTI Reconstruction
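\n\nA minimal sketch of the tensor and kurtosis fits behind the maps shown below (these use the dipy functions imported earlier; the bval/bvec file names are placeholders):\n\n```python\nimport nibabel as nib\nimport dipy.reconst.dki as dki\nfrom dipy.io import read_bvals_bvecs\nfrom dipy.core.gradients import gradient_table\nfrom dipy.reconst.dti import TensorModel, fractional_anisotropy\n\ndata = nib.load('eddy_corrected_AP.nii.gz').get_fdata()\nbvals, bvecs = read_bvals_bvecs('AP.bval', 'AP.bvec')\ngtab = gradient_table(bvals, bvecs)\n\ntenfit = TensorModel(gtab).fit(data) # DTI fit\nFA = fractional_anisotropy(tenfit.evals) # MD/RD/AD come from the same eigenvalues\n\ndkifit = dki.DiffusionKurtosisModel(gtab).fit(data) # DKI fit (needs multi-shell data)\nMK = dkifit.mk(0, 3) # mean kurtosis, clipped to [0, 3]\n```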
", "_____no_output_____" ], [ "## Data Visualization", "_____no_output_____" ], [ "In this section, I first show the basic images generated during preprocessing and the final image reconstruction process. Then I will go to the data visualization part. Before that, to create images more easily, I will define several image-showing functions first.\n\nAll the images are generated from this project, based on the data preprocessing and image reconstruction parts above.\n\nLet's go and check what basic images we already have!", "_____no_output_____" ], [ "### Basic Images", "_____no_output_____" ] ], [ [ "# DTI Images\n# set plot background\n#plt.style.use('seaborn-dark')\nplt.style.use('grayscale')\n# plot parameter maps \naxf1=np.fliplr(DTI_RGB[:,:,28,:])\n#axf1=axf1.T\naxf2=np.fliplr(DTI_MD[:,:,28])\naxf2=axf2.T\naxf3=np.fliplr(DTI_RD[:,:,28])\naxf3=axf3.T\naxf4=np.fliplr(DTI_AD[:,:,28])\naxf4=axf4.T\nfig, [ax0,ax2, ax3, ax4] = plt.subplots(1,4,figsize=(12,10),subplot_kw={'xticks': [], 'yticks': []})\nax0.imshow(axf1); ax0.set_title('Color coded FA',fontweight='bold',size=13)\n#ax1.imshow(DTI_FA[:,:,28]); ax1.set_title('Fractional anisotropy',fontweight='bold',size=13)\nax2.imshow(axf2); ax2.set_title('Mean diffusivity',fontweight='bold',size=13)\nax3.imshow(axf3); ax3.set_title('Radial diffusivity',fontweight='bold',size=13)\nax4.imshow(axf4); ax4.set_title('Axial diffusivity',fontweight='bold',size=13)", "_____no_output_____" ], [ "# DKI images \naxt0=np.fliplr(DKI_AD[:,:,28])\naxt0=axt0.T\n\naxt1=np.fliplr(DKI_RD[:,:,28])\naxt1=axt1.T\n\naxt2=np.fliplr(DKI_MD[:,:,28])\naxt2=axt2.T\n\naxt3=np.fliplr(DKI_AK[:,:,28])\naxt3=axt3.T\n\naxt4=np.fliplr(DKI_RK[:,:,28])\naxt4=axt4.T\n\naxt5=np.fliplr(DKI_MK[:,:,28])\naxt5=axt5.T\n\nfig, ([ax0, ax1, ax2],[ax3, ax4, ax5]) = plt.subplots(2,3,figsize=(10,8),subplot_kw={'xticks': [], 'yticks': []})\nax0.imshow(axt0); ax0.set_title('Axial diffusivity',fontweight='bold',size=10)\nax1.imshow(axt1); ax1.set_title('Radial diffusivity',fontweight='bold',size=10)\nax2.imshow(axt2); ax2.set_title('Mean diffusivity',fontweight='bold',size=10)\nax3.imshow(axt3); ax3.set_title('Axial kurtosis',fontweight='bold',size=10)\nax4.imshow(axt4); ax4.set_title('Radial kurtosis',fontweight='bold',size=10)\nax5.imshow(axt5); ax5.set_title('Mean kurtosis',fontweight='bold',size=10)", "_____no_output_____" ] ], [ [ "### Visualization", "_____no_output_____" ] ], [ [ "# Functions used to show images\n# To read image data from a nii.gz file: nib_read_img(path)\n# To show images from a nii.gz file: nib_show_img(img0,slices,intenseScale)\n# To read and show images from a nii.gz file: nib_rdshow_img(Images,Slices,IntenseScale,TitleImg)\n# Functions to show volume slice images: vol_plot(x) and interact_vol_plot(x,IntenseScale)", "_____no_output_____" ] ], [ [ "#### 3D volume slice image\n\nThese images let you quickly check the general appearance of the data through a slice-by-slice animation. One can also rotate the volume and zoom in to see it clearly.\n\nHere, as an example, I just show the FA map generated from the DTI model. One can change it to other maps, such as DTI_MD or DKI_FA.\n\n \t1. Show different slices of images;\n \t2. Change the observation view point;\n \t3. Play the animation", "_____no_output_____" ] ], [ [ "#vol_plot(DTI_FA[:,:,:]) # or can use interact_vol_plot(DTI_MD,90) to show images\ninteract_vol_plot(DTI_FA,0.90)", "_____no_output_____" ] ], [ [ "#### Data visualization (Preprocessing part)\n\nI am curious about how the images changed during our data preprocessing. These interactive images can be used for this purpose.\n\nOne can control Slices to check the changes on different slices. You can control the upper level of the color scale to get higher image contrast.
You can put the cursor on the images to get the exact color value if you want.\n\n \t1. Select different maps;\n \t2. Select slices;\n \t3. Select the maximum diffusion strength;\n \t4. Display the title", "_____no_output_____" ] ], [ [ "# Set image data for visualization (preprocessing part)\n# This will be used for visualization of preprocessing part 1 and part 2\nF11='Extract_AP_b0.nii.gz'\nF12='Extract_PA_b0.nii.gz'\nF13='Extract_denoised_AP_b0.nii.gz'\nF14='Extract_denoised_PA_b0.nii.gz'\nF15='Extract_denoised_AP_b0_merged1.nii.gz'\nF16='Extract_denoised_AP_b0_merged_corrected1.nii.gz'\nF17='bet_brain.nii.gz'\nF18='bet_brain_mask.nii.gz'\nF19='Extract_eddy_correct_AP.nii.gz'\n\n\nPreproImage= dict()\nPreproImage['Original_AP_Image']=F11\nPreproImage['Original_PA_Image']=F12\nPreproImage['Denoised_AP_Image']=F13\nPreproImage['Denoised_PA_Image']=F14\nPreproImage['Merged_Image']=F15\nPreproImage['TOPUP_AP_Image']=F16\nPreproImage['Brain_After_bet']=F17\n\nPreproImage['Brain_mask']=F18\nPreproImage['Eddy_Correct_AP_Image']=F19\n\n# Load DTI and DKI results\nImages= dict()\nImages['DTI_FA_RGB']=(data_path+'/DTI/FA_RGB.nii.gz')\nImages['DTI_AD']=(data_path+'/DTI/AD.nii.gz')\nImages['DTI_FA']=data_path+'/DTI/FA.nii.gz'\nImages['DTI_MD']=data_path+'/DTI/MD.nii.gz'\nImages['DTI_RD']=data_path+'/DTI/RD.nii.gz'\n# DKI\nImages['DKI_AD']=data_path+'/DKI/dki_AD.nii.gz'\nImages['DKI_FA']=data_path+'/DKI/dki_FA.nii.gz'\nImages['DKI_MD']=data_path+'/DKI/dki_MD.nii.gz'\nImages['DKI_RD']=data_path+'/DKI/dki_RD.nii.gz'\nImages['DKI_AK']=data_path+'/DKI/AK.nii.gz'\nImages['DKI_MK']=data_path+'/DKI/MK.nii.gz'\nImages['DKI_RK']=data_path+'/DKI/RK.nii.gz'", "_____no_output_____" ], [ "# Preprocessing image visualization 1\ninteractive(nib_rdshow_img,Images=PreproImage,Slices=widgets.IntSlider(min=0,max=43,step=1,value=28),\\\n IntenseScale=widgets.IntSlider(min=0,max=100,step=1,value=90),\\\n TitleImg='Preprocessed Images at Different Steps')", "_____no_output_____" ] ], [ [ "#### Preprocessing image visualization 2\n\n1. Play different maps;\n2. Select slices;\n3. Select the maximum diffusion strength;\n4. Display the title
Display title", "_____no_output_____" ] ], [ [ "# Preprocessing image visualization 2\n# Function below is used to show correct figure title while animaiton playing\ndef nib_rdshow_play(Slices,IntenseScale,NoX):\n warnings.filterwarnings('ignore')\n if(NoX==0):\n Image=F11\n TitleImg='Original_AP_Image'\n elif(NoX==1):\n Image=F12 #'epi_b0_merged.nii.gz'\n TitleImg='Original_PA_Image'\n elif(NoX==2):\n Image=F13\n TitleImg='Denoised_AP_Image'\n elif(NoX==3):\n Image=F14\n TitleImg='Denoised_PA_Image'\n elif(NoX==4):\n Image=F15 #'epi_b0_merged.nii.gz'\n TitleImg='Merged_Image'\n elif(NoX==5):\n Image=F16\n TitleImg='TOPUP_AP_Image'\n elif(NoX==6):\n Image=F17\n TitleImg='Brain_After_bet'\n elif(NoX==7):\n Image=F18\n TitleImg='Brain_mask'\n else:\n Image=F15\n TitleImg='Eddy_Correct_AP_Image'\n image_data = nib.load(Image).get_data()\n img00=image_data[:,:,Slices]\n zmax0=img00.max()\n fig=px.imshow(image_data[:,:,Slices],color_continuous_scale=\"Viridis\",\\\n zmin=0,zmax=zmax0*IntenseScale/100,\\\n labels={},template=\"plotly_white\")\n fig.update_xaxes(showticklabels=False)\n fig.update_yaxes(showticklabels=False)\n fig.update_layout(coloraxis_showscale=False)\n fig.update_layout(title=TitleImg)\n fig.show()\n\ninteractive(nib_rdshow_play,Slices=widgets.IntSlider(min=0,max=43,step=1,value=28),\\\n IntenseScale=widgets.IntSlider(min=0,max=100,step=1,value=90),\\\n NoX = widgets.Play(value=0,min=0,max=8,step=1,interval=2000,description=\"Press play\",\\\n disabled=False))", "_____no_output_____" ] ], [ [ "#### Data visualization (Reconstruction part)\n\nThis part is used to compare images generated from DTI and DKI models. The founction is almost the same as virsualizaiotn above. But to make us better choose iamges, we use dropdown bar to replace slider animation.\n\n1. Play different maps;\n2. Select slices;\n3. Slect maximum diffusion sthrength;\n4. Display give title", "_____no_output_____" ] ], [ [ "# Reconstruction image visualization 1\ninteractive(nib_rdshow_img,Images=Images,Slices=widgets.IntSlider(min=0,max=43,step=1,value=22),\\\n IntenseScale=widgets.IntSlider(min=0,max=100,step=1,value=90),\\\n TitleImg='Reconstructed Images of Different Models')", "_____no_output_____" ] ], [ [ "## Data Analysis\n\n1. 80 Slices of brain images (40 left hemisphere slice and 40 right hemisphere) were extracted and transfered to brain parts classification;\n2. Left hemisphere brain image and right hemisphere brain iamge were taged with 0 and 1 specifically;\n3. Slice number was treated as one of features used for classification;\n4. Diffusino strength of each slice hemisphere was calculated as the second feature of classification;\n5. Total effictive voxel of eahc slice hemisphere was calculated as the third feature for classification;\n6. Dataset was splitted into train dataset (70%) and test dataset (30%);\n7. 
KNN was used to classify the hemispheres.", "_____no_output_____" ] ], [ [ "# Import libraries used for data analysis\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report,confusion_matrix\nfrom sklearn.neighbors import KNeighborsClassifier", "_____no_output_____" ], [ "shapeT18=np.shape(T18)\ndata_MR=np.zeros([80,4])\nprint('MR data shape:',shapeT18)\nprint('dMRI data shape:',np.shape(data_MR))", "_____no_output_____" ], [ "# Generate the feature rows for the left and right hemispheres\nfor i in range(0,39):\n data_MR[i,0]=i\n data_MR[i+39,0]=i\n data_MR[i,3]=0\n data_MR[i+39,3]=1\n \n # Diffusion strength (log-scaled sum of the eddy-corrected signal) per hemisphere\n EDDY_valueA=T19[0:47,:,i]\n EDDY_valueA=np.sum(EDDY_valueA)\n data_MR[i,2]=5*np.log2(1+EDDY_valueA)\n EDDY_valueB=T19[48:95,:,i]\n EDDY_valueB=5*np.sum(EDDY_valueB)\n data_MR[i+39,2]=np.log2(1+EDDY_valueB)\n \n # Effective voxel count from the brain mask per hemisphere\n Num_voxA=T18[0:47,:,i]\n Num_voxA=np.sum(Num_voxA)\n data_MR[i,1]=Num_voxA\n Num_voxB=T18[48:95,:,i]\n Num_voxB=np.sum(Num_voxB)\n data_MR[i+39,1]=Num_voxB\nnp.savetxt('brain_water.csv',data_MR,delimiter=',')\nbrain_water_csv = pd.read_csv('brain_water.csv',names=['Num_slice','Num_vox','Semi_AB','Position'])", "_____no_output_____" ], [ "# Inspect the first rows of the data to be analyzed\nbrain_water_csv=brain_water_csv.dropna() # Drop rows containing NaN values\nbrain_water_csv.head()", "_____no_output_____" ], [ "# Summary statistics of the data to be analyzed\nbrain = brain_water_csv.describe()\nbrain", "_____no_output_____" ], [ "#sns.pairplot(brain_water_csv)", "_____no_output_____" ], [ "# Check the frequency of right and left hemispheres\nbrain_water_csv['Position'].plot.hist()", "_____no_output_____" ], [ "# Check the distribution of the brain diffusion water data\nbrain_water_csv['Semi_AB'].plot.density()", "_____no_output_____" ], [ "# Set up the feature matrix and target class\nx1=(brain_water_csv['Num_slice'])\nx2=(brain_water_csv['Num_vox'])\nx3=(brain_water_csv['Semi_AB'])\nx=np.vstack((x1,x2,x3))\nx=x.T\ny=5*(brain_water_csv['Position'])\ny=y.T", "_____no_output_____" ], [ "# Split the dataset into train and test sets\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=123)", "_____no_output_____" ], [ "# Train and test KNN with different K values\nerror_rate = []\nfor i in range(1,40): \n knn = KNeighborsClassifier(n_neighbors=i)\n knn.fit(x_train,y_train)\n pred_i = knn.predict(x_test)\n error_rate.append(np.mean(pred_i != y_test))\n\nplt.figure(figsize=(12,9))\nplt.plot(range(1,40),error_rate,color='gray', linestyle='dashed', marker='o',\n markerfacecolor='black', markersize=5)\nplt.title('Error Rate vs. K Value', fontsize=20)\nplt.xlabel('K value',fontsize=14)\nplt.ylabel('Error Rate',fontsize=14)", "_____no_output_____" ], [ "# Train and test the final KNN model\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(x_train,y_train)\npred = knn.predict(x_test)\nconf_mat=confusion_matrix(y_test,pred)\nprint('Conf_mat',conf_mat)\n\n# Show the classification report\nprint(classification_report(y_test,pred))\nprint(\"Misclassification error rate:\",round(np.mean(pred!=y_test),3))", "_____no_output_____" ] ], [ [ "## Conclusion\n\nThis project started with the medical imaging data format and then preprocessed the diffusion-weighted images, which included MP-PCA denoising, FSL TOPUP distortion correction, and head-movement correction. After this, DTI images were successfully reconstructed from the preprocessed images. Additionally, a dataset for left- and right-hemisphere classification was generated from these preprocessed images. 
Using the KNN method, a classification accuracy of 92% was reached. This is only a starting point, and I will continue the project in the future. Soon, self-made models, instead of the DTI model, will be used to reconstruct diffusion images. After that, I would like to use machine learning methods to classify potential changes in brain microstructure.", "_____no_output_____" ] ] ]
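A possible follow-up check on the 92% figure: with only 80 samples, a single 70/30 split can be optimistic or pessimistic depending on the random seed. The sketch below is a minimal, hedged example of estimating accuracy with 5-fold cross-validation; it assumes the `x` (features) and `y` (labels) arrays built in the cells above and only adds scikit-learn's `cross_val_score`.

```python
# Minimal sketch (assumption: x and y exist as built in the cells above).
# Cross-validation averages accuracy over several splits, giving a
# less split-dependent estimate than a single train/test partition.
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=1)
scores = cross_val_score(knn, x, y, cv=5)  # 5 folds of ~16 samples each
print("Fold accuracies:", scores.round(3))
print("Mean accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))
```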
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e71762705166d6a270f641dfdcbba4f91bac2581
1,940
ipynb
Jupyter Notebook
Ex_Files_Python_Data_Functions/Exercise Files/06_04_pie_charts.ipynb
cinthiatengan/Python-Functions-intro
ea25ab3a9e95ab81726b8efc946f5ff1184d3b2e
[ "MIT" ]
null
null
null
Ex_Files_Python_Data_Functions/Exercise Files/06_04_pie_charts.ipynb
cinthiatengan/Python-Functions-intro
ea25ab3a9e95ab81726b8efc946f5ff1184d3b2e
[ "MIT" ]
null
null
null
Ex_Files_Python_Data_Functions/Exercise Files/06_04_pie_charts.ipynb
cinthiatengan/Python-Functions-intro
ea25ab3a9e95ab81726b8efc946f5ff1184d3b2e
[ "MIT" ]
null
null
null
22.55814
79
0.545361
[ [ [ "# import relevant libraries and modules\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# create a list of categories\ncateg = ['Homework', 'Labs', 'Quizzes', 'Midterm', 'Final']\n\n# create a list of weights (in %)\nweights = [15, 15, 15, 22, 33]\n\n# create a pandas dataframe containing the weighted components \n# used to give students their grades in a particular course\nbreakdown = pd.DataFrame(data={'category':categ,'weight':weights})", "_____no_output_____" ], [ "# display breakdown\nbreakdown", "_____no_output_____" ], [ "# create a pie chart --- illustrate the breakdown of students' course \n# grades in a particular course\nplt.figure(figsize=(7,7))\nplt.pie(breakdown['weight'], labels=breakdown['category'], \n autopct='%1.0f%%')\nplt.title('Breakdown of Course Grade')\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
e717635779f4d977135bf4aaa4a9518f96da4ec0
25,850
ipynb
Jupyter Notebook
Moodle-Simple/notebooks/012-EC2インスタンスの作成.ipynb
nii-gakunin-cloud/ocs-templates
a2a39bb8824d489488af3c3972007317bb1ef6a2
[ "BSD-3-Clause" ]
4
2020-05-11T06:30:53.000Z
2022-01-26T03:31:55.000Z
Moodle-Simple/notebooks/012-EC2インスタンスの作成.ipynb
nii-gakunin-cloud/ocs-templates
a2a39bb8824d489488af3c3972007317bb1ef6a2
[ "BSD-3-Clause" ]
1
2021-06-17T01:34:27.000Z
2021-06-17T01:34:27.000Z
Moodle-Simple/notebooks/012-EC2インスタンスの作成.ipynb
nii-gakunin-cloud/ocs-templates
a2a39bb8824d489488af3c3972007317bb1ef6a2
[ "BSD-3-Clause" ]
3
2020-09-08T00:57:52.000Z
2022-01-18T10:42:22.000Z
20.515873
193
0.539574
[ [ [ "# About: ECインスタンスの作成\n\n---\n\nMoodle環境を構築するための計算資源をAWS EC2インスタンスで作成します。", "_____no_output_____" ], [ "## 概要", "_____no_output_____" ], [ "### 前提条件\n\nこのNotebookで EC2インスタンスを作成する際の前提条件を以下に示します。", "_____no_output_____" ], [ "* AWSを操作するための[認証情報(アクセスキー、シークレットキー)](https://docs.aws.amazon.com/ja_jp/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey)があること\n* [Amazon VPC](https://docs.aws.amazon.com/ja_jp/vpc/latest/userguide/what-is-amazon-vpc.html)と[サブネット](https://docs.aws.amazon.com/ja_jp/vpc/latest/userguide/VPC_Subnets.html)が作成済であること\n* [EC2キーペア](https://docs.aws.amazon.com/ja_jp/AWSEC2/latest/UserGuide/ec2-key-pairs.html)が作成済であること\n* 起動したEC2インスタンスに対して、このNotebook環境からsshでログイン可能であること\n - ネットワーク的に到達可能なこと\n - ログインするためのSSH鍵ファイルをこのNotebook環境に配置してあること", "_____no_output_____" ], [ "### 準備\n\nAWS EC2インスタンスを操作するために必要となるライブラリをインストールします。", "_____no_output_____" ], [ "このNotebookでは Ansible を利用してEC2インスタンスを作成します。\n現在の環境にインストールされているライブラリとの競合をさけるためにpipenvで独立した環境を構築します。\n\nまず pipenv をインストールします。", "_____no_output_____" ] ], [ [ "!pip install --user pipenv", "_____no_output_____" ] ], [ [ "pipenvの環境に ansible とAWSの操作に必要となるライブラリをインストールします。", "_____no_output_____" ] ], [ [ "import os\nos.environ['PATH'] += f':{os.environ[\"HOME\"]}/.local/bin'\n!pipenv install ansible\n!pipenv run ansible-galaxy collection install community.aws\n!pipenv install -r $HOME/.ansible/collections/ansible_collections/community/aws/requirements.txt", "_____no_output_____" ] ], [ [ "## パラメータの設定\n\nこのNotebookで作成するAWS EC2インスタンスのパラメータを設定します。", "_____no_output_____" ], [ "### AWSの認証情報\n\nEC2インスタンスの操作を行う際に必要となるAWSの認証情報を指定します。", "_____no_output_____" ], [ "AWSの認証情報(アクセスキー、シークレットキー)を取得する手順については[「アクセスキーの管理」](https://docs.aws.amazon.com/ja_jp/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey)などを参照してください。", "_____no_output_____" ], [ "次のセルを実行すると入力枠が表示されるのでAWSのアクセスキーの値を入力してください。\n\n> 入力後に Enter キーを押すことで入力が完了します。", "_____no_output_____" ] ], [ [ "from getpass import getpass\naws_access_key = getpass()", "_____no_output_____" ] ], [ [ "AWSのシークレットキーを入力してください。", "_____no_output_____" ] ], [ [ "aws_secret_key = getpass()", "_____no_output_____" ] ], [ [ "このNotebookから実行するコマンドでAWS認証情報を参照できるようにするために、環境変数`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`の設定を行います。", "_____no_output_____" ] ], [ [ "import os\n\nos.environ['AWS_ACCESS_KEY_ID'] = aws_access_key\nos.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_key", "_____no_output_____" ] ], [ [ "### EC2インスタンスの名前の指定", "_____no_output_____" ], [ "EC2インスタンスの名前を指定してください。指定した値はEC2インスタンスのNameタグに設定されます。", "_____no_output_____" ] ], [ [ "# (例)\n# aws_ec2_name = 'moodle'\n\naws_ec2_name =", "_____no_output_____" ] ], [ [ "### ECインスタンスを起動する環境の指定", "_____no_output_____" ], [ "EC2インスタンスを起動するリージョンを指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# aws_region = 'ap-northeast-1' # アジアパシフィック (東京)\n\naws_region =", "_____no_output_____" ] ], [ [ "EC2インスタンスのサブネットIDを指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# aws_vpc_subnet_id = 'subnet-xxxxxxxxxxxxxxxxx'\n\naws_vpc_subnet_id =", "_____no_output_____" ] ], [ [ "### EC2インスタンスに割り当てるリソースの指定\n\n起動するEC2インスタンスに割り当てるリソース量を指定します。", "_____no_output_____" ], [ "EC2インスタンスの[インスタンスタイプ](https://aws.amazon.com/jp/ec2/instance-types/)を指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# aws_instance_type = 'm5.large'\n# aws_instance_type = 't3a.large'\n\naws_instance_type =", "_____no_output_____" ] ], [ [ "EC2インスタンスのルートボリュームサイズ(GB)を指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# aws_volume_size = 32\n\naws_volume_size =", 
"_____no_output_____" ] ], [ [ "### AMIの指定", "_____no_output_____" ], [ "ECインスタンスの[AMI](https://docs.aws.amazon.com/ja_jp/AWSEC2/latest/UserGuide/AMIs.html)を指定します。このアプリケーションテンプレートでは、ノードのOSとしてCentOS7を使用することを前提としています。\n\n\n指定したリージョンに対応するAMI IDを[CentOS7](https://aws.amazon.com/marketplace/pp/Centosorg-CentOS-7-x8664-with-Updates-HVM/B00O7WM7QW)で確認して次のセルに指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# aws_image_id = 'ami-06a46da680048c8ae' # CentOS 7, 2002_01(Mar 16, 2020), Asia Pacific (Tokyo)\n\naws_image_id = 'ami-06a46da680048c8ae'", "_____no_output_____" ] ], [ [ "### セキュリティグループの指定\n\n[セキュリティグループ](https://docs.aws.amazon.com/ja_jp/AWSEC2/latest/UserGuide/ec2-security-groups.html)を指定します。\n\n次のセルでセキュリティグループIDを指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# aws_security_group = 'sg-xxxxxxxxxxxxxxxxx'\n\naws_security_group =", "_____no_output_____" ] ], [ [ "### キーペアの指定\n\nEC2インスタンスに設定する[キーペア](https://docs.aws.amazon.com/ja_jp/AWSEC2/latest/UserGuide/ec2-key-pairs.html)を指定します。", "_____no_output_____" ] ], [ [ "# (例)\n# aws_key_name = 'key_name'\n\naws_key_name =", "_____no_output_____" ] ], [ [ "## EC2インスタンスの作成\n\nこれまでに入力したパラメータを指定したEC2インスタンスを作成します。", "_____no_output_____" ], [ "### Ansible Playbook の生成\n\nこのNotebookではEC2インスタンスを作成するために [Ansible](https://www.ansible.com/)を利用します。", "_____no_output_____" ], [ "次のセルを実行するとEC2インスタンスを作成するための [Ansible Playbook](https://docs.ansible.com/ansible/latest/user_guide/playbooks_intro.html) を生成します。", "_____no_output_____" ] ], [ [ "%run scripts/edit_conf.py\n\nplaybook = create_conf_file('localhost', 'aws-ec2.yml')\nwith playbook.open(mode='w') as f:\n f.write(f'''\n- hosts: localhost\n tasks:\n - community.aws.ec2_instance:\n name: {aws_ec2_name}\n image_id: {aws_image_id}\n instance_type: {aws_instance_type}\n vpc_subnet_id: {aws_vpc_subnet_id}\n security_group: {aws_security_group}\n region: {aws_region}\n key_name: {aws_key_name}\n volumes:\n - device_name: /dev/sda1\n ebs:\n volume_size: {aws_volume_size}\n delete_on_termination: true\n''')\ngenerate_edit_link(playbook)", "_____no_output_____" ] ], [ [ "上のリンクをクリックするとブラウザの新しいウィンドウ(タブ)で playbook の編集画面が開きます。編集を行った場合は `ctrl-s` またはメニューの[File]-[Save]で編集結果を保存してください。\n\nEC2インスタンスを作成するためのパラメータの詳細についてはAnsibleのドキュメント[「community.aws.ec2_instanceモジュール」](https://docs.ansible.com/ansible/latest/collections/community/aws/ec2_instance_module.html)を参照してください。\n例えばEC2インスタンスのプライベートIPアドレスに `172.30.2.10` を設定するには以下のような指定を追加してください。\n\n```\n network:\n private_ip_address: 172.30.2.10\n```", "_____no_output_____" ], [ "確認のためplaybookの内容を表示します。", "_____no_output_____" ] ], [ [ "!cat {playbook}", "_____no_output_____" ] ], [ [ "### EC2インスタンスの起動\n\nplaybookを実行してEC2インスタンスを起動します。", "_____no_output_____" ] ], [ [ "!pipenv run ansible-playbook -c local {playbook}", "_____no_output_____" ] ], [ [ "作成したEC2インスタンスのIDなどを確認するために[community.aws.ec2_instance_infoモジュール](https://docs.ansible.com/ansible/latest/collections/community/aws/ec2_instance_info_module.html)を用いて情報を取得します。", "_____no_output_____" ] ], [ [ "import json\nout = !pipenv run ansible localhost -c local -m community.aws.ec2_instance_info -a 'region={aws_region} \\\n filters=\"{{\"tag:Name\": \"{aws_ec2_name}\", \"instance-state-name\": \"running\", \"network-interface.subnet-id\": \"{aws_vpc_subnet_id}\"}}\"'\nidx = [i for i, x in enumerate(out) if x.startswith('localhost |')][0]\nif out[idx] == 'localhost | SUCCESS => {':\n ec2_info = json.loads(' '.join(['{'] + out[(idx + 1):]))\n for line in out[idx:]:\n print(line)\nelse:\n for line in out:\n print(line)\n raise 
RuntimeError(\"error!\")", "_____no_output_____" ] ], [ [ "取得した情報からインスタンスIDの一覧を表示します。", "_____no_output_____" ] ], [ [ "instance_ids = [x['instance_id'] for x in ec2_info['instances']]\nfor id in instance_ids:\n print(id)", "_____no_output_____" ] ], [ [ "取得した情報はインスタンスID以外の情報も多く含まれています。\n例えばプライベートIPアドレスを取得するには次のセルに示した手順で一覧を表示できます。", "_____no_output_____" ] ], [ [ "for x in ec2_info['instances']:\n print(x['private_ip_address'])", "_____no_output_____" ] ], [ [ "取得した情報の内容については[Ansibleのドキュメント](https://docs.ansible.com/ansible/latest/modules/ec2_instance_info_module.html#return-values)を参照してください。", "_____no_output_____" ], [ "## Ansibleの設定\n\n起動したEC2インスタンスをAnsibleで操作するための設定を行います。", "_____no_output_____" ], [ "### パラメータの設定", "_____no_output_____" ], [ "EC2インスタンスを登録するAnsibleのグループ名を指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# target_group = 'Moodle'\n# target_group = aws_ec2_name # EC2インスタンスのNameと同じグループ名にする場合\n\ntarget_group = aws_ec2_name", "_____no_output_____" ] ], [ [ "このNotebook環境からEC2インスタンスに接続する際のホスト名(IPアドレス)を指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# target_hostname = 'moodle.example.org'\n# target_hostname = '172.30.1.10'\n\n\ntarget_hostname =", "_____no_output_____" ] ], [ [ "EC2インスタンスにログインするためのSSH秘密鍵のファイルを指定してください。", "_____no_output_____" ] ], [ [ "# (例)\n# ssh_private_key_file = '~/.ssh/aws_key.pem'\n# ssh_private_key_file = '~/.ssh/id_rsa'\n\nssh_private_key_file =", "_____no_output_____" ] ], [ [ "### インベントリの作成\n\nAnsibleのインベントリ`inventory.yml`をカレントディレクトリに作成します。", "_____no_output_____" ] ], [ [ "%run scripts/group.py\n\ninventory = {'all': {'children': {\n target_group: {\n 'hosts': {\n target_hostname: {},\n },\n 'vars': {\n 'ansible_user': 'centos',\n 'ansible_ssh_private_key_file': os.path.expanduser(ssh_private_key_file),\n 'ansible_python_interpreter': '/usr/bin/python',\n }\n }\n}}}\n\ngenerate_edit_link(update_inventory_yml(inventory))", "_____no_output_____" ] ], [ [ "次のセルを実行すると作成したインベントリの内容を表示します。インベントリの内容を変更したい場合は、上のセルの出力結果に表示しているリンクから編集することができます。", "_____no_output_____" ] ], [ [ "!cat inventory.yml", "_____no_output_____" ] ], [ [ "### ansible.cfg の作成", "_____no_output_____" ], [ "先程、カレントディレクトリに作成した`inventory.yml`をAnsibleのインベントリとして指定するための設定を行います。\n\n> カレントディレクトリにコンフィギュレーションファイル`ansible.cfg`を作成すると、Ansibleを実行する際にその設定が適用されます。", "_____no_output_____" ] ], [ [ "cfg = setup_ansible_cfg()\ngenerate_edit_link(cfg)", "_____no_output_____" ] ], [ [ "次のセルを実行すると作成したコンフィギュレーションファイルの内容を表示します。コンフィギュレーションファイルの内容を変更したい場合は、上のセルの出力結果に表示しているリンクから編集することができます。", "_____no_output_____" ] ], [ [ "!cat ansible.cfg", "_____no_output_____" ] ], [ [ "### EC2インスタンスへの接続確認", "_____no_output_____" ], [ "EC2インスタンスに対して Ansible で接続できることを確認します。", "_____no_output_____" ] ], [ [ "!ssh-keygen -R {target_hostname} || true\n!mkdir -p -m 0700 ~/.ansible/cp\n!env ANSIBLE_HOST_KEY_CHECKING=False \\\n ansible {target_group} -m ping", "_____no_output_____" ] ], [ [ "正常に接続できると以下のように表示されます。\n\n```\nXXX.XXX.XXX.XXX | SUCCESS => {\n \"changed\": false, \n \"ping\": \"pong\"\n}\n```", "_____no_output_____" ], [ "`~/.ssh/known_hosts`の内容を更新します。", "_____no_output_____" ] ], [ [ "!ssh-keyscan -H {target_hostname} >> ~/.ssh/known_hosts", "_____no_output_____" ] ], [ [ "VCノードに対して設定ファイルの変更やパッケージの追加を行う場合にVCノードの管理者権限が必要になる場合があります。Ansibleで管理者権限によるコマンド実行が可能かどうかを確認します。", "_____no_output_____" ] ], [ [ "# 管理者権限(-b)でのコマンド実行\n!ansible {target_group} -b -a 'whoami'", "_____no_output_____" ] ], [ [ "### group_vars ファイルの更新", "_____no_output_____" ] ], [ [ "%run scripts/group.py\n\nupdate_group_vars(\n target_group,\n 
aws_region=aws_region,\n instance_ids=instance_ids,\n)", "_____no_output_____" ] ], [ [ "## Installing Packages\n\nInstall the packages required to build the Moodle environment.", "_____no_output_____" ], [ "### Installing Docker\n\nInstall Docker following the steps in [Install Docker Engine on CentOS](https://docs.docker.com/engine/install/centos/).", "_____no_output_____" ], [ "First, install the required packages.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -m yum -a 'name=yum-utils'", "_____no_output_____" ] ], [ [ "Add the Docker repository.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -a \\\n 'yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo'", "_____no_output_____" ] ], [ [ "Install the Docker packages.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -m yum -a 'name=docker-ce,docker-ce-cli,containerd.io'", "_____no_output_____" ] ], [ [ "Start the service that runs Docker Engine.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -a 'systemctl start docker'", "_____no_output_____" ], [ "!ansible {target_group} -b -a 'systemctl enable docker'", "_____no_output_____" ] ], [ [ "To confirm that Docker Engine is running, run `docker info`.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -a 'docker info'", "_____no_output_____" ] ], [ [ "To allow running the docker command without administrator privileges, add the user to the `docker` group.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -m user -a 'name={{{{ansible_user}}}} append=yes groups=docker'", "_____no_output_____" ] ], [ [ "Confirm that the docker command can be run without administrator privileges. First, terminate the current ssh connection so that the next login picks up the new group.", "_____no_output_____" ] ], [ [ "if os.path.exists(os.path.expanduser(f'~/.ansible/cp/centos@{target_hostname}:22')):\n !ssh -o ControlPath=~/.ansible/cp/centos@{target_hostname}:22 -O exit {target_hostname}", "_____no_output_____" ] ], [ [ "Confirm that `docker info` can be run as a regular user.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -a 'docker info'", "_____no_output_____" ] ], [ [ "### Installing docker-compose\n\nInstall the `docker-compose` command following the steps in [Install Compose on Linux systems](https://docs.docker.com/compose/install/#install-compose-on-linux-systems).", "_____no_output_____" ] ], [ [ "docker_compose_url = 'https://github.com/docker/compose/releases/download/1.26.2/docker-compose-Linux-x86_64'", "_____no_output_____" ], [ "!ansible {target_group} -b -m get_url -a 'url={docker_compose_url} dest=/usr/bin/docker-compose mode=0755'", "_____no_output_____" ], [ "!ansible {target_group} -a 'docker-compose version'", "_____no_output_____" ] ], [ [ "### Installing Python 3 and Related Packages", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -m yum -a 'name=python3,python3-pip,libselinux-python,libselinux-python3,tree'", "_____no_output_____" ] ], [ [ "### Installing the AWS CLI\n\nInstall the AWS CLI, which is used when backing up the built Moodle environment to Amazon S3.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -m pip -e ansible_python_interpreter=/usr/bin/python3 -a 'name=awscli'", "_____no_output_____" ] ], [ [ "### Changing the Time Zone\n\nChange the time zone to JST.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -b -a 'timedatectl set-timezone Asia/Tokyo'", "_____no_output_____" ] ], [ [ "Confirm that the time zone has been changed.", "_____no_output_____" ] ], [ [ "!ansible {target_group} -a 'date'", "_____no_output_____" ] ] ]
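As an optional cost-saving step (not part of the original procedure), the `aws_region` and `instance_ids` values recorded in group_vars above can be used to stop the instance when it is not in use. Below is a minimal, hedged sketch using boto3; it assumes boto3 is importable in this kernel (it was installed into the pipenv environment earlier, so a separate `pip install boto3` or running under `pipenv run` may be needed) and that the AWS credential environment variables are still set.

```python
# Hedged sketch: stop (not terminate) the EC2 instance(s) created above.
# Assumes aws_region and instance_ids as defined earlier in this notebook.
import boto3

ec2 = boto3.client("ec2", region_name=aws_region)
ec2.stop_instances(InstanceIds=instance_ids)
# Block until the instances report the "stopped" state
ec2.get_waiter("instance_stopped").wait(InstanceIds=instance_ids)
print("Stopped:", instance_ids)
```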
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7176b9213730613a1c6dd9e13f3a68705fbf20b
181,566
ipynb
Jupyter Notebook
pymadlib/doc/PyMADlib Tutorial.ipynb
spring-operator/pymadlib
64a6a26bdc6b5daf876c0c77f31b16529fdc3000
[ "BSD-2-Clause" ]
4
2021-07-17T07:59:59.000Z
2021-11-14T19:45:18.000Z
pymadlib/doc/PyMADlib Tutorial.ipynb
rpb/pymadlib
12895a9a0f97f4b8dd4bfc2aebbe06abc6e75471
[ "BSD-2-Clause" ]
null
null
null
pymadlib/doc/PyMADlib Tutorial.ipynb
rpb/pymadlib
12895a9a0f97f4b8dd4bfc2aebbe06abc6e75471
[ "BSD-2-Clause" ]
1
2021-06-13T12:34:05.000Z
2021-06-13T12:34:05.000Z
258.641026
74,205
0.877516
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7176d38b2535cf2647f8aec2b55527a58018681
83,322
ipynb
Jupyter Notebook
Python Tutorial Reinforcement Learning/6_doom/Deep Q learning with Doom.ipynb
PaulPan00/donkey_wrapper
a03cf0f42f65625fbce792b06c98acd153c5d6c8
[ "MIT" ]
6
2021-03-26T01:42:31.000Z
2021-04-11T16:17:42.000Z
Python Tutorial Reinforcement Learning/6_doom/Deep Q learning with Doom.ipynb
packetsss/Python
a03cf0f42f65625fbce792b06c98acd153c5d6c8
[ "MIT" ]
null
null
null
Python Tutorial Reinforcement Learning/6_doom/Deep Q learning with Doom.ipynb
packetsss/Python
a03cf0f42f65625fbce792b06c98acd153c5d6c8
[ "MIT" ]
7
2021-04-06T06:55:22.000Z
2021-05-03T11:26:38.000Z
52.043723
444
0.580039
[ [ [ "# Deep Q learning with Doom 🕹️\nIn this notebook we'll implement an agent <b>that plays Doom by using a Deep Q learning architecture.</b> <br>\nOur agent playing Doom:\n\n<img src=\"assets/doom.gif\" style=\"max-width: 600px;\" alt=\"Deep Q learning with Doom\"/>\n", "_____no_output_____" ], [ "# This is a notebook from [Deep Reinforcement Learning Course with Tensorflow](https://simoninithomas.github.io/Deep_reinforcement_learning_Course/)\n<img src=\"https://raw.githubusercontent.com/simoninithomas/Deep_reinforcement_learning_Course/master/docs/assets/img/DRLC%20Environments.png\" alt=\"Deep Reinforcement Course\"/>\n<br>\n<p> Deep Reinforcement Learning Course is a free series of articles and videos tutorials 🆕 about Deep Reinforcement Learning, where **we'll learn the main algorithms (Q-learning, Deep Q Nets, Dueling Deep Q Nets, Policy Gradients, A2C, Proximal Policy Gradients…), and how to implement them with Tensorflow.**\n<br><br>\n \n📜The articles explain the architectures from the big picture to the mathematical details behind them.\n<br>\n📹 The videos explain how to build the agents with Tensorflow </b></p>\n<br>\nThis course will give you a **solid foundation for understanding and implementing the future state of the art algorithms**. And, you'll build a strong professional portfolio by creating **agents that learn to play awesome environments**: Doom© 👹, Space invaders 👾, Outrun, Sonic the Hedgehog©, Michael Jackson’s Moonwalker, agents that will be able to navigate in 3D environments with DeepMindLab (Quake) and able to walk with Mujoco. \n<br><br>\n</p> \n\n## 📚 The complete [Syllabus HERE](https://simoninithomas.github.io/Deep_reinforcement_learning_Course/)\n\n\n## Any questions 👨‍💻\n<p> If you have any questions, feel free to ask me: </p>\n<p> 📧: <a href=\"mailto:[email protected]\">[email protected]</a> </p>\n<p> Github: https://github.com/simoninithomas/Deep_reinforcement_learning_Course </p>\n<p> 🌐 : https://simoninithomas.github.io/Deep_reinforcement_learning_Course/ </p>\n<p> Twitter: <a href=\"https://twitter.com/ThomasSimonini\">@ThomasSimonini</a> </p>\n<p> Don't forget to <b> follow me on <a href=\"https://twitter.com/ThomasSimonini\">twitter</a>, <a href=\"https://github.com/simoninithomas/Deep_reinforcement_learning_Course\">github</a> and <a href=\"https://medium.com/@thomassimonini\">Medium</a> to be alerted of the new articles that I publish </b></p>\n \n## How to help 🙌\n3 ways:\n- **Clap our articles and like our videos a lot**:Clapping in Medium means that you really like our articles. And the more claps we have, the more our article is shared Liking our videos help them to be much more visible to the deep learning community.\n- **Share and speak about our articles and videos**: By sharing our articles and videos you help us to spread the word. \n- **Improve our notebooks**: if you found a bug or **a better implementation** you can send a pull request.\n<br>\n\n## Important note 🤔\n<b> You can run it on your computer but it's better to run it on GPU based services</b>, personally I use Microsoft Azure and their Deep Learning Virtual Machine (they offer 170$)\nhttps://azuremarketplace.microsoft.com/en-us/marketplace/apps/microsoft-ads.dsvm-deep-learning\n<br>\n⚠️ I don't have any business relations with them. 
I just loved their excellent customer service.\n\nIf you have trouble using Microsoft Azure, follow the explanations in this excellent article (skipping the last part about fast.ai): https://medium.com/@manikantayadunanda/setting-up-deeplearning-machine-and-fast-ai-on-azure-a22eb6bd6429", "_____no_output_____" ], [ "## Prerequisites 🏗️\nBefore diving into the notebook **you need to understand**:\n- The foundations of Reinforcement learning (MC, TD, Rewards hypothesis...) [Article](https://medium.freecodecamp.org/an-introduction-to-reinforcement-learning-4339519de419)\n- Q-learning [Article](https://medium.freecodecamp.org/diving-deeper-into-reinforcement-learning-with-q-learning-c18d0db58efe)\n- Deep Q-Learning [Article](https://medium.freecodecamp.org/an-introduction-to-deep-q-learning-lets-play-doom-54d02d8017d8)\n- In the [video version](https://www.youtube.com/watch?v=gCJyVX98KJ4) we implemented a Deep Q-learning agent with Tensorflow that learns to play Atari Space Invaders 🕹️👾.", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\r\nHTML('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/gCJyVX98KJ4?showinfo=0\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen></iframe>')", "_____no_output_____" ] ], [ [ "## Step 1: Import the libraries 📚", "_____no_output_____" ] ], [ [ "import tensorflow as tf # Deep Learning library\r\nimport numpy as np # Handle matrices\r\nfrom vizdoom import * # Doom Environment\r\n\r\nimport random # Handling random number generation\r\nimport time # Handling time calculation\r\nfrom skimage import transform # Help us to preprocess the frames\r\n\r\nfrom collections import deque # Ordered collection with ends\r\nimport matplotlib.pyplot as plt # Display graphs\r\n\r\nimport warnings # This ignores all the warning messages normally printed during training because of skimage\r\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "## Step 2: Create our environment 🎮\n- Now that we imported the libraries/dependencies, we will create our environment.\n- The Doom environment takes:\n - A `configuration file` that **handles all the options** (size of the frame, possible actions...)\n - A `scenario file` that **generates the correct scenario** (in our case basic, **but you're invited to try other scenarios**).\n- Note: We have 3 possible actions `[[0,0,1], [1,0,0], [0,1,0]]` so we don't need to do one hot encoding (thanks to <a href=\"https://stackoverflow.com/users/2237916/silgon\">silgon</a> for figuring this out). \n\n### Our environment\n<img src=\"assets/doom.png\" style=\"max-width:500px;\" alt=\"Doom\"/>\n \n- A monster is spawned **randomly somewhere along the opposite wall**. \n- The player can only go **left/right and shoot**. \n- 1 hit is enough **to kill the monster**. 
\n- Episode finishes when **the monster is killed or on timeout (300)**.\n<br><br>\nREWARDS:\n\n- +101 for killing the monster \n- -5 for missing \n- Episode ends after killing the monster or on timeout.\n- living reward = -1", "_____no_output_____" ] ], [ [ "\"\"\"\r\nHere we create our environment\r\n\"\"\"\r\ndef create_environment():\r\n game = DoomGame()\r\n \r\n # Load the correct configuration\r\n game.load_config(\"basic.cfg\")\r\n \r\n # Load the correct scenario (in our case basic scenario)\r\n game.set_doom_scenario_path(\"basic.wad\")\r\n \r\n game.init()\r\n \r\n # Here are our possible actions\r\n left = [1, 0, 0]\r\n right = [0, 1, 0]\r\n shoot = [0, 0, 1]\r\n possible_actions = [left, right, shoot]\r\n \r\n return game, possible_actions\r\n \r\n\"\"\"\r\nHere we perform random actions to test the environment\r\n\"\"\"\r\ndef test_environment():\r\n game = DoomGame()\r\n game.load_config(\"basic.cfg\")\r\n game.set_doom_scenario_path(\"basic.wad\")\r\n game.init()\r\n shoot = [0, 0, 1]\r\n left = [1, 0, 0]\r\n right = [0, 1, 0]\r\n actions = [shoot, left, right]\r\n\r\n episodes = 10\r\n for i in range(episodes):\r\n game.new_episode()\r\n while not game.is_episode_finished():\r\n state = game.get_state()\r\n img = state.screen_buffer\r\n misc = state.game_variables\r\n action = random.choice(actions)\r\n print(action)\r\n reward = game.make_action(action)\r\n print (\"\\treward:\", reward)\r\n time.sleep(0.02)\r\n print (\"Result:\", game.get_total_reward())\r\n time.sleep(2)\r\n game.close()", "_____no_output_____" ], [ "game, possible_actions = create_environment()", "_____no_output_____" ] ], [ [ "## Step 3: Define the preprocessing functions ⚙️\n### preprocess_frame\nPreprocessing is an important step, <b>because we want to reduce the complexity of our states to reduce the computation time needed for training.</b>\n<br><br>\nOur steps:\n- Grayscale each of our frames (because <b> color does not add important information </b>). 
But this is already done by the config file.\n- Crop the screen (in our case we remove the roof because it contains no information)\n- We normalize pixel values\n- Finally we resize the preprocessed frame", "_____no_output_____" ] ], [ [ "\"\"\"\r\n preprocess_frame:\r\n Take a frame.\r\n Resize it.\r\n __________________\r\n | |\r\n | |\r\n | |\r\n | |\r\n |_________________|\r\n \r\n to\r\n _____________\r\n | |\r\n | |\r\n | |\r\n |____________|\r\n Normalize it.\r\n \r\n return preprocessed_frame\r\n \r\n \"\"\"\r\ndef preprocess_frame(frame):\r\n # Greyscale frame already done in our vizdoom config\r\n # x = np.mean(frame,-1)\r\n \r\n # Crop the screen (remove the roof because it contains no information)\r\n cropped_frame = frame[30:-10, 30:-30]\r\n \r\n # Normalize Pixel Values\r\n normalized_frame = cropped_frame / 255.0\r\n \r\n # Resize\r\n preprocessed_frame = transform.resize(normalized_frame, [84, 84])\r\n \r\n return preprocessed_frame", "_____no_output_____" ] ], [ [ "### stack_frames\n👏 This part was made possible thanks to the help of <a href=\"https://github.com/Miffyli\">Anssi</a><br>\n\nAs explained in this really <a href=\"https://danieltakeshi.github.io/2016/11/25/frame-skipping-and-preprocessing-for-deep-q-networks-on-atari-2600-games/\"> good article </a> we stack frames.\n\nStacking frames is really important because it helps us to **give a sense of motion to our Neural Network.**\n\n- First we preprocess the frame\n- Then we append the frame to the deque, which automatically **removes the oldest frame**\n- Finally we **build the stacked state**\n\nThis is how the stack works:\n- For the first frame, we feed 4 frames\n- At each timestep, **we add the new frame to the deque and then we stack them to form a new stacked frame**\n- And so on\n<img src=\"https://raw.githubusercontent.com/simoninithomas/Deep_reinforcement_learning_Course/master/DQN/Space%20Invaders/assets/stack_frames.png\" alt=\"stack\">\n- If we're done, **we create a new stack with 4 new frames (because we are in a new episode)**.", "_____no_output_____" ] ], [ [ "stack_size = 4 # We stack 4 frames\r\n\r\n# Initialize deque with zero-images, one array for each image\r\nstacked_frames = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4) \r\n\r\ndef stack_frames(stacked_frames, state, is_new_episode):\r\n # Preprocess frame\r\n frame = preprocess_frame(state)\r\n \r\n if is_new_episode:\r\n # Clear our stacked_frames\r\n stacked_frames = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)\r\n \r\n # Because we're in a new episode, copy the same frame 4x\r\n stacked_frames.append(frame)\r\n stacked_frames.append(frame)\r\n stacked_frames.append(frame)\r\n stacked_frames.append(frame)\r\n \r\n # Stack the frames\r\n stacked_state = np.stack(stacked_frames, axis=2)\r\n else:\r\n # Append frame to deque, automatically removes the oldest frame\r\n stacked_frames.append(frame)\r\n\r\n # Build the stacked state (first dimension specifies different frames)\r\n stacked_state = np.stack(stacked_frames, axis=2) \r\n \r\n return stacked_state, stacked_frames", "_____no_output_____" ] ], [ [ "## Step 4: Set up our hyperparameters ⚗️\nIn this part we'll set up our different hyperparameters. 
But when you implement a Neural Network by yourself you will **not implement the hyperparameters at once but progressively**.\n\n- First, you begin by defining the neural network's hyperparameters when you implement the model.\n- Then, you'll add the training hyperparameters when you implement the training algorithm.", "_____no_output_____" ] ], [ [ "### MODEL HYPERPARAMETERS\r\nstate_size = [84, 84, 4] # Our input is a stack of 4 frames hence 84x84x4 (Width, height, channels) \r\naction_size = game.get_available_buttons_size() # 3 possible actions: left, right, shoot\r\nlearning_rate = 0.0002 # Alpha (aka learning rate)\r\n\r\n### TRAINING HYPERPARAMETERS\r\ntotal_episodes = 500 # Total episodes for training\r\nmax_steps = 100 # Max possible steps in an episode\r\nbatch_size = 64 \r\n\r\n# Exploration parameters for epsilon greedy strategy\r\nexplore_start = 1.0 # exploration probability at start\r\nexplore_stop = 0.01 # minimum exploration probability \r\ndecay_rate = 0.0001 # exponential decay rate for exploration prob\r\n\r\n# Q learning hyperparameters\r\ngamma = 0.95 # Discounting rate\r\n\r\n### MEMORY HYPERPARAMETERS\r\npretrain_length = batch_size # Number of experiences stored in the Memory when initialized for the first time\r\nmemory_size = 1000000 # Number of experiences the Memory can keep\r\n\r\n### MODIFY THIS TO FALSE IF YOU JUST WANT TO SEE THE TRAINED AGENT\r\ntraining = True\r\n\r\n## TURN THIS TO TRUE IF YOU WANT TO RENDER THE ENVIRONMENT\r\nepisode_render = False", "_____no_output_____" ] ], [ [ "## Step 5: Create our Deep Q-learning Neural Network model 🧠\n<img src=\"assets/model.png\" alt=\"Model\" />\nThis is our Deep Q-learning model:\n- We take a stack of 4 frames as input\n- It passes through 3 convnets\n- Then it is flattened\n- Finally it passes through 2 FC layers\n- It outputs a Q value for each action", "_____no_output_____" ] ], [ [ "class DQNetwork:\r\n def __init__(self, state_size, action_size, learning_rate, name='DQNetwork'):\r\n self.state_size = state_size\r\n self.action_size = action_size\r\n self.learning_rate = learning_rate\r\n \r\n with tf.variable_scope(name):\r\n # We create the placeholders\r\n # *state_size means that we unpack each element of state_size, as if we wrote\r\n # [None, 84, 84, 4]\r\n self.inputs_ = tf.placeholder(tf.float32, [None, *state_size], name=\"inputs\")\r\n self.actions_ = tf.placeholder(tf.float32, [None, 3], name=\"actions_\")\r\n \r\n # Remember that target_Q is R(s,a) + gamma * max Qhat(s', a')\r\n self.target_Q = tf.placeholder(tf.float32, [None], name=\"target\")\r\n \r\n \"\"\"\r\n First convnet:\r\n CNN\r\n BatchNormalization\r\n ELU\r\n \"\"\"\r\n # Input is 84x84x4\r\n self.conv1 = tf.layers.conv2d(inputs = self.inputs_,\r\n filters = 32,\r\n kernel_size = [8,8],\r\n strides = [4,4],\r\n padding = \"VALID\",\r\n kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),\r\n name = \"conv1\")\r\n \r\n self.conv1_batchnorm = tf.layers.batch_normalization(self.conv1,\r\n training = True,\r\n epsilon = 1e-5,\r\n name = 'batch_norm1')\r\n \r\n self.conv1_out = tf.nn.elu(self.conv1_batchnorm, name=\"conv1_out\")\r\n ## --> [20, 20, 32]\r\n \r\n \r\n \"\"\"\r\n Second convnet:\r\n CNN\r\n BatchNormalization\r\n ELU\r\n \"\"\"\r\n self.conv2 = tf.layers.conv2d(inputs = self.conv1_out,\r\n filters = 64,\r\n kernel_size = [4,4],\r\n strides = [2,2],\r\n padding = \"VALID\",\r\n kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),\r\n name = \"conv2\")\r\n \r\n self.conv2_batchnorm = 
tf.layers.batch_normalization(self.conv2,\r\n training = True,\r\n epsilon = 1e-5,\r\n name = 'batch_norm2')\r\n\r\n self.conv2_out = tf.nn.elu(self.conv2_batchnorm, name=\"conv2_out\")\r\n ## --> [9, 9, 64]\r\n \r\n \r\n \"\"\"\r\n Third convnet:\r\n CNN\r\n BatchNormalization\r\n ELU\r\n \"\"\"\r\n self.conv3 = tf.layers.conv2d(inputs = self.conv2_out,\r\n filters = 128,\r\n kernel_size = [4,4],\r\n strides = [2,2],\r\n padding = \"VALID\",\r\n kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),\r\n name = \"conv3\")\r\n \r\n self.conv3_batchnorm = tf.layers.batch_normalization(self.conv3,\r\n training = True,\r\n epsilon = 1e-5,\r\n name = 'batch_norm3')\r\n\r\n self.conv3_out = tf.nn.elu(self.conv3_batchnorm, name=\"conv3_out\")\r\n ## --> [3, 3, 128]\r\n \r\n \r\n self.flatten = tf.layers.flatten(self.conv3_out)\r\n ## --> [1152]\r\n \r\n \r\n self.fc = tf.layers.dense(inputs = self.flatten,\r\n units = 512,\r\n activation = tf.nn.elu,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n name=\"fc1\")\r\n \r\n \r\n self.output = tf.layers.dense(inputs = self.fc, \r\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\r\n units = 3, \r\n activation=None)\r\n\r\n \r\n # Q is our predicted Q value.\r\n self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions_), axis=1)\r\n \r\n \r\n # The loss is the difference between our predicted Q_values and the Q_target\r\n # Sum(Qtarget - Q)^2\r\n self.loss = tf.reduce_mean(tf.square(self.target_Q - self.Q))\r\n \r\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)", "_____no_output_____" ], [ "# Reset the graph\r\n# tf.reset_default_graph()\r\n\r\n# Instantiate the DQNetwork\r\nDQNetwork = DQNetwork(state_size, action_size, learning_rate)", "WARNING:tensorflow:\nThe TensorFlow contrib module will not be included in TensorFlow 2.0.\nFor more information, please see:\n * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n * https://github.com/tensorflow/addons\n * https://github.com/tensorflow/io (for I/O related ops)\nIf you depend on functionality not listed there, please file an issue.\n\nWARNING:tensorflow:From <ipython-input-10-24f0f99070ea>:30: conv2d (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.keras.layers.Conv2D` instead.\nWARNING:tensorflow:From C:\\ProgramData\\Miniconda3\\envs\\python_3.7.4\\lib\\site-packages\\tensorflow_core\\python\\layers\\convolutional.py:424: Layer.apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `layer.__call__` method instead.\nWARNING:tensorflow:From <ipython-input-10-24f0f99070ea>:35: batch_normalization (from tensorflow.python.layers.normalization) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.BatchNormalization instead. 
In particular, `tf.control_dependencies(tf.GraphKeys.UPDATE_OPS)` should not be used (consult the `tf.keras.layers.batch_normalization` documentation).\nWARNING:tensorflow:From <ipython-input-10-24f0f99070ea>:87: flatten (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.flatten instead.\nWARNING:tensorflow:From <ipython-input-10-24f0f99070ea>:95: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.Dense instead.\nWARNING:tensorflow:From C:\\ProgramData\\Miniconda3\\envs\\python_3.7.4\\lib\\site-packages\\tensorflow_core\\python\\training\\rmsprop.py:119: calling Ones.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\nInstructions for updating:\nCall initializer instance with the dtype argument instead of passing it to the constructor\n" ] ], [ [ "## Step 6: Experience Replay 🔁\nNow that we've created our Neural Network, **we need to implement the Experience Replay method.** <br><br>\nHere we'll create the Memory object that creates a deque. A deque (double ended queue) is a data type that **removes the oldest element each time that you add a new element.**\n\nThis part was taken from Udacity: <a href=\"https://github.com/udacity/deep-learning/blob/master/reinforcement/Q-learning-cart.ipynb\">Cartpole DQN</a>", "_____no_output_____" ] ], [ [ "class Memory():\r\n def __init__(self, max_size):\r\n self.buffer = deque(maxlen = max_size)\r\n \r\n def add(self, experience):\r\n self.buffer.append(experience)\r\n \r\n def sample(self, batch_size):\r\n buffer_size = len(self.buffer)\r\n index = np.random.choice(np.arange(buffer_size),\r\n size = batch_size,\r\n replace = False)\r\n \r\n return [self.buffer[i] for i in index]", "_____no_output_____" ] ], [ [ "Here we'll **deal with the empty memory problem**: we pre-populate our memory by taking random actions and storing the experience (state, action, reward, new_state).", "_____no_output_____" ] ], [ [ "# Instantiate memory\r\nmemory = Memory(max_size = memory_size)\r\n\r\n# Start a new episode in the environment\r\ngame.new_episode()\r\n\r\nfor i in range(pretrain_length):\r\n # If it's the first step\r\n if i == 0:\r\n # First we need a state\r\n state = game.get_state().screen_buffer\r\n state, stacked_frames = stack_frames(stacked_frames, state, True)\r\n \r\n # Random action\r\n action = random.choice(possible_actions)\r\n \r\n # Get the rewards\r\n reward = game.make_action(action)\r\n \r\n # Look if the episode is finished\r\n done = game.is_episode_finished()\r\n \r\n # If we're dead\r\n if done:\r\n # We finished the episode\r\n next_state = np.zeros(state.shape)\r\n \r\n # Add experience to memory\r\n memory.add((state, action, reward, next_state, done))\r\n \r\n # Start a new episode\r\n game.new_episode()\r\n \r\n # First we need a state\r\n state = game.get_state().screen_buffer\r\n \r\n # Stack the frames\r\n state, stacked_frames = stack_frames(stacked_frames, state, True)\r\n \r\n else:\r\n # Get the next state\r\n next_state = game.get_state().screen_buffer\r\n next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)\r\n \r\n # Add experience to memory\r\n memory.add((state, action, reward, next_state, done))\r\n \r\n # Our state is now the next_state\r\n state = next_state", "_____no_output_____" ] ], [ [ "## Step 7: Set up Tensorboard 📊\r\nFor more information about tensorboard, please 
watch this <a href=\"https://www.youtube.com/embed/eBbEDRsCmv4\">excellent 30min tutorial</a> <br><br>\nTo launch tensorboard: `tensorboard --logdir=/tensorboard/dqn/1`", "_____no_output_____" ] ], [ [ "# Setup TensorBoard Writer\r\nwriter = tf.summary.FileWriter(\"/tensorboard/dqn/1\")\r\n\r\n## Losses\r\ntf.summary.scalar(\"Loss\", DQNetwork.loss)\r\n\r\nwrite_op = tf.summary.merge_all()", "_____no_output_____" ], [ "!tensorboard --logdir=/tensorboard/dqn/1", "^C\n" ] ], [ [ "## Step 8: Train our Agent 🏃‍♂️\n\nOur algorithm:\n<br>\n* Initialize the weights\n* Init the environment\n* Initialize the decay rate (that we will use to reduce epsilon) \n<br><br>\n* **For** episode to max_episode **do** \n * Make new episode\n * Set step to 0\n * Observe the first state $s_0$\n <br><br>\n * **While** step < max_steps **do**:\n * Increase decay_rate\n * With probability $\\epsilon$ select a random action $a_t$, otherwise select $a_t = \\mathrm{argmax}_a Q(s_t,a)$\n * Execute action $a_t$ in simulator and observe reward $r_{t+1}$ and new state $s_{t+1}$\n * Store transition $<s_t, a_t, r_{t+1}, s_{t+1}>$ in memory $D$\n * Sample random mini-batch from $D$: $<s, a, r, s'>$\n * Set $\\hat{Q} = r$ if the episode ends at $s_{t+1}$, otherwise set $\\hat{Q} = r + \\gamma \\max_{a'}{Q(s', a')}$\n * Make a gradient descent step with loss $(\\hat{Q} - Q(s, a))^2$\n * **endfor**\n <br><br>\n* **endfor**\n\n ", "_____no_output_____" ] ], [ [ "\"\"\"\r\nThis function implements the step:\r\nWith probability epsilon, select a random action a_t; otherwise select a_t = argmax_a Q(s_t, a)\r\n\"\"\"\r\ndef predict_action(explore_start, explore_stop, decay_rate, decay_step, state, actions):\r\n ## EPSILON GREEDY STRATEGY\r\n # Choose action a from state s using epsilon greedy.\r\n ## First we randomize a number\r\n exp_exp_tradeoff = np.random.rand()\r\n\r\n # Here we'll use an improved version of the epsilon greedy strategy used in the Q-learning notebook\r\n explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)\r\n \r\n if (explore_probability > exp_exp_tradeoff):\r\n # Make a random action (exploration)\r\n action = random.choice(possible_actions)\r\n \r\n else:\r\n # Get action from Q-network (exploitation)\r\n # Estimate the Q values for this state\r\n Qs = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs_: state.reshape((1, *state.shape))})\r\n \r\n # Take the biggest Q value (= the best action)\r\n choice = np.argmax(Qs)\r\n action = possible_actions[int(choice)]\r\n \r\n return action, explore_probability", "_____no_output_____" ], [ "# Saver will help us to save our model\r\nsaver = tf.train.Saver()\r\n\r\nif training == True:\r\n with tf.Session() as sess:\r\n # Initialize the variables\r\n sess.run(tf.global_variables_initializer())\r\n \r\n # Initialize the decay rate (that we will use to reduce epsilon) \r\n decay_step = 0\r\n\r\n # Init the game\r\n game.init()\r\n\r\n for episode in range(total_episodes):\r\n # Set step to 0\r\n step = 0\r\n \r\n # Initialize the rewards of the episode\r\n episode_rewards = []\r\n \r\n # Make a new episode and observe the first state\r\n game.new_episode()\r\n state = game.get_state().screen_buffer\r\n \r\n # Remember that the stack_frames function also calls our preprocess function.\r\n state, stacked_frames = stack_frames(stacked_frames, state, True)\r\n\r\n while step < max_steps:\r\n step += 1\r\n \r\n # Increase decay_step\r\n decay_step +=1\r\n \r\n # Predict the action to take and take it\r\n action, explore_probability = predict_action(explore_start, explore_stop, 
decay_rate, decay_step, state, possible_actions)\r\n\r\n # Do the action\r\n reward = game.make_action(action)\r\n\r\n # Look if the episode is finished\r\n done = game.is_episode_finished()\r\n \r\n # Add the reward to total reward\r\n episode_rewards.append(reward)\r\n\r\n # If the game is finished\r\n if done:\r\n # the episode ends so no next state\r\n next_state = np.zeros((84,84), dtype=np.int)\r\n next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)\r\n\r\n # Set step = max_steps to end the episode\r\n step = max_steps\r\n\r\n # Get the total reward of the episode\r\n total_reward = np.sum(episode_rewards)\r\n\r\n print('Episode: {}'.format(episode),\r\n 'Total reward: {}'.format(total_reward),\r\n 'Training loss: {:.4f}'.format(loss),\r\n 'Explore P: {:.4f}'.format(explore_probability))\r\n\r\n memory.add((state, action, reward, next_state, done))\r\n\r\n else:\r\n # Get the next state\r\n next_state = game.get_state().screen_buffer\r\n \r\n # Stack the frame of the next_state\r\n next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)\r\n \r\n\r\n # Add experience to memory\r\n memory.add((state, action, reward, next_state, done))\r\n \r\n # st+1 is now our current state\r\n state = next_state\r\n\r\n\r\n ### LEARNING PART \r\n # Obtain random mini-batch from memory\r\n batch = memory.sample(batch_size)\r\n states_mb = np.array([each[0] for each in batch], ndmin=3)\r\n actions_mb = np.array([each[1] for each in batch])\r\n rewards_mb = np.array([each[2] for each in batch]) \r\n next_states_mb = np.array([each[3] for each in batch], ndmin=3)\r\n dones_mb = np.array([each[4] for each in batch])\r\n\r\n target_Qs_batch = []\r\n\r\n # Get Q values for next_state \r\n Qs_next_state = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs_: next_states_mb})\r\n \r\n # Set Q_target = r if the episode ends at s+1, otherwise set Q_target = r + gamma*maxQ(s', a')\r\n for i in range(0, len(batch)):\r\n terminal = dones_mb[i]\r\n\r\n # If we are in a terminal state, only equals reward\r\n if terminal:\r\n target_Qs_batch.append(rewards_mb[i])\r\n \r\n else:\r\n target = rewards_mb[i] + gamma * np.max(Qs_next_state[i])\r\n target_Qs_batch.append(target)\r\n \r\n\r\n targets_mb = np.array([each for each in target_Qs_batch])\r\n\r\n loss, _ = sess.run([DQNetwork.loss, DQNetwork.optimizer],\r\n feed_dict={DQNetwork.inputs_: states_mb,\r\n DQNetwork.target_Q: targets_mb,\r\n DQNetwork.actions_: actions_mb})\r\n\r\n # Write TF Summaries\r\n summary = sess.run(write_op, feed_dict={DQNetwork.inputs_: states_mb,\r\n DQNetwork.target_Q: targets_mb,\r\n DQNetwork.actions_: actions_mb})\r\n writer.add_summary(summary, episode)\r\n writer.flush()\r\n\r\n # Save model every 5 episodes\r\n if episode % 5 == 0:\r\n save_path = saver.save(sess, \"./models/model.ckpt\")\r\n print(\"Model Saved\")", "Episode: 0 Total reward: 67.0 Training loss: 1.0470 Explore P: 0.9971\nModel Saved\nEpisode: 3 Total reward: 95.0 Training loss: 1.4169 Explore P: 0.9770\nEpisode: 5 Total reward: 94.0 Training loss: 4.2568 Explore P: 0.9667\nModel Saved\nEpisode: 6 Total reward: 95.0 Training loss: 8.6831 Explore P: 0.9661\nEpisode: 7 Total reward: 95.0 Training loss: 19.1226 Explore P: 0.9656\nEpisode: 10 Total reward: 94.0 Training loss: 1.2128 Explore P: 0.9460\nModel Saved\nEpisode: 11 Total reward: 95.0 Training loss: 4.7878 Explore P: 0.9454\nEpisode: 12 Total reward: 95.0 Training loss: 4.3903 Explore P: 0.9449\nEpisode: 13 Total reward: 91.0 Training loss: 4.0964 Explore P: 
0.9439\nEpisode: 15 Total reward: 95.0 Training loss: 1.5670 Explore P: 0.9341\nModel Saved\nEpisode: 17 Total reward: 92.0 Training loss: 0.8014 Explore P: 0.9241\nEpisode: 18 Total reward: 17.0 Training loss: 8.3715 Explore P: 0.9178\nEpisode: 19 Total reward: 7.0 Training loss: 8.2944 Explore P: 0.9111\nEpisode: 20 Total reward: 95.0 Training loss: 68.3635 Explore P: 0.9105\nModel Saved\nEpisode: 21 Total reward: 22.0 Training loss: 14.0980 Explore P: 0.9048\nEpisode: 22 Total reward: 57.0 Training loss: 9.6928 Explore P: 0.9018\nEpisode: 24 Total reward: 94.0 Training loss: 7.9901 Explore P: 0.8923\nModel Saved\nEpisode: 26 Total reward: 75.0 Training loss: 12.4202 Explore P: 0.8817\nEpisode: 27 Total reward: 94.0 Training loss: 2.9862 Explore P: 0.8811\nEpisode: 28 Total reward: 92.0 Training loss: 10.9161 Explore P: 0.8803\nEpisode: 30 Total reward: 89.0 Training loss: 3.1156 Explore P: 0.8706\nModel Saved\nEpisode: 31 Total reward: 70.0 Training loss: 6.4539 Explore P: 0.8683\nEpisode: 32 Total reward: 70.0 Training loss: 6.9091 Explore P: 0.8661\nEpisode: 33 Total reward: 95.0 Training loss: 7.8032 Explore P: 0.8656\nEpisode: 35 Total reward: 89.0 Training loss: 17.9251 Explore P: 0.8561\nModel Saved\nEpisode: 37 Total reward: 93.0 Training loss: 15.5984 Explore P: 0.8470\nEpisode: 38 Total reward: 91.0 Training loss: 4.0658 Explore P: 0.8461\nEpisode: 40 Total reward: 86.0 Training loss: 5.9232 Explore P: 0.8366\nModel Saved\nEpisode: 41 Total reward: 95.0 Training loss: 5.4902 Explore P: 0.8361\nEpisode: 42 Total reward: 67.0 Training loss: 5.2741 Explore P: 0.8337\nModel Saved\nEpisode: 46 Total reward: 93.0 Training loss: 15.1568 Explore P: 0.8087\nEpisode: 47 Total reward: -1.0 Training loss: 7.6571 Explore P: 0.8022\nEpisode: 49 Total reward: 61.0 Training loss: 9.8291 Explore P: 0.7916\nEpisode: 50 Total reward: 93.0 Training loss: 5.1462 Explore P: 0.7909\nModel Saved\nEpisode: 51 Total reward: 68.0 Training loss: 6.9957 Explore P: 0.7888\nEpisode: 52 Total reward: 95.0 Training loss: 4.0570 Explore P: 0.7883\nEpisode: 53 Total reward: 70.0 Training loss: 4.6686 Explore P: 0.7863\nEpisode: 54 Total reward: 93.0 Training loss: 35.0223 Explore P: 0.7857\nEpisode: 55 Total reward: 94.0 Training loss: 5.1175 Explore P: 0.7851\nModel Saved\nEpisode: 56 Total reward: 95.0 Training loss: 8.8173 Explore P: 0.7846\nEpisode: 57 Total reward: 95.0 Training loss: 6.0636 Explore P: 0.7842\nEpisode: 58 Total reward: 95.0 Training loss: 7.3905 Explore P: 0.7837\nEpisode: 59 Total reward: 95.0 Training loss: 13.5766 Explore P: 0.7833\nEpisode: 60 Total reward: 93.0 Training loss: 3.3526 Explore P: 0.7826\nModel Saved\nEpisode: 61 Total reward: 93.0 Training loss: 7.0056 Explore P: 0.7820\nEpisode: 62 Total reward: 95.0 Training loss: 6.7185 Explore P: 0.7816\nEpisode: 63 Total reward: 75.0 Training loss: 8.4884 Explore P: 0.7799\nEpisode: 64 Total reward: 95.0 Training loss: 15.2424 Explore P: 0.7795\nModel Saved\nEpisode: 66 Total reward: 15.0 Training loss: 4.3537 Explore P: 0.7664\nEpisode: 68 Total reward: 93.0 Training loss: 17.3211 Explore P: 0.7583\nEpisode: 69 Total reward: 95.0 Training loss: 7.6926 Explore P: 0.7579\nEpisode: 70 Total reward: 95.0 Training loss: 5.5006 Explore P: 0.7574\nModel Saved\nEpisode: 71 Total reward: 95.0 Training loss: 5.8952 Explore P: 0.7570\nEpisode: 72 Total reward: 95.0 Training loss: 12.6244 Explore P: 0.7565\nEpisode: 73 Total reward: 94.0 Training loss: 19.9322 Explore P: 0.7560\nEpisode: 74 Total reward: 95.0 Training loss: 5.5838 Explore P: 
0.7555\nModel Saved\nEpisode: 76 Total reward: 94.0 Training loss: 6.0367 Explore P: 0.7476\nEpisode: 77 Total reward: 39.0 Training loss: 6.1296 Explore P: 0.7438\nEpisode: 78 Total reward: 72.0 Training loss: 4.9757 Explore P: 0.7420\nEpisode: 79 Total reward: 42.0 Training loss: 12.9289 Explore P: 0.7384\nEpisode: 80 Total reward: 95.0 Training loss: 5.8628 Explore P: 0.7380\nModel Saved\nEpisode: 81 Total reward: 95.0 Training loss: 7.3286 Explore P: 0.7376\nEpisode: 82 Total reward: 32.0 Training loss: 9.9085 Explore P: 0.7333\nEpisode: 83 Total reward: 93.0 Training loss: 16.4866 Explore P: 0.7327\nEpisode: 84 Total reward: 88.0 Training loss: 9.2080 Explore P: 0.7318\nEpisode: 85 Total reward: 93.0 Training loss: 7.4303 Explore P: 0.7312\nModel Saved\nEpisode: 86 Total reward: 94.0 Training loss: 5.8300 Explore P: 0.7307\nEpisode: 87 Total reward: 95.0 Training loss: 76.6679 Explore P: 0.7303\nEpisode: 90 Total reward: 95.0 Training loss: 3.8223 Explore P: 0.7156\nModel Saved\nEpisode: 93 Total reward: 95.0 Training loss: 10.5011 Explore P: 0.7012\nModel Saved\nEpisode: 97 Total reward: -1.0 Training loss: 9.5406 Explore P: 0.6753\nEpisode: 98 Total reward: 95.0 Training loss: 9.5706 Explore P: 0.6749\nEpisode: 99 Total reward: 17.0 Training loss: 13.9971 Explore P: 0.6703\nModel Saved\nEpisode: 101 Total reward: 12.0 Training loss: 8.9545 Explore P: 0.6589\nEpisode: 102 Total reward: 95.0 Training loss: 5.1769 Explore P: 0.6585\nEpisode: 104 Total reward: 95.0 Training loss: 10.0329 Explore P: 0.6517\nEpisode: 105 Total reward: -2.0 Training loss: 10.2597 Explore P: 0.6464\nModel Saved\nEpisode: 106 Total reward: 95.0 Training loss: 7.6344 Explore P: 0.6460\nEpisode: 107 Total reward: 93.0 Training loss: 7.1491 Explore P: 0.6455\nEpisode: 108 Total reward: -9.0 Training loss: 5.3065 Explore P: 0.6398\nEpisode: 109 Total reward: 14.0 Training loss: 6.2435 Explore P: 0.6353\nEpisode: 110 Total reward: 94.0 Training loss: 3.3438 Explore P: 0.6348\nModel Saved\nEpisode: 111 Total reward: 95.0 Training loss: 8.9315 Explore P: 0.6345\nEpisode: 112 Total reward: 95.0 Training loss: 5.4676 Explore P: 0.6341\nEpisode: 114 Total reward: 69.0 Training loss: 5.6337 Explore P: 0.6262\nEpisode: 115 Total reward: 28.0 Training loss: 6.5177 Explore P: 0.6227\nModel Saved\nEpisode: 116 Total reward: 2.0 Training loss: 15.2997 Explore P: 0.6175\nEpisode: 117 Total reward: 95.0 Training loss: 7.2096 Explore P: 0.6172\nEpisode: 118 Total reward: 68.0 Training loss: 5.2490 Explore P: 0.6155\nEpisode: 119 Total reward: 95.0 Training loss: 8.3633 Explore P: 0.6151\nEpisode: 120 Total reward: 15.0 Training loss: 7.5095 Explore P: 0.6108\nModel Saved\nEpisode: 121 Total reward: 41.0 Training loss: 4.4882 Explore P: 0.6078\nEpisode: 122 Total reward: 95.0 Training loss: 6.7449 Explore P: 0.6075\nEpisode: 123 Total reward: 76.0 Training loss: 3.7798 Explore P: 0.6063\nEpisode: 124 Total reward: 95.0 Training loss: 9.2780 Explore P: 0.6059\nEpisode: 125 Total reward: 94.0 Training loss: 10.7110 Explore P: 0.6055\nModel Saved\nEpisode: 126 Total reward: 95.0 Training loss: 11.6103 Explore P: 0.6051\nEpisode: 127 Total reward: 93.0 Training loss: 11.7001 Explore P: 0.6047\nEpisode: 128 Total reward: 94.0 Training loss: 9.5795 Explore P: 0.6043\nEpisode: 129 Total reward: 95.0 Training loss: 18.2652 Explore P: 0.6039\nModel Saved\nEpisode: 131 Total reward: 95.0 Training loss: 7.8107 Explore P: 0.5976\nEpisode: 133 Total reward: 75.0 Training loss: 6.8737 Explore P: 0.5906\nEpisode: 134 Total reward: 95.0 
Training loss: 11.2554 Explore P: 0.5902\nEpisode: 135 Total reward: 95.0 Training loss: 7.3393 Explore P: 0.5899\nModel Saved\nEpisode: 136 Total reward: 76.0 Training loss: 17.0768 Explore P: 0.5887\nEpisode: 137 Total reward: 94.0 Training loss: 5.7765 Explore P: 0.5883\nEpisode: 138 Total reward: 92.0 Training loss: 7.8494 Explore P: 0.5878\nEpisode: 139 Total reward: 8.0 Training loss: 5.4285 Explore P: 0.5836\nEpisode: 140 Total reward: 95.0 Training loss: 19.1841 Explore P: 0.5832\nModel Saved\nEpisode: 141 Total reward: 71.0 Training loss: 7.1564 Explore P: 0.5818\nEpisode: 143 Total reward: 92.0 Training loss: 5.7148 Explore P: 0.5756\nEpisode: 144 Total reward: 95.0 Training loss: 6.0192 Explore P: 0.5753\nEpisode: 145 Total reward: 31.0 Training loss: 10.3960 Explore P: 0.5719\nModel Saved\nEpisode: 146 Total reward: 76.0 Training loss: 6.8995 Explore P: 0.5708\nEpisode: 147 Total reward: 95.0 Training loss: 11.0561 Explore P: 0.5704\nEpisode: 148 Total reward: 75.0 Training loss: 6.3253 Explore P: 0.5693\nEpisode: 150 Total reward: 95.0 Training loss: 13.9398 Explore P: 0.5634\nModel Saved\nEpisode: 151 Total reward: 93.0 Training loss: 5.6290 Explore P: 0.5629\nEpisode: 152 Total reward: 67.0 Training loss: 4.8774 Explore P: 0.5613\nEpisode: 153 Total reward: 95.0 Training loss: 6.2696 Explore P: 0.5610\nEpisode: 154 Total reward: 94.0 Training loss: 8.1263 Explore P: 0.5606\nModel Saved\nEpisode: 156 Total reward: 94.0 Training loss: 7.1067 Explore P: 0.5547\nEpisode: 158 Total reward: 95.0 Training loss: 6.7367 Explore P: 0.5490\nEpisode: 159 Total reward: 71.0 Training loss: 9.2965 Explore P: 0.5476\nEpisode: 160 Total reward: 92.0 Training loss: 4.2130 Explore P: 0.5472\nModel Saved\nEpisode: 161 Total reward: 91.0 Training loss: 8.3934 Explore P: 0.5466\nEpisode: 162 Total reward: 48.0 Training loss: 7.7519 Explore P: 0.5443\nEpisode: 163 Total reward: 95.0 Training loss: 5.0956 Explore P: 0.5440\nEpisode: 164 Total reward: 95.0 Training loss: 5.3058 Explore P: 0.5437\nEpisode: 165 Total reward: 95.0 Training loss: 9.3532 Explore P: 0.5434\nModel Saved\nEpisode: 166 Total reward: 95.0 Training loss: 5.3769 Explore P: 0.5430\nEpisode: 167 Total reward: 95.0 Training loss: 6.3074 Explore P: 0.5427\nEpisode: 168 Total reward: -21.0 Training loss: 14.2721 Explore P: 0.5376\nEpisode: 169 Total reward: 76.0 Training loss: 8.9669 Explore P: 0.5365\nEpisode: 170 Total reward: 95.0 Training loss: 6.5394 Explore P: 0.5362\nModel Saved\nEpisode: 171 Total reward: 94.0 Training loss: 8.3638 Explore P: 0.5358\nEpisode: 172 Total reward: 95.0 Training loss: 5.5448 Explore P: 0.5355\nEpisode: 173 Total reward: 94.0 Training loss: 3.6157 Explore P: 0.5352\nEpisode: 174 Total reward: 94.0 Training loss: 14.1364 Explore P: 0.5348\nEpisode: 175 Total reward: 63.0 Training loss: 17.9014 Explore P: 0.5331\nModel Saved\nEpisode: 176 Total reward: 68.0 Training loss: 11.4577 Explore P: 0.5316\nEpisode: 177 Total reward: 60.0 Training loss: 15.0636 Explore P: 0.5297\nEpisode: 178 Total reward: 76.0 Training loss: 5.1174 Explore P: 0.5287\nEpisode: 179 Total reward: 95.0 Training loss: 4.2875 Explore P: 0.5284\nModel Saved\nEpisode: 181 Total reward: 72.0 Training loss: 3.0206 Explore P: 0.5220\nEpisode: 182 Total reward: 95.0 Training loss: 9.2020 Explore P: 0.5217\nEpisode: 183 Total reward: 90.0 Training loss: 7.4530 Explore P: 0.5211\nEpisode: 184 Total reward: 72.0 Training loss: 8.0814 Explore P: 0.5199\nEpisode: 185 Total reward: 93.0 Training loss: 2.6691 Explore P: 0.5195\nModel 
Saved\nEpisode: 186 Total reward: 93.0 Training loss: 9.9340 Explore P: 0.5191\nEpisode: 187 Total reward: 95.0 Training loss: 7.1898 Explore P: 0.5188\nEpisode: 188 Total reward: 95.0 Training loss: 5.4221 Explore P: 0.5185\nEpisode: 189 Total reward: 93.0 Training loss: 7.6535 Explore P: 0.5181\nEpisode: 190 Total reward: 95.0 Training loss: 7.2413 Explore P: 0.5178\nModel Saved\nEpisode: 191 Total reward: 95.0 Training loss: 11.1568 Explore P: 0.5175\nEpisode: 192 Total reward: 41.0 Training loss: 7.0685 Explore P: 0.5149\nEpisode: 193 Total reward: 95.0 Training loss: 6.4974 Explore P: 0.5146\nEpisode: 195 Total reward: 30.0 Training loss: 7.7666 Explore P: 0.5068\nModel Saved\nEpisode: 196 Total reward: 95.0 Training loss: 10.0580 Explore P: 0.5065\nEpisode: 197 Total reward: 89.0 Training loss: 10.1030 Explore P: 0.5059\nEpisode: 198 Total reward: 36.0 Training loss: 12.0402 Explore P: 0.5032\nEpisode: 199 Total reward: 29.0 Training loss: 7.7697 Explore P: 0.5004\nEpisode: 200 Total reward: 95.0 Training loss: 12.0135 Explore P: 0.5001\nModel Saved\nEpisode: 201 Total reward: 90.0 Training loss: 7.0591 Explore P: 0.4996\nEpisode: 202 Total reward: 95.0 Training loss: 45.5382 Explore P: 0.4993\nEpisode: 203 Total reward: 95.0 Training loss: 5.2836 Explore P: 0.4990\nEpisode: 204 Total reward: 95.0 Training loss: 3.9595 Explore P: 0.4987\nEpisode: 205 Total reward: 94.0 Training loss: 12.6953 Explore P: 0.4983\nModel Saved\nEpisode: 206 Total reward: 95.0 Training loss: 6.2718 Explore P: 0.4980\nEpisode: 207 Total reward: 32.0 Training loss: 2.5679 Explore P: 0.4954\nEpisode: 208 Total reward: 95.0 Training loss: 15.2178 Explore P: 0.4951\nEpisode: 209 Total reward: 56.0 Training loss: 4.8639 Explore P: 0.4934\nEpisode: 210 Total reward: 95.0 Training loss: 8.3517 Explore P: 0.4931\nModel Saved\nEpisode: 211 Total reward: 23.0 Training loss: 7.4240 Explore P: 0.4901\nEpisode: 212 Total reward: 88.0 Training loss: 4.7976 Explore P: 0.4895\nEpisode: 213 Total reward: 30.0 Training loss: 6.1415 Explore P: 0.4866\nEpisode: 214 Total reward: 92.0 Training loss: 15.4500 Explore P: 0.4861\nEpisode: 215 Total reward: 95.0 Training loss: 9.9634 Explore P: 0.4859\nModel Saved\nEpisode: 216 Total reward: 95.0 Training loss: 10.8063 Explore P: 0.4856\nEpisode: 217 Total reward: 53.0 Training loss: 8.1193 Explore P: 0.4838\nEpisode: 218 Total reward: 18.0 Training loss: 9.4159 Explore P: 0.4806\nEpisode: 219 Total reward: 48.0 Training loss: 8.6391 Explore P: 0.4785\nEpisode: 220 Total reward: 95.0 Training loss: 6.1838 Explore P: 0.4783\nModel Saved\nEpisode: 221 Total reward: 95.0 Training loss: 6.9819 Explore P: 0.4780\nEpisode: 222 Total reward: 46.0 Training loss: 7.3177 Explore P: 0.4759\nEpisode: 223 Total reward: 92.0 Training loss: 10.8050 Explore P: 0.4755\nEpisode: 224 Total reward: 95.0 Training loss: 6.7800 Explore P: 0.4752\nModel Saved\nEpisode: 226 Total reward: 94.0 Training loss: 10.2422 Explore P: 0.4702\nEpisode: 227 Total reward: 50.0 Training loss: 8.4003 Explore P: 0.4683\nEpisode: 228 Total reward: 73.0 Training loss: 5.0683 Explore P: 0.4673\nEpisode: 229 Total reward: 69.0 Training loss: 8.8295 Explore P: 0.4661\nEpisode: 230 Total reward: 64.0 Training loss: 6.1810 Explore P: 0.4646\nModel Saved\nEpisode: 231 Total reward: 94.0 Training loss: 8.5951 Explore P: 0.4643\nEpisode: 232 Total reward: 93.0 Training loss: 8.2481 Explore P: 0.4639\nEpisode: 233 Total reward: 94.0 Training loss: 9.3168 Explore P: 0.4636\nEpisode: 234 Total reward: 94.0 Training loss: 5.7313 
Explore P: 0.4633\nEpisode: 235 Total reward: 95.0 Training loss: 8.3524 Explore P: 0.4630\nModel Saved\nEpisode: 236 Total reward: 95.0 Training loss: 3.2534 Explore P: 0.4627\nEpisode: 237 Total reward: 67.0 Training loss: 4.9519 Explore P: 0.4614\nEpisode: 238 Total reward: 95.0 Training loss: 4.9785 Explore P: 0.4612\nEpisode: 239 Total reward: 94.0 Training loss: 6.1041 Explore P: 0.4608\nEpisode: 240 Total reward: 89.0 Training loss: 6.4602 Explore P: 0.4603\nModel Saved\nEpisode: 242 Total reward: 72.0 Training loss: 7.2753 Explore P: 0.4547\nEpisode: 243 Total reward: 7.0 Training loss: 4.0682 Explore P: 0.4512\nEpisode: 244 Total reward: 95.0 Training loss: 5.4801 Explore P: 0.4510\nModel Saved\nEpisode: 247 Total reward: 92.0 Training loss: 4.0671 Explore P: 0.4419\nEpisode: 248 Total reward: 94.0 Training loss: 10.1769 Explore P: 0.4416\nEpisode: 249 Total reward: 95.0 Training loss: 7.8524 Explore P: 0.4413\nModel Saved\nEpisode: 251 Total reward: 94.0 Training loss: 3.7941 Explore P: 0.4367\nEpisode: 252 Total reward: 95.0 Training loss: 5.4722 Explore P: 0.4365\nEpisode: 253 Total reward: 23.0 Training loss: 6.2462 Explore P: 0.4338\nEpisode: 254 Total reward: 94.0 Training loss: 8.9440 Explore P: 0.4335\nEpisode: 255 Total reward: 95.0 Training loss: 7.0957 Explore P: 0.4332\nModel Saved\nEpisode: 256 Total reward: 93.0 Training loss: 6.5590 Explore P: 0.4329\nEpisode: 257 Total reward: 95.0 Training loss: 9.4697 Explore P: 0.4326\nEpisode: 258 Total reward: 93.0 Training loss: 4.4644 Explore P: 0.4323\nEpisode: 259 Total reward: 95.0 Training loss: 5.4563 Explore P: 0.4320\nEpisode: 260 Total reward: 65.0 Training loss: 5.4484 Explore P: 0.4307\nModel Saved\nEpisode: 261 Total reward: 59.0 Training loss: 6.1749 Explore P: 0.4292\nEpisode: 262 Total reward: 50.0 Training loss: 9.7505 Explore P: 0.4275\nEpisode: 263 Total reward: 75.0 Training loss: 13.6371 Explore P: 0.4266\nEpisode: 264 Total reward: 71.0 Training loss: 4.7094 Explore P: 0.4256\nEpisode: 265 Total reward: 45.0 Training loss: 7.2052 Explore P: 0.4236\nModel Saved\nEpisode: 266 Total reward: 70.0 Training loss: 7.2868 Explore P: 0.4226\nEpisode: 267 Total reward: 95.0 Training loss: 23.1591 Explore P: 0.4223\nEpisode: 268 Total reward: 92.0 Training loss: 5.6330 Explore P: 0.4220\nEpisode: 269 Total reward: 95.0 Training loss: 8.5673 Explore P: 0.4217\nEpisode: 270 Total reward: 91.0 Training loss: 8.4516 Explore P: 0.4213\nModel Saved\nEpisode: 271 Total reward: 46.0 Training loss: 37.7152 Explore P: 0.4194\nEpisode: 272 Total reward: 94.0 Training loss: 8.5222 Explore P: 0.4192\nEpisode: 273 Total reward: 51.0 Training loss: 6.2626 Explore P: 0.4173\nEpisode: 274 Total reward: 29.0 Training loss: 6.5222 Explore P: 0.4150\nEpisode: 275 Total reward: 95.0 Training loss: 7.2604 Explore P: 0.4148\nModel Saved\nEpisode: 276 Total reward: 60.0 Training loss: 6.1872 Explore P: 0.4133\nEpisode: 277 Total reward: 91.0 Training loss: 10.2827 Explore P: 0.4129\nEpisode: 278 Total reward: 76.0 Training loss: 6.5719 Explore P: 0.4121\nEpisode: 279 Total reward: 95.0 Training loss: 5.4216 Explore P: 0.4119\nEpisode: 280 Total reward: 95.0 Training loss: 8.4445 Explore P: 0.4116\nModel Saved\nEpisode: 281 Total reward: 94.0 Training loss: 6.8454 Explore P: 0.4113\nEpisode: 282 Total reward: 95.0 Training loss: 4.1795 Explore P: 0.4111\nEpisode: 283 Total reward: 95.0 Training loss: 15.3680 Explore P: 0.4109\nEpisode: 284 Total reward: 95.0 Training loss: 8.8869 Explore P: 0.4106\nEpisode: 285 Total reward: 95.0 Training 
loss: 4.3235 Explore P: 0.4104\nModel Saved\nEpisode: 286 Total reward: 31.0 Training loss: 9.5937 Explore P: 0.4080\nEpisode: 287 Total reward: 69.0 Training loss: 9.4985 Explore P: 0.4069\nEpisode: 288 Total reward: 76.0 Training loss: 9.0317 Explore P: 0.4061\nModel Saved\nEpisode: 291 Total reward: 95.0 Training loss: 5.4898 Explore P: 0.3980\nEpisode: 292 Total reward: 57.0 Training loss: 4.8576 Explore P: 0.3967\nEpisode: 293 Total reward: 95.0 Training loss: 4.5825 Explore P: 0.3965\nEpisode: 294 Total reward: 94.0 Training loss: 8.1916 Explore P: 0.3962\nModel Saved\nEpisode: 296 Total reward: 32.0 Training loss: 4.2516 Explore P: 0.3903\nEpisode: 297 Total reward: 95.0 Training loss: 9.9330 Explore P: 0.3901\nEpisode: 298 Total reward: 95.0 Training loss: 8.1190 Explore P: 0.3899\nEpisode: 300 Total reward: 92.0 Training loss: 36.0580 Explore P: 0.3857\nModel Saved\nEpisode: 301 Total reward: 94.0 Training loss: 25.6411 Explore P: 0.3855\nEpisode: 302 Total reward: 95.0 Training loss: 7.0234 Explore P: 0.3853\nEpisode: 303 Total reward: 95.0 Training loss: 6.9925 Explore P: 0.3850\nEpisode: 304 Total reward: 48.0 Training loss: 3.7165 Explore P: 0.3834\nEpisode: 305 Total reward: 94.0 Training loss: 5.5598 Explore P: 0.3832\nModel Saved\nEpisode: 306 Total reward: 66.0 Training loss: 6.4380 Explore P: 0.3820\nEpisode: 307 Total reward: 95.0 Training loss: 10.1027 Explore P: 0.3818\nEpisode: 308 Total reward: 76.0 Training loss: 4.8863 Explore P: 0.3811\nEpisode: 309 Total reward: 91.0 Training loss: 3.8596 Explore P: 0.3807\nEpisode: 310 Total reward: 95.0 Training loss: 5.7696 Explore P: 0.3805\nModel Saved\nEpisode: 311 Total reward: 95.0 Training loss: 7.9453 Explore P: 0.3803\nEpisode: 312 Total reward: 94.0 Training loss: 32.3577 Explore P: 0.3800\nEpisode: 313 Total reward: 94.0 Training loss: 6.2316 Explore P: 0.3797\nEpisode: 314 Total reward: 95.0 Training loss: 5.6629 Explore P: 0.3795\nEpisode: 315 Total reward: 51.0 Training loss: 16.0406 Explore P: 0.3780\nModel Saved\nEpisode: 316 Total reward: 64.0 Training loss: 20.9551 Explore P: 0.3769\nEpisode: 317 Total reward: 95.0 Training loss: 8.9643 Explore P: 0.3766\nEpisode: 318 Total reward: 31.0 Training loss: 10.1815 Explore P: 0.3745\nEpisode: 319 Total reward: 95.0 Training loss: 6.8077 Explore P: 0.3742\nEpisode: 320 Total reward: 69.0 Training loss: 6.2630 Explore P: 0.3733\nModel Saved\nEpisode: 321 Total reward: 64.0 Training loss: 9.0876 Explore P: 0.3721\nEpisode: 322 Total reward: 65.0 Training loss: 6.7376 Explore P: 0.3710\nEpisode: 325 Total reward: 95.0 Training loss: 6.3275 Explore P: 0.3636\nModel Saved\nEpisode: 326 Total reward: 61.0 Training loss: 21.0627 Explore P: 0.3624\nEpisode: 327 Total reward: 95.0 Training loss: 8.4630 Explore P: 0.3622\nEpisode: 328 Total reward: 94.0 Training loss: 4.1237 Explore P: 0.3619\nEpisode: 329 Total reward: 95.0 Training loss: 7.0530 Explore P: 0.3617\nEpisode: 330 Total reward: 95.0 Training loss: 5.6433 Explore P: 0.3615\nModel Saved\nEpisode: 331 Total reward: 68.0 Training loss: 7.4114 Explore P: 0.3605\nEpisode: 332 Total reward: 94.0 Training loss: 4.9424 Explore P: 0.3603\nEpisode: 333 Total reward: 95.0 Training loss: 6.4706 Explore P: 0.3601\nEpisode: 334 Total reward: 95.0 Training loss: 10.3608 Explore P: 0.3599\nEpisode: 335 Total reward: 95.0 Training loss: 3.5082 Explore P: 0.3596\nModel Saved\nEpisode: 336 Total reward: 94.0 Training loss: 7.4861 Explore P: 0.3594\nEpisode: 337 Total reward: 95.0 Training loss: 6.5843 Explore P: 0.3592\nEpisode: 339 
Total reward: 95.0 Training loss: 6.2024 Explore P: 0.3555\nEpisode: 340 Total reward: 56.0 Training loss: 17.9740 Explore P: 0.3543\nModel Saved\nEpisode: 341 Total reward: 93.0 Training loss: 6.3163 Explore P: 0.3540\nEpisode: 342 Total reward: 95.0 Training loss: 4.4607 Explore P: 0.3538\nEpisode: 343 Total reward: 46.0 Training loss: 12.3406 Explore P: 0.3523\nEpisode: 344 Total reward: 95.0 Training loss: 4.6383 Explore P: 0.3521\nEpisode: 345 Total reward: 49.0 Training loss: 5.2035 Explore P: 0.3506\nModel Saved\nEpisode: 346 Total reward: 62.0 Training loss: 4.3100 Explore P: 0.3495\nEpisode: 347 Total reward: 35.0 Training loss: 7.4819 Explore P: 0.3476\nEpisode: 349 Total reward: 32.0 Training loss: 6.7938 Explore P: 0.3424\nModel Saved\nEpisode: 351 Total reward: 70.0 Training loss: 13.7135 Explore P: 0.3383\nEpisode: 352 Total reward: 95.0 Training loss: 5.8986 Explore P: 0.3381\nEpisode: 353 Total reward: 95.0 Training loss: 6.7222 Explore P: 0.3379\nEpisode: 354 Total reward: 95.0 Training loss: 5.5502 Explore P: 0.3377\nEpisode: 355 Total reward: 95.0 Training loss: 3.9625 Explore P: 0.3375\nModel Saved\nEpisode: 356 Total reward: 66.0 Training loss: 10.4589 Explore P: 0.3365\nEpisode: 357 Total reward: 95.0 Training loss: 4.2301 Explore P: 0.3363\nEpisode: 358 Total reward: 43.0 Training loss: 6.8768 Explore P: 0.3347\nEpisode: 359 Total reward: 27.0 Training loss: 10.0385 Explore P: 0.3328\nEpisode: 360 Total reward: 95.0 Training loss: 6.9034 Explore P: 0.3326\nModel Saved\nEpisode: 361 Total reward: 95.0 Training loss: 5.8895 Explore P: 0.3324\nEpisode: 362 Total reward: 94.0 Training loss: 7.4681 Explore P: 0.3322\nEpisode: 363 Total reward: 95.0 Training loss: 6.9938 Explore P: 0.3320\nEpisode: 364 Total reward: 94.0 Training loss: 4.1534 Explore P: 0.3318\nEpisode: 365 Total reward: 48.0 Training loss: 11.6158 Explore P: 0.3304\nModel Saved\nEpisode: 366 Total reward: 94.0 Training loss: 3.2904 Explore P: 0.3302\nEpisode: 368 Total reward: 93.0 Training loss: 5.1160 Explore P: 0.3267\nEpisode: 369 Total reward: 95.0 Training loss: 12.5915 Explore P: 0.3266\nEpisode: 370 Total reward: 95.0 Training loss: 4.7789 Explore P: 0.3264\nModel Saved\nEpisode: 371 Total reward: 94.0 Training loss: 4.6793 Explore P: 0.3261\nEpisode: 372 Total reward: 23.0 Training loss: 5.5631 Explore P: 0.3242\nEpisode: 373 Total reward: 54.0 Training loss: 6.1912 Explore P: 0.3228\nEpisode: 374 Total reward: 47.0 Training loss: 5.8507 Explore P: 0.3215\nEpisode: 375 Total reward: 95.0 Training loss: 38.6352 Explore P: 0.3213\nModel Saved\nEpisode: 376 Total reward: 94.0 Training loss: 3.8564 Explore P: 0.3211\nEpisode: 377 Total reward: 95.0 Training loss: 3.2192 Explore P: 0.3209\nEpisode: 378 Total reward: 67.0 Training loss: 10.6641 Explore P: 0.3200\nEpisode: 379 Total reward: 74.0 Training loss: 6.4169 Explore P: 0.3193\nEpisode: 380 Total reward: 24.0 Training loss: 5.3259 Explore P: 0.3174\nModel Saved\nEpisode: 381 Total reward: 94.0 Training loss: 8.3347 Explore P: 0.3172\nEpisode: 383 Total reward: 95.0 Training loss: 5.7760 Explore P: 0.3139\nEpisode: 384 Total reward: 95.0 Training loss: 6.2676 Explore P: 0.3137\nEpisode: 385 Total reward: 94.0 Training loss: 8.2629 Explore P: 0.3135\nModel Saved\nEpisode: 386 Total reward: 93.0 Training loss: 13.1794 Explore P: 0.3133\nEpisode: 387 Total reward: 95.0 Training loss: 16.9010 Explore P: 0.3131\nEpisode: 388 Total reward: 71.0 Training loss: 5.7769 Explore P: 0.3124\nEpisode: 389 Total reward: 95.0 Training loss: 8.5311 Explore P: 
0.3122\nEpisode: 390 Total reward: 95.0 Training loss: 11.1890 Explore P: 0.3120\nModel Saved\nEpisode: 391 Total reward: 95.0 Training loss: 8.7124 Explore P: 0.3118\nEpisode: 392 Total reward: 41.0 Training loss: 4.5517 Explore P: 0.3103\nEpisode: 393 Total reward: 73.0 Training loss: 11.3653 Explore P: 0.3096\nEpisode: 394 Total reward: 94.0 Training loss: 4.2256 Explore P: 0.3094\nEpisode: 395 Total reward: 76.0 Training loss: 11.7555 Explore P: 0.3088\nModel Saved\nEpisode: 396 Total reward: 95.0 Training loss: 9.5749 Explore P: 0.3086\nEpisode: 397 Total reward: 95.0 Training loss: 4.6111 Explore P: 0.3085\nEpisode: 398 Total reward: 46.0 Training loss: 5.3761 Explore P: 0.3071\nEpisode: 399 Total reward: 95.0 Training loss: 6.5730 Explore P: 0.3069\nEpisode: 400 Total reward: 53.0 Training loss: 2.7583 Explore P: 0.3058\nModel Saved\nEpisode: 403 Total reward: 26.0 Training loss: 3.1875 Explore P: 0.2982\nEpisode: 404 Total reward: 95.0 Training loss: 4.3228 Explore P: 0.2980\nEpisode: 405 Total reward: 94.0 Training loss: 5.4822 Explore P: 0.2978\nModel Saved\nEpisode: 406 Total reward: 69.0 Training loss: 8.0316 Explore P: 0.2971\nEpisode: 407 Total reward: 95.0 Training loss: 5.3825 Explore P: 0.2969\nEpisode: 408 Total reward: 95.0 Training loss: 4.2439 Explore P: 0.2967\nEpisode: 409 Total reward: 95.0 Training loss: 5.8542 Explore P: 0.2965\nEpisode: 410 Total reward: 95.0 Training loss: 7.2658 Explore P: 0.2964\nModel Saved\nEpisode: 411 Total reward: 92.0 Training loss: 3.3414 Explore P: 0.2961\nEpisode: 412 Total reward: 95.0 Training loss: 8.9129 Explore P: 0.2959\nEpisode: 413 Total reward: 95.0 Training loss: 4.9458 Explore P: 0.2958\nEpisode: 414 Total reward: 95.0 Training loss: 10.0100 Explore P: 0.2956\nEpisode: 415 Total reward: 76.0 Training loss: 6.3239 Explore P: 0.2950\nModel Saved\nEpisode: 416 Total reward: 71.0 Training loss: 4.9018 Explore P: 0.2943\nEpisode: 417 Total reward: 95.0 Training loss: 4.4679 Explore P: 0.2942\nEpisode: 418 Total reward: 65.0 Training loss: 6.4482 Explore P: 0.2933\nEpisode: 420 Total reward: 95.0 Training loss: 7.1073 Explore P: 0.2903\nModel Saved\nEpisode: 421 Total reward: 93.0 Training loss: 6.0036 Explore P: 0.2901\nEpisode: 422 Total reward: 95.0 Training loss: 9.8681 Explore P: 0.2899\nEpisode: 423 Total reward: 56.0 Training loss: 7.0279 Explore P: 0.2888\nEpisode: 424 Total reward: 69.0 Training loss: 4.3213 Explore P: 0.2880\nEpisode: 425 Total reward: 95.0 Training loss: 3.7574 Explore P: 0.2879\nModel Saved\nEpisode: 426 Total reward: 95.0 Training loss: 6.3303 Explore P: 0.2877\nEpisode: 427 Total reward: 65.0 Training loss: 6.7198 Explore P: 0.2868\nEpisode: 428 Total reward: 95.0 Training loss: 6.6766 Explore P: 0.2867\nEpisode: 429 Total reward: 95.0 Training loss: 17.5487 Explore P: 0.2865\nEpisode: 430 Total reward: 92.0 Training loss: 4.6535 Explore P: 0.2862\nModel Saved\nEpisode: 431 Total reward: 94.0 Training loss: 6.1203 Explore P: 0.2861\nEpisode: 432 Total reward: 48.0 Training loss: 4.3775 Explore P: 0.2849\nEpisode: 433 Total reward: 95.0 Training loss: 3.8334 Explore P: 0.2847\nEpisode: 435 Total reward: 95.0 Training loss: 4.2451 Explore P: 0.2818\nModel Saved\nEpisode: 436 Total reward: 94.0 Training loss: 6.8777 Explore P: 0.2816\nEpisode: 437 Total reward: 75.0 Training loss: 6.9144 Explore P: 0.2811\nEpisode: 438 Total reward: 95.0 Training loss: 6.6239 Explore P: 0.2809\nEpisode: 439 Total reward: 33.0 Training loss: 8.7485 Explore P: 0.2795\nEpisode: 440 Total reward: 50.0 Training loss: 3.3633 
Explore P: 0.2784\nModel Saved\nEpisode: 441 Total reward: 76.0 Training loss: 4.0757 Explore P: 0.2778\nEpisode: 442 Total reward: 95.0 Training loss: 16.1746 Explore P: 0.2777\nEpisode: 443 Total reward: 95.0 Training loss: 6.0739 Explore P: 0.2775\nEpisode: 444 Total reward: 93.0 Training loss: 5.0369 Explore P: 0.2773\nEpisode: 445 Total reward: 95.0 Training loss: 4.8247 Explore P: 0.2771\nModel Saved\nEpisode: 446 Total reward: 95.0 Training loss: 5.8592 Explore P: 0.2770\nEpisode: 447 Total reward: 95.0 Training loss: 9.7572 Explore P: 0.2768\nEpisode: 448 Total reward: 95.0 Training loss: 14.4917 Explore P: 0.2766\nEpisode: 449 Total reward: 95.0 Training loss: 5.8893 Explore P: 0.2765\nEpisode: 450 Total reward: 92.0 Training loss: 6.8390 Explore P: 0.2762\nModel Saved\nEpisode: 452 Total reward: 25.0 Training loss: 7.2424 Explore P: 0.2719\nEpisode: 453 Total reward: 95.0 Training loss: 3.3932 Explore P: 0.2717\nEpisode: 454 Total reward: 33.0 Training loss: 4.5696 Explore P: 0.2702\nEpisode: 455 Total reward: 94.0 Training loss: 4.2600 Explore P: 0.2700\nModel Saved\nEpisode: 456 Total reward: 95.0 Training loss: 3.8030 Explore P: 0.2699\nEpisode: 457 Total reward: 95.0 Training loss: 6.3787 Explore P: 0.2697\nEpisode: 458 Total reward: 64.0 Training loss: 9.2199 Explore P: 0.2689\nEpisode: 459 Total reward: 94.0 Training loss: 4.4109 Explore P: 0.2687\nModel Saved\nEpisode: 461 Total reward: 93.0 Training loss: 3.8496 Explore P: 0.2659\nEpisode: 462 Total reward: 95.0 Training loss: 9.1421 Explore P: 0.2658\nEpisode: 463 Total reward: 87.0 Training loss: 8.9370 Explore P: 0.2654\nEpisode: 464 Total reward: 95.0 Training loss: 4.4315 Explore P: 0.2652\nEpisode: 465 Total reward: 46.0 Training loss: 5.9007 Explore P: 0.2641\nModel Saved\nEpisode: 466 Total reward: 95.0 Training loss: 9.2814 Explore P: 0.2639\nEpisode: 467 Total reward: 95.0 Training loss: 8.5237 Explore P: 0.2638\nEpisode: 468 Total reward: 95.0 Training loss: 3.9001 Explore P: 0.2636\nEpisode: 469 Total reward: 74.0 Training loss: 5.5425 Explore P: 0.2631\nModel Saved\nEpisode: 471 Total reward: 95.0 Training loss: 14.9009 Explore P: 0.2604\nEpisode: 472 Total reward: 71.0 Training loss: 17.0582 Explore P: 0.2598\nEpisode: 473 Total reward: 54.0 Training loss: 5.3778 Explore P: 0.2589\nEpisode: 475 Total reward: 95.0 Training loss: 4.9826 Explore P: 0.2562\nModel Saved\nEpisode: 476 Total reward: 95.0 Training loss: 5.3319 Explore P: 0.2561\nEpisode: 478 Total reward: 42.0 Training loss: 3.6568 Explore P: 0.2525\nEpisode: 479 Total reward: 67.0 Training loss: 4.7285 Explore P: 0.2518\nEpisode: 480 Total reward: 95.0 Training loss: 12.3142 Explore P: 0.2516\nModel Saved\nEpisode: 481 Total reward: 63.0 Training loss: 4.9407 Explore P: 0.2508\nEpisode: 482 Total reward: 95.0 Training loss: 7.1913 Explore P: 0.2507\nEpisode: 483 Total reward: 63.0 Training loss: 8.3169 Explore P: 0.2499\nEpisode: 484 Total reward: 94.0 Training loss: 3.5713 Explore P: 0.2497\nEpisode: 485 Total reward: 53.0 Training loss: 6.2106 Explore P: 0.2488\nModel Saved\nEpisode: 486 Total reward: 95.0 Training loss: 5.9556 Explore P: 0.2487\nEpisode: 488 Total reward: 71.0 Training loss: 6.7643 Explore P: 0.2457\nEpisode: 489 Total reward: 65.0 Training loss: 13.9713 Explore P: 0.2450\nEpisode: 490 Total reward: 74.0 Training loss: 7.3133 Explore P: 0.2444\nModel Saved\nEpisode: 491 Total reward: 95.0 Training loss: 5.5533 Explore P: 0.2443\nEpisode: 492 Total reward: 75.0 Training loss: 4.2623 Explore P: 0.2438\nEpisode: 493 Total reward: 
94.0 Training loss: 36.9924 Explore P: 0.2436\nEpisode: 494 Total reward: 95.0 Training loss: 6.5324 Explore P: 0.2435\nEpisode: 495 Total reward: 94.0 Training loss: 4.5361 Explore P: 0.2433\nModel Saved\nEpisode: 496 Total reward: 52.0 Training loss: 5.9572 Explore P: 0.2424\nEpisode: 497 Total reward: 72.0 Training loss: 4.9778 Explore P: 0.2419\nEpisode: 498 Total reward: 95.0 Training loss: 33.0955 Explore P: 0.2417\nEpisode: 499 Total reward: 95.0 Training loss: 6.1322 Explore P: 0.2416\n" ] ], [ [ "## Step 9: Watch our Agent play 👀\nNow that we trained our agent, we can test it", "_____no_output_____" ] ], [ [ "with tf.Session() as sess:\r\n \r\n game, possible_actions = create_environment()\r\n \r\n totalScore = 0\r\n \r\n # Load the model\r\n saver.restore(sess, \"./models/model.ckpt\")\r\n game.init()\r\n for i in range(1):\r\n \r\n done = False\r\n \r\n game.new_episode()\r\n \r\n state = game.get_state().screen_buffer\r\n state, stacked_frames = stack_frames(stacked_frames, state, True)\r\n \r\n while not game.is_episode_finished():\r\n # Take the biggest Q value (= the best action)\r\n Qs = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs_: state.reshape((1, *state.shape))})\r\n \r\n # Take the biggest Q value (= the best action)\r\n choice = np.argmax(Qs)\r\n action = possible_actions[int(choice)]\r\n \r\n game.make_action(action)\r\n done = game.is_episode_finished()\r\n score = game.get_total_reward()\r\n \r\n if done:\r\n break \r\n \r\n else:\r\n print(\"else\")\r\n next_state = game.get_state().screen_buffer\r\n next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)\r\n state = next_state\r\n \r\n score = game.get_total_reward()\r\n print(\"Score: \", score)\r\n game.close()", "INFO:tensorflow:Restoring parameters from ./models/model.ckpt\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nelse\nScore: 68.0\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
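The "Explore P" column in the training log above is the agent's exploration probability, which decays as training progresses. A minimal sketch of the exponential epsilon-greedy schedule such DQN tutorials typically use — the parameter names and constants below are assumptions for illustration, not values read out of this notebook:

import numpy as np

explore_start = 1.0    # assumed initial exploration probability
explore_stop = 0.01    # assumed minimum exploration probability
decay_rate = 0.0001    # assumed decay rate per environment step

def explore_probability(decay_step):
    # Interpolates from explore_start down toward explore_stop as steps accumulate.
    return explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)

def choose_action(q_values, decay_step, rng):
    # Epsilon-greedy: random action with probability epsilon, otherwise argmax of Q.
    if rng.random() < explore_probability(decay_step):
        return int(rng.integers(len(q_values)))
    return int(np.argmax(q_values))

rng = np.random.default_rng(0)
print(explore_probability(0), explore_probability(150000))  # ~1.0 at the start, much lower later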
e7177a873e7979b5176d8101afc38c5bd523a807
35,655
ipynb
Jupyter Notebook
notebooks/Untitled1.ipynb
jihyunbak/rec_to_binaries
5db186b5517dda9eaab2e701b2b067b800fe6524
[ "MIT" ]
null
null
null
notebooks/Untitled1.ipynb
jihyunbak/rec_to_binaries
5db186b5517dda9eaab2e701b2b067b800fe6524
[ "MIT" ]
null
null
null
notebooks/Untitled1.ipynb
jihyunbak/rec_to_binaries
5db186b5517dda9eaab2e701b2b067b800fe6524
[ "MIT" ]
null
null
null
33.957143
254
0.497153
[ [ [ "%matplotlib inline\n%reload_ext autoreload\n%autoreload 2\n%config InlineBackend.figure_format = 'retina'", "_____no_output_____" ], [ "from rec_to_binaries.read_binaries import readTrodesExtractedDataFile\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nreadTrodesExtractedDataFile(\n '../test_data/lotus/preprocessing/20190902/20190902_lotus_06_r3.time/20190902_lotus_06_r3.continuoustime.dat')", "/home/edeno/Documents/Github/rec_to_binaries/rec_to_binaries/read_binaries.py:70: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n return np.dtype(typearr)\n" ], [ "from rec_to_binaries.adjust_timestamps import _label_time_chunks, _regress_timestamps\n\ncontinuoustime_filename = '../test_data/lotus/preprocessing/20190902/20190902_lotus_06_r3.time/20190902_lotus_06_r3.continuoustime.dat'\n\ndata_file = readTrodesExtractedDataFile(continuoustime_filename)\nnew_data = (\n pd.DataFrame(data_file['data'])\n .assign(\n time_chunk_label=lambda df: _label_time_chunks(df.trodestime))\n .assign(\n adjusted_systime=lambda df: _regress_timestamps(df.trodestime,\n df.systime)))\nnew_data", "/home/edeno/Documents/Github/rec_to_binaries/rec_to_binaries/read_binaries.py:70: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n return np.dtype(typearr)\n" ], [ "from rec_to_binaries.create_system_time import infer_systime\n\nnew_data2 = infer_systime(data_file)\nnew_data2", "_____no_output_____" ], [ "new_data.dtypes, new_data2.dtypes", "_____no_output_____" ], [ "np.diff(new_data.adjusted_systime)", "_____no_output_____" ], [ "np.diff(new_data2.adjusted_systime)", "_____no_output_____" ], [ "new_data.adjusted_systime[0], new_data.systime[0]", "_____no_output_____" ], [ "new_data2.adjusted_systime[0]", "_____no_output_____" ], [ "pd.to_datetime(new_data2.adjusted_systime[0])", "_____no_output_____" ], [ "pd.to_datetime(new_data.adjusted_systime[0])", "_____no_output_____" ], [ "pd.to_datetime(new_data.systime[0])", "_____no_output_____" ], [ "pd.to_datetime(int(data_file['system_time_at_creation']), unit='ms')", "_____no_output_____" ], [ "t = pd.to_datetime(int(data_file['system_time_at_creation']), unit='ms')\nt", "_____no_output_____" ], [ "np.log(t.value)", "_____no_output_____" ], [ "pd.to_datetime", "_____no_output_____" ], [ "data_file['system_time_at_creation']", "_____no_output_____" ], [ "data_file", "_____no_output_____" ], [ "from rec_to_binaries.adjust_timestamps import _insert_new_data\n\n_insert_new_data(data_file, new_data)", "_____no_output_____" ], [ "from rec_to_binaries.adjust_timestamps import _insert_new_data\n\n_insert_new_data(data_file, new_data2)", "_____no_output_____" ], [ "'systime' not in data_file['data'].dtype.names", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
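The notebook above compares _regress_timestamps with infer_systime and inspects np.diff of the adjusted system times. A minimal sketch of the underlying idea — fit the system clock as a straight line in the Trodes sample counter so the adjusted timestamps tick at a constant rate — assuming the same column names; this is an illustration, not the rec_to_binaries implementation:

import numpy as np
import pandas as pd

def regress_timestamps(trodestime, systime):
    # Ordinary least squares fit: systime ~ slope * trodestime + intercept.
    t = np.asarray(trodestime, dtype=np.float64)
    s = np.asarray(systime, dtype=np.float64)
    slope, intercept = np.polyfit(t, s, deg=1)
    return slope * t + intercept

# Made-up sample counts and jittery clock readings, purely for illustration.
toy = pd.DataFrame({"trodestime": [0, 1, 2, 3, 4],
                    "systime": [1000, 1033, 1059, 1092, 1120]})
toy["adjusted_systime"] = regress_timestamps(toy.trodestime, toy.systime)
print(np.diff(toy.adjusted_systime))  # constant spacing after the fit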
e717835411e5fa09c23ce601c370d02b22278819
532,509
ipynb
Jupyter Notebook
Exploratory_Data_Analysis_on_Dataset_Terrorism_.ipynb
YashviKommidi/LETS-GROW-MORE
af219f2ab19b0b561b014a9471e36e1730fcfc23
[ "MIT" ]
null
null
null
Exploratory_Data_Analysis_on_Dataset_Terrorism_.ipynb
YashviKommidi/LETS-GROW-MORE
af219f2ab19b0b561b014a9471e36e1730fcfc23
[ "MIT" ]
null
null
null
Exploratory_Data_Analysis_on_Dataset_Terrorism_.ipynb
YashviKommidi/LETS-GROW-MORE
af219f2ab19b0b561b014a9471e36e1730fcfc23
[ "MIT" ]
null
null
null
113.251595
70,726
0.73807
[ [ [ "<a href=\"https://colab.research.google.com/github/YashviKommidi/LGMVIP-DataScience/blob/main/Exploratory_Data_Analysis_on_Dataset_Terrorism_.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# **EXPLORATORY DATA ANALYSIS ON DATASET TERRORISM INTERMEDIATE LEVEL TASK-1**\n", "_____no_output_____" ], [ "# importing the libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ] ], [ [ "\n# Loading Data", "_____no_output_____" ] ], [ [ "terr=pd.read_csv(\"/content/globalterrorismdb_0718dist.csv\",encoding='latin1')", "_____no_output_____" ], [ "terr", "_____no_output_____" ], [ "terr.head(2)", "_____no_output_____" ] ], [ [ "# understanding the data ", "_____no_output_____" ] ], [ [ "terr.columns", "_____no_output_____" ], [ "terr.dtypes", "_____no_output_____" ], [ "terr.head()", "_____no_output_____" ], [ "terr.rename(columns={'iyear':'Year','imonth':'Month','iday':\"day\",'gname':'Group','country_txt':'Country','region_txt':'Region','provstate':'State','city':'City','latitude':'latitude',\n 'longitude':'longitude','summary':'summary','attacktype1_txt':'Attacktype','targtype1_txt':'Targettype','weaptype1_txt':'Weapon','nkill':'kill',\n 'nwound':'Wound'},inplace=True)", "_____no_output_____" ], [ "terr = terr[['Year','Month','day','Country','State','Region','City','latitude','longitude',\"Attacktype\",'kill',\n 'Wound','target1','summary','Group','Targettype','Weapon','motive']]", "_____no_output_____" ], [ "terr.shape", "_____no_output_____" ], [ "terr.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 181691 entries, 0 to 181690\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Year 181691 non-null int64 \n 1 Month 181691 non-null int64 \n 2 day 181691 non-null int64 \n 3 Country 181691 non-null object \n 4 State 181270 non-null object \n 5 Region 181691 non-null object \n 6 City 181257 non-null object \n 7 latitude 177135 non-null float64\n 8 longitude 177134 non-null float64\n 9 Attacktype 181691 non-null object \n 10 kill 171378 non-null float64\n 11 Wound 165380 non-null float64\n 12 target1 181055 non-null object \n 13 summary 115562 non-null object \n 14 Group 181691 non-null object \n 15 Targettype 181691 non-null object \n 16 Weapon 181691 non-null object \n 17 motive 50561 non-null object \ndtypes: float64(4), int64(3), object(11)\nmemory usage: 25.0+ MB\n" ], [ "terr.isnull().sum()", "_____no_output_____" ], [ "terr.describe()", "_____no_output_____" ], [ "missing_values = (((terr.isnull().sum()).sum())/terr.size)*100\nmissing_values", "_____no_output_____" ], [ "for i in terr.columns:\n print(i,terr[i].nunique())", "Year 47\nMonth 13\nday 32\nCountry 205\nState 2855\nRegion 12\nCity 36674\nlatitude 48322\nlongitude 48039\nAttacktype 9\nkill 205\nWound 238\ntarget1 86006\nsummary 112492\nGroup 3537\nTargettype 22\nWeapon 12\nmotive 14490\n" ], [ "terr['Wound'] = terr['Wound'].fillna(0).astype(int)\nterr['kill'] = terr['kill'].fillna(0).astype(int)", "_____no_output_____" ], [ "terr.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 181691 entries, 0 to 181690\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Year 181691 non-null int64 \n 1 Month 181691 non-null int64 \n 2 day 181691 
non-null int64 \n 3 Country 181691 non-null object \n 4 State 181270 non-null object \n 5 Region 181691 non-null object \n 6 City 181257 non-null object \n 7 latitude 177135 non-null float64\n 8 longitude 177134 non-null float64\n 9 Attacktype 181691 non-null object \n 10 kill 181691 non-null int64 \n 11 Wound 181691 non-null int64 \n 12 target1 181055 non-null object \n 13 summary 115562 non-null object \n 14 Group 181691 non-null object \n 15 Targettype 181691 non-null object \n 16 Weapon 181691 non-null object \n 17 motive 50561 non-null object \ndtypes: float64(2), int64(5), object(11)\nmemory usage: 25.0+ MB\n" ], [ "terr.head(10)", "_____no_output_____" ] ] ], [ [ "# Representing Data", "_____no_output_____" ] ], [ [ "attack = terr.Country.value_counts()[:10]\nattack", "_____no_output_____" ], [ "terr['Country'].value_counts()", "_____no_output_____" ], [ "terr['State'].value_counts()", "_____no_output_____" ], [ "terr['Region'].value_counts()\n\n", "_____no_output_____" ], [ "terr['Year'].value_counts()", "_____no_output_____" ], [ "terr['City'].value_counts()", "_____no_output_____" ], [ "year = terr['Year'].unique()\nyears_count = terr['Year'].value_counts(dropna = False).sort_index()\nplt.figure(figsize = (18,10))\nsns.barplot(x = year,\n y = years_count,\n palette = \"tab10\")\nplt.xticks(rotation = 50)\nplt.xlabel('Attacking Year',fontsize=20)\nplt.ylabel('Number of Attacks Each Year',fontsize=20)\nplt.title('Attacks In Years',fontsize=30)\nplt.show()\n", "_____no_output_____" ], [ "plt.figure(figsize = (11,6))\nsns.histplot(terr['Attacktype'], palette='cubehelix')\nplt.title('Attacktype',fontsize=15)\nplt.xticks(rotation=90)\nplt.show()", "_____no_output_____" ], [ "terr['Wound'] = terr['Wound'].fillna(0)\nterr['kill'] = terr['kill'].fillna(0)", "_____no_output_____" ], [ "terr['Casualities'] = terr['kill'] + terr['Wound']\n", "_____no_output_____" ], [ "terr.info()\n", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 181691 entries, 0 to 181690\nData columns (total 19 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Year 181691 non-null int64 \n 1 Month 181691 non-null int64 \n 2 day 181691 non-null int64 \n 3 Country 181691 non-null object \n 4 State 181270 non-null object \n 5 Region 181691 non-null object \n 6 City 181257 non-null object \n 7 latitude 177135 non-null float64\n 8 longitude 177134 non-null float64\n 9 Attacktype 181691 non-null object \n 10 kill 181691 non-null int64 \n 11 Wound 181691 non-null int64 \n 12 target1 181055 non-null object \n 13 summary 115562 non-null object \n 14 Group 181691 non-null object \n 15 Targettype 181691 non-null object \n 16 Weapon 181691 non-null object \n 17 motive 50561 non-null object \n 18 Casualities 181691 non-null int64 \ndtypes: float64(2), int64(6), object(11)\nmemory usage: 26.3+ MB\n" ], [ "terr.describe()", "_____no_output_____" ], [ "df = terr[['Year','kill']].groupby(['Year']).sum()\nfig, ax4 = plt.subplots(figsize=(60,20))\ndf.plot(kind='bar',alpha=0.7,ax=ax4)\nplt.xticks(rotation = 60)\nplt.title(\"People Died Due To Attack\",fontsize=25)\nplt.ylabel(\"Number of killed people\",fontsize=20)\nplt.xlabel('Year',fontsize=30)\ntop_side = ax4.spines[\"top\"]\ntop_side.set_visible(False)\nright_side = ax4.spines[\"right\"]\nright_side.set_visible(False)", "_____no_output_____" ], [ "terr['City'].value_counts().to_frame().sort_values('City',axis=0,ascending=False).head(10).plot(kind='bar',figsize=(30,20),color='red')\nplt.xticks(rotation = 
30)\nplt.xlabel(\"City\",fontsize=35)\nplt.ylabel(\"Number of attacks\",fontsize=35)\nplt.title(\"Top 10 most affected cities\",fontsize=30)\nplt.show()", "_____no_output_____" ], [ "terr['Attacktype'].value_counts().plot(kind='bar',figsize=(30,30),color='purple')\nplt.xticks(rotation = 30)\nplt.xlabel(\"Attacktype\",fontsize=35)\nplt.ylabel(\"Number of attacks\",fontsize=35)\nplt.title(\"Name of attacktype\",fontsize=30)\nplt.show()", "_____no_output_____" ], [ "terr[['Attacktype','Wound']].groupby([\"Attacktype\"],axis=0).sum().plot(kind='bar',figsize=(20,10),color=['green'])\nplt.xticks(rotation=50)\nplt.title(\"Number of wounded \",fontsize=20)\nplt.ylabel('Number of people',fontsize=15)\nplt.xlabel('Attack type',fontsize=15)\nplt.show()", "_____no_output_____" ], [ "terr[['Attacktype','kill']].groupby([\"Attacktype\"],axis=0).sum().plot(kind='bar',figsize=(20,10),color=['orange'])\nplt.xticks(rotation=50)\nplt.title(\"Number of killed \",fontsize=30)\nplt.ylabel('Number of people',fontsize=35)\nplt.xlabel('Attack type',fontsize=35)\nplt.show()", "_____no_output_____" ], [ "data=terr[['Group','Country','kill']]\ndata=data.groupby(['Group','Country'],axis=0).sum().sort_values('kill',ascending=False).drop('Unknown').reset_index().head(10)\ndata", "_____no_output_____" ], [ "CKill = terr.pivot_table(columns='Country', values='kill', aggfunc='sum')\nCKill", "_____no_output_____" ], [ "tKill = terr.pivot_table(columns='Attacktype', values='kill', aggfunc='sum')\ntKill", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
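The EDA above repeatedly uses one pandas pattern: derive a casualty count, then group, sum and sort to rank categories. A minimal sketch of that pattern on an invented three-row frame (the column names, including the notebook's 'Casualities' spelling, follow the notebook; the numbers are fabricated purely for illustration):

import pandas as pd

toy = pd.DataFrame({"Group": ["A", "A", "B"],
                    "Country": ["X", "Y", "X"],
                    "kill": [3, 0, 5],
                    "Wound": [1, 2, 0]})
toy["Casualities"] = toy["kill"] + toy["Wound"]  # kill + Wound, as in the notebook
deadliest = (toy.groupby(["Group", "Country"])["kill"]
                .sum()
                .sort_values(ascending=False)
                .reset_index())
print(deadliest)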
e7178b40db2a2469b106c2d41953310ae4e746d7
44,032
ipynb
Jupyter Notebook
notebooks/Model Preparation.ipynb
radroid/which-fish-webapp
2088431176b4e21095726c2c97cee407df121861
[ "MIT" ]
null
null
null
notebooks/Model Preparation.ipynb
radroid/which-fish-webapp
2088431176b4e21095726c2c97cee407df121861
[ "MIT" ]
null
null
null
notebooks/Model Preparation.ipynb
radroid/which-fish-webapp
2088431176b4e21095726c2c97cee407df121861
[ "MIT" ]
null
null
null
73.755444
27,016
0.769077
[ [ [ "# Lab 4 - Fish Classifier deployed on Heroku.\n\nIn this notebook, we will explore, pre-process and prepare data for model training. I will be approaching the problem as a classification problem. \n\n# 1. Business Problem\nFish markets are competitive to work in. The cut-throat competitive environment breeds innovation and frugal techniques to maximize sales. One of the fish market leaders in Vancouver decided to use machine learning to determine the fish type. This will be beneficial for the business and customers in the long-run.\n\n# 2. Load Data", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport pathlib\nimport os\n\n\ndata_dir = '../data/'\nfilename = 'Fish.csv'\npath_to_data = pathlib.Path(os.path.join(data_dir, filename))\n\nif path_to_data.exists():\n data_df = pd.read_csv(path_to_data)\nelse:\n raise FileNotFoundError(f'Please check the file path, the location '\n f'provided does not contain any file with the name: {filename}')\n \ndata_df.tail()", "_____no_output_____" ] ], [ [ "# 3. Exploratory Data Analysis", "_____no_output_____" ] ], [ [ "data_df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 159 entries, 0 to 158\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Species 159 non-null object \n 1 Weight 159 non-null float64\n 2 Length1 159 non-null float64\n 3 Length2 159 non-null float64\n 4 Length3 159 non-null float64\n 5 Height 159 non-null float64\n 6 Width 159 non-null float64\ndtypes: float64(6), object(1)\nmemory usage: 8.8+ KB\n" ] ], [ [ "The dataset has `159 samples` and `7 variables`; the target variable for the model to be built will be column **`0` Species**.", "_____no_output_____" ] ], [ [ "data_df.describe()", "_____no_output_____" ], [ "# check the number of classes/species in the dataset.\nlen(data_df['Species'].value_counts())", "_____no_output_____" ] ], [ [ "> There are `7` different species (classes).", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\n\n# Plot a graph to visualize the number of samples for each dependent variable category.\nfig, ax = plt.subplots(figsize=(10,7))\n\n# Count the number of samples for each category and plot.\ndata_df['Species'].value_counts().plot(kind=\"bar\", ax=ax)\n\n# Other formatting settings.\nax.set_title(\"Number of Examples by `Species` - Dependent Variable\",\n fontdict={\"size\": 24, \"color\": \"#838383\"}, pad=20, loc=\"left\");\n\nax.set_ylabel(\"Number of Samples\", fontdict={\"size\": 16, \"color\":\"#727272\"}, labelpad=20);\nplt.xticks(fontsize=16, weight=\"bold\", color=\"#525252\", rotation=0);", "_____no_output_____" ], [ "# Create a sweetviz report for a more detailed comparison of variables in the dataset.\nimport sweetviz as sv\nimport warnings\n\n\nwarnings.filterwarnings('ignore')\npath_to_report = 'SWEETVIZ_REPORT.html'\n\nif not pathlib.Path(path_to_report).exists():\n report = sv.analyze(data_df)\n report.show_html()", "_____no_output_____" ] ], [ [ "# 4. Data Preparation\n\n", "_____no_output_____" ] ], [ [ "# Define x and y variables for the algorithm\nX = data_df.drop('Species', axis=1)\ny = data_df['Species']", "_____no_output_____" ] ], [ [ "Setting the train-test split at **`80-20`**.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\n\n# Splitting data into train and test datasets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=100, stratify=y)", "_____no_output_____" ] ], [ [ "# 5. 
Model Creation and Training\n\nConsidering the insights developed in the [`3. Exploratory Data Analysis`](#eda) section, the following model was chosen to tackle the classification problem.\n> **Linear Support Vector Classifier**", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVC\n\n\n# Defining and fitting an SVM instance\nmodel = SVC(kernel='linear', random_state=100)\nmodel.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "# 6. Model Testing and Exporting", "_____no_output_____" ] ], [ [ "model.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "The model accuracy is high enough to be deployed.", "_____no_output_____" ] ], [ [ "# Save model\nimport pickle\n\npath_to_model = pathlib.Path('../models/linear-svc-base.pkl')\n\nwith path_to_model.open('wb') as f:\n pickle.dump(model, f)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
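The export cell above pickles the trained classifier. A minimal sketch of the serving-side counterpart a web app could use to reload it and score one fish — the path and the feature order (Weight, Length1, Length2, Length3, Height, Width) follow the notebook, while the measurement values are made up for illustration:

import pathlib
import pickle

path_to_model = pathlib.Path('../models/linear-svc-base.pkl')
with path_to_model.open('rb') as f:
    model = pickle.load(f)

# One hypothetical fish: Weight, Length1, Length2, Length3, Height, Width.
sample = [[300.0, 25.0, 27.0, 31.0, 11.0, 4.5]]
print(model.predict(sample))  # prints the predicted species label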
e717a976e9d535ea21dbeff3d51f5ba681eb153a
128,374
ipynb
Jupyter Notebook
doc/source/notebooks/hyperopt.ipynb
Dragoncall/GPflowOpt
f1fe129f240e4f24d6a4b641f838640b56f5961b
[ "Apache-2.0" ]
258
2017-04-28T15:47:35.000Z
2022-03-31T08:44:40.000Z
doc/source/notebooks/hyperopt.ipynb
yanpei18345156216/GPflowOpt
f1c268e6b5dc4d7f458e06c59095901d55b73c32
[ "Apache-2.0" ]
123
2017-04-28T22:20:47.000Z
2021-10-01T16:29:47.000Z
doc/source/notebooks/hyperopt.ipynb
yanpei18345156216/GPflowOpt
f1c268e6b5dc4d7f458e06c59095901d55b73c32
[ "Apache-2.0" ]
69
2017-06-06T00:18:01.000Z
2022-02-25T21:43:39.000Z
383.20597
42,922
0.918184
[ [ [ "# Bayesian Optimization of Hyperparameters\n*Vincent Dutordoir, Joachim van der Herten*", "_____no_output_____" ], [ "## Introduction\n\nThe paper *Practical Bayesian Optimization of Machine Learning algorithms* by Snoek et al. 2012 describes the \nuse of Bayesian optimization for hyperparameter optimization. In this paper, the (at the time) state-of-the-art test error for convolutional neural networks on the CIFAR-10 dataset was improved significantly by optimizing the parameters of the training process with Bayesian optimization.\n\nIn this notebook we demonstrate the principle by optimizing the starting point of the maximum likelihood estimation of a GP. Note that we use a GP to optimize the initial hyperparameter values of another GP. ", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\n# Loading airline data\nimport numpy as np \ndata = np.load('airline.npz')\nX_train, Y_train = data['X_train'], data['Y_train']\nD = Y_train.shape[1];", "_____no_output_____" ] ], [ [ "## Data set\n\nThe data to illustrate hyperparameter optimization is the well-known airline passenger volume data. It is a one-dimensional time series of the passenger volumes of airlines over time. We wish to use it to make forecasts. Plotting the data below, it is clear that the data contains a pattern.", "_____no_output_____" ] ], [ [ "fig = plt.figure()\nax = fig.add_subplot(111)\nax.set_xlabel('Time (years)')\nax.set_ylabel('Airline passengers ($10^3$)')\nax.plot(X_train.flatten(),Y_train.flatten(), c='b')\nax.set_xticklabels([1949, 1952, 1955, 1958, 1961, 1964])\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "## Modeling\n\nTo forecast this timeseries, we will pick up its pattern using a Spectral Mixture kernel (Wilson et al, 2013). Essentially, this kernel is a sum of $Q$ products of Cosine and RBF kernels. For this one-dimensional problem each term of the sum has 4 hyperparameters. 
To account for the upward trend, we also add a Linear and a Bias kernel.", "_____no_output_____" ] ], [ [ "from gpflow.kernels import RBF, Cosine, Linear, Bias, Matern52\nfrom gpflow import transforms\nfrom gpflow.gpr import GPR\n\nQ = 10 # nr of terms in the sum\nmax_iters = 1000\n\n# Trains a model with a spectral mixture kernel, given an ndarray of 2Q frequencies and lengthscales\ndef create_model(hypers):\n f = np.clip(hypers[:Q], 0, 5)\n weights = np.ones(Q) / Q\n lengths = hypers[Q:]\n \n kterms = []\n for i in range(Q):\n rbf = RBF(D, lengthscales=lengths[i], variance=1./Q)\n rbf.lengthscales.transform = transforms.Exp()\n cos = Cosine(D, lengthscales=f[i])\n kterms.append(rbf * cos)\n \n k = np.sum(kterms) + Linear(D) + Bias(D)\n m = GPR(X_train, Y_train, kern=k)\n return m\n\nX_test, X_complete = data['X_test'], data['X_complete']\ndef plotprediction(m):\n # Perform prediction\n mu, var = m.predict_f(X_complete)\n \n # Plot\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Time (years)')\n ax.set_ylabel('Airline passengers ($10^3$)')\n ax.set_xticklabels([1949, 1952, 1955, 1958, 1961, 1964, 1967, 1970, 1973])\n ax.plot(X_train.flatten(),Y_train.flatten(), c='b')\n ax.plot(X_complete.flatten(), mu.flatten(), c='g')\n lower = mu - 2*np.sqrt(var)\n upper = mu + 2*np.sqrt(var)\n ax.plot(X_complete, upper, 'g--', X_complete, lower, 'g--', lw=1.2)\n ax.fill_between(X_complete.flatten(), lower.flatten(), upper.flatten(),\n color='g', alpha=.1)\n plt.tight_layout()\n\nm = create_model(np.ones((2*Q,)))\nm.optimize(maxiter=max_iters)\nplotprediction(m)", "_____no_output_____" ] ], [ [ "In total, a lot of hyperparameters must be optimized. Furthermore, the optimization surface of the spectral mixture is highly multimodal. Starting from the default hyperparameter values, the optimized GP is able to pick up the linear trend, and the RBF kernels perform local interpolation. However, the kernel is not able to extrapolate away from the data. In sum, with this starting point, the likelihood optimization ends in a local minimum.\n\n## Hyperparameter optimization\n\nThis issue is a known problem of the spectral mixture kernel, and several recommendations exist on how to improve the starting point. Here, we will use GPflowOpt to optimize the initial values for the lengthscales of the RBF and the Cosine kernel (i.e., the frequencies of the latter kernel). The other hyperparameters (RBF and Cosine variances, likelihood variances and the linear and bias terms) are kept at their defaults and will be optimized by the standard likelihood optimization.\n\nFirst, we set up the objective function accepting proposed starting points. The objective function returns the negative log likelihood, obtained by optimizing the hyperparameters from the given starting point. 
Then, we set up the 20D optimization domain for the frequencies and lengthscales.", "_____no_output_____" ] ], [ [ "from gpflowopt.domain import ContinuousParameter\nfrom gpflowopt.objective import batch_apply\n\n# Objective function for our optimization\n# Input: N x 2Q ndarray, output: N x 1.\n# returns the negative log likelihood obtained by training with given frequencies and rbf lengthscales\n# Applies some tricks for stability similar to GPy's jitchol\n@batch_apply\ndef objectivefx(freq):\n m = create_model(freq) \n for i in [0] + [10**exponent for exponent in range(6,1,-1)]:\n try:\n mean_diag = np.mean(np.diag(m.kern.compute_K_symm(X_train)))\n m.likelihood.variance = 1 + mean_diag * i\n m.optimize(maxiter=max_iters)\n return -m.compute_log_likelihood()\n except:\n pass\n raise RuntimeError(\"Frequency combination failed indefinitely.\")\n\n# Setting up optimization domain.\nlower = [0.]*Q\nupper = [5.]*int(Q)\ndf = np.sum([ContinuousParameter('freq{0}'.format(i), l, u) for i, l, u in zip(range(Q), lower, upper)])\n\nlower = [1e-5]*Q\nupper = [300]*int(Q)\ndl = np.sum([ContinuousParameter('l{0}'.format(i), l, u) for i, l, u in zip(range(Q), lower, upper)])\ndomain = df + dl\ndomain", "_____no_output_____" ] ], [ [ "High-dimensional Bayesian optimization is tricky, although the complexity of the problem is significantly reduced due to symmetry in the optimization domain (interchanging frequencies does not make a difference) and because we still optimize the likelihood given the starting point. Therefore, getting near a mode is sufficient. Furthermore, we disable ARD of the kernel of the model approximating the objective function to avoid optimizing a lot of lengthscales with little data. We then use EI to pick new candidate starting points and evaluate our objective.", "_____no_output_____" ] ], [ [ "from gpflowopt.design import LatinHyperCube\nfrom gpflowopt.acquisition import ExpectedImprovement\nfrom gpflowopt import optim, BayesianOptimizer\ndesign = LatinHyperCube(6, domain)\nX = design.generate()\n\nY = objectivefx(X)\nm = GPR(X, Y, kern=Matern52(domain.size, ARD=False))\nei = ExpectedImprovement(m)\nopt = optim.StagedOptimizer([optim.MCOptimizer(domain, 5000), optim.SciPyOptimizer(domain)])\noptimizer = BayesianOptimizer(domain, ei, optimizer=opt)\nwith optimizer.silent():\n result = optimizer.optimize(objectivefx, n_iter=24)", "Warning: inf or nan in gradient: replacing with zeros\nWarning: inf or nan in gradient: replacing with zeros\nWarning: inf or nan in gradient: replacing with zeros\nWarning: inf or nan in gradient: replacing with zeros\n" ], [ "m = create_model(result.x[0,:])\nm.optimize()\nplotprediction(m)", "_____no_output_____" ] ], [ [ "Clearly, the optimization point identified with BO is a lot better than the default values. We now obtain a proper forecast. By inspecting the evolution of the best likelihood value obtained so far, we see the solution is identified quickly.", "_____no_output_____" ] ], [ [ "f, axes = plt.subplots(1, 1, figsize=(7, 5))\nf = ei.data[1][:,0]\naxes.plot(np.arange(0, ei.data[0].shape[0]), np.minimum.accumulate(f))\naxes.set_ylabel('fmin')\naxes.set_xlabel('Number of evaluated points');", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
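Written out, each RBF-times-Cosine product assembled in create_model above is one spectral-mixture term with weight 1/Q (the RBF variance). As a sketch, up to GPflow's exact scaling convention for the Cosine kernel (whether a 2*pi factor appears inside the cosine is left to the library), the kernel being fit is:

k(x, x') = \sum_{q=1}^{Q} \frac{1}{Q}\, \exp\!\left(-\frac{(x - x')^2}{2 \ell_q^2}\right) \cos\!\left(\frac{x - x'}{\lambda_q}\right) + k_{\mathrm{Linear}}(x, x') + k_{\mathrm{Bias}}

where the \ell_q are the RBF lengthscales and the \lambda_q are the Cosine lengthscales (inverse frequencies) that the Bayesian optimization searches over.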
e717d0d89312b7c1f3edfbefa451146e0cdccd72
9,390
ipynb
Jupyter Notebook
Coursera/Serverless Machine Learning with Tensorflow on Google Cloud Platform/Lab 2 Getting Started with TensorFlow/a_tfstart.ipynb
helpthx/Path_through_Data_Science_2019
aa22333eae970506f2ce184551c55565b0be89fb
[ "MIT" ]
2
2019-02-06T09:30:44.000Z
2019-02-09T18:24:46.000Z
Coursera/Serverless Machine Learning with Tensorflow on Google Cloud Platform/Lab 2 Getting Started with TensorFlow/a_tfstart.ipynb
helpthx/Path_through_Data_Science_2019
aa22333eae970506f2ce184551c55565b0be89fb
[ "MIT" ]
11
2019-06-22T00:58:03.000Z
2019-07-27T14:59:21.000Z
Coursera/Serverless Machine Learning with Tensorflow on Google Cloud Platform/Lab 2 Getting Started with TensorFlow/a_tfstart.ipynb
helpthx/Path_through_Data_Science_2019
aa22333eae970506f2ce184551c55565b0be89fb
[ "MIT" ]
1
2020-12-03T21:10:43.000Z
2020-12-03T21:10:43.000Z
27.781065
553
0.531523
[ [ [ "<h1> Getting started with TensorFlow </h1>\n\nIn this notebook, you play around with the TensorFlow Python API.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport numpy as np\n\nprint(tf.__version__)", "/usr/local/envs/py3env/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "<h2> Adding two tensors </h2>\n\nFirst, let's try doing this using numpy, the Python numeric package. numpy code is immediately evaluated.", "_____no_output_____" ] ], [ [ "a = np.array([5, 3, 8])\nb = np.array([3, -1, 2])\nc = np.add(a, b)\nprint(c)", "[ 8 2 10]\n" ] ], [ [ "The equivalent code in TensorFlow consists of two steps:\n<p>\n<h3> Step 1: Build the graph </h3>", "_____no_output_____" ] ], [ [ "a = tf.constant([5, 3, 8])\nb = tf.constant([3, -1, 2])\nc = tf.add(a, b)\nprint(c)", "Tensor(\"Add:0\", shape=(3,), dtype=int32)\n" ] ], [ [ "c is an Op (\"Add\") that returns a tensor of shape (3,) and holds int32. The shape is inferred from the computation graph.\n\nTry the following in the cell above:\n<ol>\n<li> Change the 5 to 5.0, and similarly the other five numbers. What happens when you run this cell? </li>\n<li> Add an extra number to a, but leave b at the original (3,) shape. What happens when you run this cell? </li>\n<li> Change the code back to a version that works </li>\n</ol>\n\n<p/>\n<h3> Step 2: Run the graph", "_____no_output_____" ] ], [ [ "with tf.Session() as sess:\n result = sess.run(c)\n print(result)", "[ 8 2 10]\n" ] ], [ [ "<h2> Using a feed_dict </h2>\n\nSame graph, but without hardcoding inputs at build stage", "_____no_output_____" ] ], [ [ "a = tf.placeholder(dtype=tf.int32, shape=(None,)) # batchsize x scalar\nb = tf.placeholder(dtype=tf.int32, shape=(None,))\nc = tf.add(a, b)\nwith tf.Session() as sess:\n result = sess.run(c, feed_dict={\n a: [3, 4, 5],\n b: [-1, 2, 3]\n })\n print(result)", "[2 6 8]\n" ] ], [ [ "<h2> Heron's Formula in TensorFlow </h2>\n\nThe area of triangle whose three sides are $(a, b, c)$ is $\\sqrt{s(s-a)(s-b)(s-c)}$ where $s=\\frac{a+b+c}{2}$ \n\nLook up the available operations at https://www.tensorflow.org/api_docs/python/tf", "_____no_output_____" ] ], [ [ "def compute_area(sides):\n # slice the input to get the sides\n a = sides[:,0] # 5.0, 2.3\n b = sides[:,1] # 3.0, 4.1\n c = sides[:,2] # 7.1, 4.8\n \n # Heron's formula\n s = (a + b + c) * 0.5 # (a + b) is a short-cut to tf.add(a, b)\n areasq = s * (s - a) * (s - b) * (s - c) # (a * b) is a short-cut to tf.multiply(a, b), not tf.matmul(a, b)\n return tf.sqrt(areasq)\n\nwith tf.Session() as sess:\n # pass in two triangles\n area = compute_area(tf.constant([\n [5.0, 3.0, 7.1],\n [2.3, 4.1, 4.8]\n ]))\n result = sess.run(area)\n print(result)", "[6.278497 4.709139]\n" ] ], [ [ "<h2> Placeholder and feed_dict </h2>\n\nMore common is to define the input to a program as a placeholder and then to feed in the inputs. 
The difference from the code above is that here the \"area\" graph is built with a placeholder through which the input values are passed in at run-time, rather than being hard-coded with the inputs.", "_____no_output_____" ] ], [ [ "with tf.Session() as sess:\n  sides = tf.placeholder(tf.float32, shape=(None, 3))  # batchsize number of triangles, 3 sides\n  area = compute_area(sides)\n  result = sess.run(area, feed_dict = {\n      sides: [\n        [5.0, 3.0, 7.1],\n        [2.3, 4.1, 4.8]\n      ]\n    })\n  print(result)", "[2 6 8]\n" ] ], [ [ "## tf.eager\n\ntf.eager allows you to avoid the build-then-run stages. However, most production code will follow the lazy evaluation paradigm, since that is what allows for multi-device support and distribution. \n<p>\nOne thing you could do is to develop using tf.eager and then comment out the eager execution and add in the session management code.\n\n<b> To run this block, you must first reset the notebook using Reset on the menu bar, then run this block only. </b>", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom tensorflow.contrib.eager.python import tfe\n\ntfe.enable_eager_execution()\n\ndef compute_area(sides):\n  # slice the input to get the sides\n  a = sides[:,0]  # 5.0, 2.3\n  b = sides[:,1]  # 3.0, 4.1\n  c = sides[:,2]  # 7.1, 4.8\n  \n  # Heron's formula\n  s = (a + b + c) * 0.5   # (a + b) is a short-cut to tf.add(a, b)\n  areasq = s * (s - a) * (s - b) * (s - c) # (a * b) is a short-cut to tf.multiply(a, b), not tf.matmul(a, b)\n  return tf.sqrt(areasq)\n\narea = compute_area(tf.constant([\n      [5.0, 3.0, 7.1],\n      [2.3, 4.1, 4.8]\n    ]))\n\nprint(area)", "/usr/local/envs/py3env/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n  from ._conv import register_converters as _register_converters\n" ] ], [ [ "Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e717e7d1bcab26d09698ecb21ffd898425605b5c
48,944
ipynb
Jupyter Notebook
src/main/app-resources/notebook/libexec/input.ipynb
ec-better/ewf-ethz-03-01-01
5ca616e5c25bbba29013a7de248af4b69757921b
[ "Apache-2.0" ]
1
2021-09-23T02:20:11.000Z
2021-09-23T02:20:11.000Z
src/main/app-resources/notebook/libexec/input.ipynb
ec-better/ewf-ethz-03-01-01
5ca616e5c25bbba29013a7de248af4b69757921b
[ "Apache-2.0" ]
null
null
null
src/main/app-resources/notebook/libexec/input.ipynb
ec-better/ewf-ethz-03-01-01
5ca616e5c25bbba29013a7de248af4b69757921b
[ "Apache-2.0" ]
null
null
null
33.754483
342
0.535919
[ [ [ "## ETHZ-03-01-01 - Mapping landslides with Deep Learning algorithms applied to EO data", "_____no_output_____" ], [ "This application takes Sentinel-2 and ALO DEM to generate a landslide mask", "_____no_output_____" ], [ "### <a name=\"service\">Service definition", "_____no_output_____" ] ], [ [ "service = dict([('title', 'Landslide mapping with Deep Learning algorithms applied to EO data'),\n ('abstract', 'This application takes Sentinel-2 and ALO DEM to generate a landslide mask'),\n ('id', 'ewf-notebook-stagein-2')])", "_____no_output_____" ] ], [ [ "### Parameter Definition", "_____no_output_____" ] ], [ [ "#model = dict([('id', 'model'),\n# ('value', 'Oregon'),\n# ('title', 'Chose the model trained in: Oregon or Bhutan'),\n# ('abstract', 'Set the value of WKT Polygon')])", "_____no_output_____" ], [ "model = dict([('id', 'model'),\n ('value', 'Bhutan'),\n ('title', 'Chose the model trained in: Oregon or Bhutan'),\n ('abstract', 'Set the value of WKT Polygon')])", "_____no_output_____" ], [ "#aoi = dict([('id', 'aoi'),\n# ('value', 'POLYGON ((456037.4350113738 4837015.1639622, 456037.4350113738 4855174.452151849, 425620.7027991246 4855174.452151849, 425620.7027991246 4837015.1639622, 456037.4350113738 4837015.1639622))'),\n# ('title', 'WKT Polygon for the Bounding Box in EPSG:32610 (Oregon model) or EPSG:32646 (Bhutan model) '),\n# ('abstract', 'Set the value of WKT Polygon according to the model chosen')])", "_____no_output_____" ], [ "aoi = dict([('id', 'aoi'),\n ('value', 'POLYGON ((231100.37601192091824487 3032459.54463443346321583, 208550.73706856259377673 3032867.78160779643803835, 208550.73706856259377673 3097148.90503269853070378, 231100.37601192091824487 3096740.66805933555588126, 231100.37601192091824487 3032459.54463443346321583))'),\n ('title', 'WKT Polygon for the Bounding Box in EPSG:32610 (Oregon model) or EPSG:32646 (Bhutan model) '),\n ('abstract', 'Set the value of WKT Polygon according to the model chosen')])", "_____no_output_____" ] ], [ [ "### <a name=\"runtime\">Runtime parameter definition", "_____no_output_____" ], [ "**Input references**\n\nThis is the Sentinel-1 stack catalogue references", "_____no_output_____" ] ], [ [ "#input_references = ('https://catalog.terradue.com/sentinel2/search?format=atom&uid=S2B_MSIL1C_20200412T185909_N0209_R013_T10TDP_20200412T221853', 'https://catalog.terradue.com/alos-dem/search?format=atom&uid=N043W124')", "_____no_output_____" ], [ "input_references = ('https://catalog.terradue.com/sentinel2/search?format=atom&uid=S2B_MSIL1C_20191116T044039_N0208_R033_T46RBR_20191116T072925', 'https://catalog.terradue.com/alos-dem/search?format=atom&uid=N027E090') ", "_____no_output_____" ] ], [ [ "**Data path**\n\nThis path defines where the data is staged-in. ", "_____no_output_____" ] ], [ [ "data_path = \"/workspace/data\"", "_____no_output_____" ] ], [ [ "**Local path**\n\nThis path defines where the full path to each data product. 
", "_____no_output_____" ] ], [ [ "import os", "_____no_output_____" ], [ "#local_data = ('/workspace/data/S2B_MSIL1C_20200412T185909_N0209_R013_T10TDP_20200412T221853', '/workspace/data/N043W124')", "_____no_output_____" ], [ "local_data = ('/workspace/data/S2B_MSIL1C_20191116T044039_N0208_R033_T46RBR_20191116T072925', '/workspace/data/N027E090')", "_____no_output_____" ] ], [ [ "## <a name=\"workflow\">Workflow", "_____no_output_____" ], [ "#### Import the packages required for processing the data", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport shutil\nsys.path.append(os.getcwd())\nsys.path.append('/application/notebook/libexec/')\n\nimport helpers as nx\n\n# Bin folder for the SAGA-GIS installation\nSAGA_PATH = \"/usr/local/bin\" \nSAGA_PATH = \"/home/davidcordeiro/saga/bin\" \nOG_PATH = os.environ['PATH']\nif SAGA_PATH not in OG_PATH:\n os.environ['PATH'] = \"{}:{}\".format(SAGA_PATH, OG_PATH)\n\nimport cioppy\nciop = cioppy.Cioppy()", "_____no_output_____" ] ], [ [ "## <a name=\"stage-in\">Stage-In", "_____no_output_____" ] ], [ [ "for path, reference in zip(local_data, input_references):\n search = ciop.search(end_point = reference,\n params = [],\n output_fields='platform', \n model='EOP')\n platform = search[0]['platform']\n if platform == 'S2B' or platform == 'S2A':\n SOURCE_SENTINEL_FOLDER = path\n elif platform == 'ALOS':\n SOURCE_DEM_FOLDER = path", "_____no_output_____" ] ], [ [ "##### DEM input", "_____no_output_____" ] ], [ [ "dsm_identifier = [f for f in os.listdir(SOURCE_DEM_FOLDER) if (os.path.isfile(os.path.join(SOURCE_DEM_FOLDER, f)) and f.endswith('DSM.tif'))]\nSOURCE_DEM_PATH = os.path.join(SOURCE_DEM_FOLDER, dsm_identifier[0])", "_____no_output_____" ] ], [ [ "##### Sentinel input", "_____no_output_____" ] ], [ [ "s2_identifier = os.path.basename(SOURCE_SENTINEL_FOLDER)\nSOURCE_S2_SAFE = os.path.join(SOURCE_SENTINEL_FOLDER, s2_identifier + \".SAFE\")\ns2_granule = os.path.join(SOURCE_S2_SAFE, 'GRANULE')\ns2_interfolder = [name for name in os.listdir(s2_granule)][0]\nSOURCE_S2_IMAGE_FOLDER = os.path.join(s2_granule, s2_interfolder, 'IMG_DATA')", "_____no_output_____" ] ], [ [ "## Pre-Processing constants", "_____no_output_____" ] ], [ [ "region = model['value']\nif region == 'Oregon':\n PROJECT_EPSG = 32610\n model_name = 'trainedModelOregon.hdf5'\n S2_RESOLUTION = [7]\n WORK_RES = 27\nelif region == 'Bhutan':\n PROJECT_EPSG = 32646\n model_name = 'trainedModelBhutan.hdf5'\n S2_RESOLUTION = [10]\n WORK_RES = 30", "_____no_output_____" ] ], [ [ "##### Folders paths", "_____no_output_____" ] ], [ [ "FEATURES_PATH = os.path.join(data_path, 'derivedFeatures', 'features')\nOPTICAL_PATH = os.path.join(data_path, 'derivedFeatures', 'fromOptical')\nDEM_PATH = os.path.join(data_path, 'derivedFeatures', 'multiScaleDEM')\nBOUNDARY_PATH = os.path.join(data_path, 'derivedFeatures', 'boundary')\nAOI_SHP_PATH = os.path.join(data_path, 'aoi', 'aoi.shp')\nBBOX_SHP_PATH = os.path.join(data_path, 'aoi', 'bbox.shp')", "_____no_output_____" ] ], [ [ "##### Ensure that all the folders exists", "_____no_output_____" ] ], [ [ "if os.path.isdir(os.path.dirname(FEATURES_PATH)):\n shutil.rmtree(os.path.dirname(FEATURES_PATH))\nif os.path.isdir(os.path.dirname(BBOX_SHP_PATH)):\n shutil.rmtree(os.path.dirname(BBOX_SHP_PATH))\nnx.ensure_dir(os.path.join(FEATURES_PATH, \"dummy\"))\nnx.ensure_dir(os.path.join(OPTICAL_PATH, \"dummy\"))\nnx.ensure_dir(os.path.join(DEM_PATH, \"dummy\"))\nnx.ensure_dir(os.path.join(BOUNDARY_PATH, \"dummy\"))\nnx.ensure_dir(BBOX_SHP_PATH)", 
"_____no_output_____" ] ], [ [ "##### DEM Scale Factors", "_____no_output_____" ] ], [ [ "NO_DATA_VALUE_DEM = -9999\nRESAMPLING_FACTOR_DEM = [1, 2, 3, 4]", "_____no_output_____" ] ], [ [ "##### Sentinel-2 Images Parameters", "_____no_output_____" ] ], [ [ "split_id = s2_identifier.split(\"_\")\nIMG_STRING = \"_\".join([split_id[-2], split_id[2]])", "_____no_output_____" ], [ "CREATE_RGB_RESCALED = True\nR_MIN_MAX_CH = [ 300 , 1200, 'R' ]\nB_MIN_MAX_CH = [ 800 , 1200, 'B' ]\nG_MIN_MAX_CH = [ 450 , 1100, 'G' ]\nIR_MIN_MAX_CH= [ 400 , 3800, '' ]", "_____no_output_____" ] ], [ [ "##### Wetness connected", "_____no_output_____" ] ], [ [ "WETNESS_T = 9\nCENTRE_OF_PIXEL_DISTANCE = 5 \nN_CONNECTED_PIXEL_MIN = 125\nCLOSE_T = 3", "_____no_output_____" ] ], [ [ "##### Region type", "_____no_output_____" ] ], [ [ "REGION_TYPE = 'Pred'", "_____no_output_____" ] ], [ [ "##### Pre-Processing outputs", "_____no_output_____" ] ], [ [ "region = model['value']\nif region == 'Oregon':\n inputImages = [[ os.path.join(FEATURES_PATH, 'DEM_007_HILLSHADE.tif'), 1/255.0 ],\n [ os.path.join(FEATURES_PATH, 'DEM_007_HILLSHADE_45.tif'), 1/255.0 ],\n [ os.path.join(FEATURES_PATH, 'DEM_007_HILLSHADE_180.tif'), 1/255.0 ],\n [ os.path.join(FEATURES_PATH, 'DEM_007_ROUGHNESS.tif'), 1/150.0 ],\n\n [ os.path.join(FEATURES_PATH, 'DEM_014_SLOPE.tif'), 1/90.0 ], \n\n [ os.path.join(FEATURES_PATH, 'DEM_027_WETNESS_connected.tif'), 1/255.0 ],\n\n [ os.path.join(OPTICAL_PATH, 'S2_NDVI_007_UINT8.tif'), 1/255.0 ],\n\n [ os.path.join(OPTICAL_PATH, 'S2_B03_007_UINT8.tif'), 1/255.0 ],\n [ os.path.join(OPTICAL_PATH, 'S2_B04_007_UINT8.tif'), 1/255.0 ],\n [ os.path.join(OPTICAL_PATH, 'S2_B08_007_UINT8.tif'), 1/255.0 ]]\n \nelif region == 'Bhutan':\n inputImages = [[ os.path.join(FEATURES_PATH, 'DEM_030_HILLSHADE.tif'), 1/255.0 ],\n [ os.path.join(FEATURES_PATH, 'DEM_030_HILLSHADE_45.tif'), 1/255.0 ],\n [ os.path.join(FEATURES_PATH, 'DEM_030_HILLSHADE_180.tif'), 1/255.0 ],\n [ os.path.join(FEATURES_PATH, 'DEM_030_ROUGHNESS.tif'), 1/150.0 ],\n\n [ os.path.join(FEATURES_PATH, 'DEM_030_SLOPE.tif'), 1/60.0 ], \n\n [ os.path.join(FEATURES_PATH, 'DEM_030_WETNESS_connected.tif'), 1/255.0 ],\n\n [ os.path.join(OPTICAL_PATH, 'S2_NDVI_010_UINT8.tif'), 1/255.0 ],\n\n [ os.path.join(OPTICAL_PATH, 'S2_B02_010_UINT8.tif'), 1/255.0 ],\n [ os.path.join(OPTICAL_PATH, 'S2_B03_010_UINT8.tif'), 1/255.0 ],\n [ os.path.join(OPTICAL_PATH, 'S2_B04_010_UINT8.tif'), 1/255.0 ],\n [ os.path.join(OPTICAL_PATH, 'S2_B08_010_UINT8.tif'), 1/255.0 ]]\n\nDL_INPUT_PATH = [ ]\nDL_SCALE_PARAMETER = [ ]\n\nfor i, j in inputImages:\n DL_INPUT_PATH.append(i)\n DL_SCALE_PARAMETER.append(j)", "_____no_output_____" ] ], [ [ "## Multi-resolution DEM", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom osgeo import gdal \nimport cv2\nimport math", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Conversion of WKT to SHP\")\ninterpolation_method = cv2.INTER_LINEAR\ndemPath_diff_px = os.path.join(DEM_PATH, 'temp_diff_px.tif')\ndemPath_xeqy = os.path.join(DEM_PATH, 'temp_xeqy.tif')\n\nnx.wkt2shp(aoi['value'], PROJECT_EPSG, BBOX_SHP_PATH, True)", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Intersecting DEM data with SHP\")\ngdalwrapCmd = \"/opt/anaconda/envs/p36-ethz-03-01-01/bin/gdalwarp -cutline \" + BBOX_SHP_PATH + \\\n \" -crop_to_cutline -t_srs EPSG:\" + str(PROJECT_EPSG) + \\\n \" -r bilinear -dstnodata \" + str(NO_DATA_VALUE_DEM) + \\\n \" -of GTiff -overwrite \" + str(SOURCE_DEM_PATH) + \" \" + str(demPath_diff_px) \nos.system(gdalwrapCmd)", "_____no_output_____" ], [ 
"region = model['value']\nif region == 'Oregon':\n gdaltranslateCmd = \"/opt/anaconda/envs/p36-ethz-03-01-01/bin/gdal_translate -tr 27 27 {} {}\"\nelif region == 'Bhutan':\n gdaltranslateCmd = \"/opt/anaconda/envs/p36-ethz-03-01-01/bin/gdal_translate -tr 30 30 {} {}\"\n\nos.system(gdaltranslateCmd.format(str(demPath_diff_px), str(demPath_xeqy)))", "_____no_output_____" ], [ "def get_image_transform(image_path):\n # Get information from image\n dataset = gdal.Open(image_path, gdal.GA_ReadOnly)\n projection = dataset.GetProjection()\n\n geotransform = dataset.GetGeoTransform()\n xSize = dataset.RasterXSize\n ySize = dataset.RasterYSize\n xResolution = geotransform[1]\n yResolution = geotransform[5]\n\n image = dataset.ReadAsArray()\n image = np.float32(image)\n image[image == NO_DATA_VALUE_DEM] = np.nan\n return image, geotransform, projection", "_____no_output_____" ], [ "diff_px_image, diff_px_transform, projection = get_image_transform(demPath_diff_px)\neq_px_image, eq_px_transform, projection = get_image_transform(demPath_xeqy)", "_____no_output_____" ], [ "def get_transformation_resolution(resolution):\n return str(math.ceil(resolution)).zfill(3)", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Resampling DEM data\")\nfor scale in RESAMPLING_FACTOR_DEM:\n image = eq_px_image if scale == 1 else diff_px_image\n geotransform = eq_px_transform if scale == 1 else diff_px_transform\n #https://medium.com/@wenrudong/what-is-opencvs-inter-area-actually-doing-282a626a09b3\n newData = cv2.resize(image, None, fx = scale, fy = scale, interpolation=interpolation_method) \n \n newGeotransform = list(geotransform)\n newGeotransform[1] = geotransform[1]/scale\n newGeotransform[5] = geotransform[5]/scale\n newGeotransform = tuple(newGeotransform)\n filename_template = 'DEM_{}.{}'\n newFileName = os.path.join(DEM_PATH, filename_template.format(get_transformation_resolution(newGeotransform[1]), 'tif'))\n nx.writeNumpyArr2Geotiff(newFileName, newData, geoTransform = newGeotransform, projection = projection, GDAL_dtype = gdal.GDT_Int16, noDataValue = -9999)\n print(\"Written:\", newFileName)\n newFileNameSaga = os.path.join(DEM_PATH, filename_template.format(get_transformation_resolution(newGeotransform[1]), 'sdat'))\n saga_convert = \"/opt/anaconda/envs/p36-ethz-03-01-01/bin/gdal_translate -of SAGA {} {}\".format(newFileName, newFileNameSaga)\n print(\"Running: {}\".format(saga_convert))\n os.system(saga_convert)", "_____no_output_____" ] ], [ [ "## Features from DEM", "_____no_output_____" ] ], [ [ "ciop.log (\"INFO\", \"Computing Hillshade, Roughness and Slope using GDAL\")\nrasterPaths = nx.getResolution(DEM_PATH, return_full_paths = True)\noptGDAL = ['hillshade', 'roughness', 'slope']\narguments = ['', '', '']\n\nfor rasterPath in rasterPaths:\n if not os.path.isfile(rasterPath):\n print('Unable to open input file ' + rasterPath)\n continue\n basename = '{}_{}{}'.format(os.path.basename(rasterPath)[:-4], '{}', '{}')\n for opt, arg in zip(optGDAL, arguments):\n outputPath = os.path.join(FEATURES_PATH, basename.format(opt.upper(), '.tif'))\n if outputPath in DL_INPUT_PATH:\n commandGDAL = '/opt/anaconda/envs/p36-ethz-03-01-01/bin/gdaldem ' + opt + ' ' + rasterPath + ' ' + outputPath + ' ' + str(arg)\n print('Running: ' + commandGDAL)\n os.system(commandGDAL)\n \n if opt == 'hillshade':\n for az in [45, 180]:\n variant_ext = \"_{}.{}\".format(str(az), \"tif\")\n outputPath = os.path.join(FEATURES_PATH, basename.format(opt.upper(), variant_ext))\n if outputPath in DL_INPUT_PATH:\n commandGDAL = 
'/opt/anaconda/envs/p36-ethz-03-01-01/bin/gdaldem ' + opt + ' ' + rasterPath + ' -az ' + str(az) + ' ' + outputPath + ' ' + str(arg)\n print('Running: ' + commandGDAL)\n os.system(commandGDAL)\n", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Computing Wetness using SAGA-GIS\")\n\ncommandCompound = \"saga_cmd ta_compound 0\"\noutputArgsCompound = ['-WETNESS']\n\nfor rasterPath in rasterPaths:\n INPUT = nx.joinStrArg( ' -ELEVATION' , rasterPath[:-4] + '.sgrd') \n outPath = []\n newArgs = []\n for arg in outputArgsCompound:\n partial_filename = 'DEM_{}'.format(str(WORK_RES).zfill(3))\n tmp_path = os.path.join(FEATURES_PATH, os.path.basename(rasterPath)[:-4] + '_' + arg[1:] + '.sdat')\n\n if partial_filename in os.path.basename(rasterPath)[:-4]:\n newArgs.append(arg)\n outPath.append(tmp_path)\n ARG = ''\n for x, y in zip(newArgs, outPath):\n ARG = nx.joinStrArg(ARG, nx.joinStrArg(x,y))\n if ARG:\n print(\"Running: \" + commandCompound + INPUT + ARG )\n os.system(commandCompound + INPUT + ARG)", "_____no_output_____" ] ], [ [ "## Rasterize AoI", "_____no_output_____" ] ], [ [ "from osgeo import gdal, ogr, osr\ngdal.UseExceptions()", "_____no_output_____" ], [ "resolution = nx.getResolution(DEM_PATH)\nnx.wkt2shp(aoi['value'], PROJECT_EPSG, AOI_SHP_PATH)\nvectorPath = AOI_SHP_PATH", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Rasterizing the Area of Interest\")\nfor res in resolution:\n rasterPath = os.path.join(DEM_PATH, 'DEM_' + str(res).zfill(3) + '.tif')\n outputPath = os.path.join(BOUNDARY_PATH, REGION_TYPE + '_' + str(res).zfill(3) + '.tif')\n\n try:\n ds = gdal.Open(rasterPath)\n except RuntimeError:\n print('Unable to open input file')\n sys.exit(1)\n\n geoTransform = ds.GetGeoTransform()\n projection = ds.GetProjection()\n srs = osr.SpatialReference(wkt=projection)\n nscn, npix = ds.RasterYSize, ds.RasterXSize \n if srs.IsProjected():\n print('PCS: ', srs.GetAttrValue('projcs'))\n else:\n print('GCS: ', srs.GetAttrValue('geogcs'))\n\n vs = ogr.Open(vectorPath)\n layer = vs.GetLayer()\n\n ds_new = gdal.GetDriverByName('GTiff').Create(outputPath, npix, nscn, 1, gdal.GDT_Byte)\n ds_new.SetGeoTransform(ds.GetGeoTransform())\n ds_new.SetProjection(ds.GetProjection())\n\n outBand = ds_new.GetRasterBand(1)\n outBand.Fill(0)\n ds_new.GetRasterBand(1).SetNoDataValue(0)\n\n # Rasterize the shapefile layer to our new dataset\n status = gdal.RasterizeLayer(ds_new, # output to our new dataset\n [1], # output to our new dataset's first band\n layer, # rasterize this layer\n None, None, # don't worry about transformations since we're in same projection\n [1], # burn value 1\n ['ALL_TOUCHED=TRUE']) # rasterize all pixels touched by polygons\n\n # Close dataset\n ds_new.FlushCache()\n ds_new = None\n outBand = None\n\n ds = None\n", "_____no_output_____" ] ], [ [ "## Crop Sentinel-2 Raster to DEM extent", "_____no_output_____" ] ], [ [ "TMP_S2_WORKSPACE = os.path.join(data_path, 'TMP_S2')\nif os.path.isdir(TMP_S2_WORKSPACE):\n shutil.rmtree(os.path.dirname(os.path.join(TMP_S2_WORKSPACE, \"dummy\")))\n", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Cropping Sentinel-2 Raster to DEM extent\")\nfor res in S2_RESOLUTION:\n\n #destRaster = os.path.join(DEM_PATH, 'DEM_' + str(res).zfill(3) + '.tif')\n destRaster = os.path.join(BOUNDARY_PATH, REGION_TYPE + '_' + str(res).zfill(3) + '.tif')\n noData = '-srcnodata 0 -dstnodata 0'\n inputOutputList = []\n for band in ['_B02', '_B03', '_B04', '_B08']:\n source_in = os.path.join(SOURCE_S2_IMAGE_FOLDER, IMG_STRING + band + '.jp2')\n opt_feat = 
os.path.join(OPTICAL_PATH, 'S2' + band + '_' + str(res).zfill(3) + '.tif')\n inputOutputList.append([source_in, opt_feat])\n \n \n raster_ds = gdal.Open(destRaster, gdal.GA_ReadOnly)\n \n projection = raster_ds.GetProjection()\n geoTransform = raster_ds.GetGeoTransform()\n epsg = int(nx.wkt2EPSG(projection).split(':')[1])\n \n tgt_srs = osr.SpatialReference()\n tgt_srs.ImportFromWkt(projection)\n \n cornerPoints = nx.getCornerCoordinates(raster_ds, False)\n \n npixDEM = raster_ds.RasterXSize\n nscnDEM = raster_ds.RasterYSize\n \n dem = nx.readGDAL2numpy(destRaster)\n \n # create ring\n ring = ogr.Geometry(ogr.wkbLinearRing)\n for point in cornerPoints:\n lat = point[0]\n long = point[1]\n ring.AddPoint(lat,long)\n \n #add first point again to close ring\n ring.AddPoint(cornerPoints[0][0], cornerPoints[0][1])\n \n #add ring to polygon\n extentPoly = ogr.Geometry(ogr.wkbPolygon)\n extentPoly.AddGeometry(ring)\n del ring\n \n ##Define a geographic coordinate system\n #gcs = osr.SpatialReference()\n #gcs.ImportFromEPSG(epsg)\n \n ##Assign Spatial Refrence to polygon\n extentPoly.AssignSpatialReference(tgt_srs)\n \n nx.ensure_dir(os.path.join(TMP_S2_WORKSPACE, \"dummy\"))\n \n driver = ogr.GetDriverByName('Esri Shapefile')\n ds = driver.CreateDataSource(os.path.join(TMP_S2_WORKSPACE, 'demExtent_EPSG' + str(epsg) + '.shp'))\n layer = ds.CreateLayer('', None, ogr.wkbPolygon)\n \n # Add an attribute\n layer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))\n defn = layer.GetLayerDefn()\n \n # Create a new feature\n feat = ogr.Feature(defn)\n feat.SetField('id', 123)\n feat.SetGeometry(extentPoly)\n \n layer.CreateFeature(feat)\n \n feat = None\n ds = layer = feat = None\n \n tgt_srs.MorphToESRI()\n \n file = open(os.path.join(TMP_S2_WORKSPACE, 'demExtent_EPSG' + str(epsg) + '.prj'), 'w')\n file.write(tgt_srs.ExportToWkt())\n file.close()\n \n for raster2clip, outputRasterFileName in inputOutputList:\n if outputRasterFileName is None or outputRasterFileName == '':\n outputRasterFileName = str(raster2clip[:-4]) + '_reProjected.tif'\n \n assert os.path.exists(raster2clip)\n \n cmdStr = '/opt/anaconda/envs/p36-ethz-03-01-01/bin/gdalwarp -overwrite -t_srs EPSG:' + str(epsg) + ' -r cubic ' + noData + ' -multi -q -cutline ' + os.path.join(TMP_S2_WORKSPACE, 'demExtent_EPSG' + str(epsg) + '.shp') + ' -crop_to_cutline -tr ' \\\n + str(geoTransform[1]) + ' ' + str(geoTransform[5]) + ' -of GTiff ' + str(raster2clip) + ' ' + str(outputRasterFileName)\n print('EXECUTE >> ' + str(cmdStr))\n os.system(cmdStr)\n nx.resizeToDEM(outputRasterFileName, (nscnDEM, npixDEM), geoTransform, projection, noData = 0)\n print\n shutil.rmtree(os.path.dirname(os.path.join(TMP_S2_WORKSPACE, \"dummy\")))", "_____no_output_____" ] ], [ [ "## Sentinel-2 Derive from Optical", "_____no_output_____" ] ], [ [ "ciop.log (\"INFO\", \"Computing optical features from Sentinel-2\")\nfor res in S2_RESOLUTION:\n BLUE = os.path.join(OPTICAL_PATH, 'S2_B02_' + str(res).zfill(3) + '.tif')\n GREEN = os.path.join(OPTICAL_PATH, 'S2_B03_' + str(res).zfill(3) + '.tif')\n RED = os.path.join(OPTICAL_PATH, 'S2_B04_' + str(res).zfill(3) + '.tif')\n IR = os.path.join(OPTICAL_PATH, 'S2_B08_' + str(res).zfill(3) + '.tif')\n \n def convert2Float(im):\n im = np.float32(im)\n im[im == 0] = np.nan\n return im \n \n def convert2Uint16(im):\n im[im is np.nan] = 0\n return np.uint16(im)\n \n def writeNumpyArr2Geotiff_RGBA(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):\n b, g, r, a = data\n nscn, npix = 
b.shape\n \n if np.isnan(data).any() and noDataValue is not None:\n data[np.isnan(data)] = noDataValue\n \n ds_new = gdal.GetDriverByName('GTiff').Create(outputPath, npix, nscn, 1, GDAL_dtype)\n \n if geoTransform != None:\n ds_new.SetGeoTransform(geoTransform)\n \n if projection != None:\n ds_new.SetProjection(projection) \n \n outBand = ds_new.GetRasterBand(1)\n outBand.WriteArray(data)\n \n if noDataValue != None:\n ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)\n \n # Close dataset\n ds_new.FlushCache()\n ds_new = None\n outBand = None\n \n \n ir, geoTransform, projection = nx.readGDAL2numpy(IR, True)\n ir = convert2Float(ir)\n \n b = convert2Float(nx.readGDAL2numpy(BLUE, False))\n g = convert2Float(nx.readGDAL2numpy(GREEN, False))\n r = convert2Float(nx.readGDAL2numpy(RED, False))\n \n ndvi = (ir - r)/(ir + r)\n \n nx.writeNumpyArr2Geotiff(os.path.join(OPTICAL_PATH, 'S2_NDVI_' + str(res).zfill(3) + '.tif'), ndvi, geoTransform = geoTransform, \\\n projection = projection, GDAL_dtype = gdal.GDT_Float32, noDataValue = -9999)\n \n \n ir = convert2Uint16(ir)\n r = convert2Uint16(r)\n b = convert2Uint16(b)\n g = convert2Uint16(g)\n \n nscn, npix = b.shape\n ds_new = gdal.GetDriverByName('GTiff').Create(os.path.join(OPTICAL_PATH, 'S2_RGBA_' + str(res).zfill(3) + '.tif'), npix, nscn, 4, gdal.GDT_UInt16)\n \n if geoTransform != None:\n ds_new.SetGeoTransform(geoTransform)\n \n if projection != None:\n ds_new.SetProjection(projection) \n \n outBand = ds_new.GetRasterBand(1)\n outBand.WriteArray(ir)\n ds_new.GetRasterBand(1).SetNoDataValue(0)\n \n outBand = ds_new.GetRasterBand(2)\n outBand.WriteArray(r)\n ds_new.GetRasterBand(2).SetNoDataValue(0)\n \n outBand = ds_new.GetRasterBand(3)\n outBand.WriteArray(g)\n ds_new.GetRasterBand(3).SetNoDataValue(0)\n \n outBand = ds_new.GetRasterBand(4)\n outBand.WriteArray(b)\n ds_new.GetRasterBand(4).SetNoDataValue(0)\n \n # Close dataset\n ds_new.FlushCache()\n ds_new = None\n outBand = None", "_____no_output_____" ] ], [ [ "## Rescale Optical Bands", "_____no_output_____" ] ], [ [ "ciop.log (\"INFO\", \"Rescaling optical features\")\nfor res in S2_RESOLUTION:\n \n data = [ \n [os.path.join(OPTICAL_PATH, 'S2_B02_' + str(res).zfill(3) + '.tif'), B_MIN_MAX_CH[0] , B_MIN_MAX_CH[1], B_MIN_MAX_CH[2]],\n [os.path.join(OPTICAL_PATH, 'S2_B03_' + str(res).zfill(3) + '.tif'), G_MIN_MAX_CH[0] , G_MIN_MAX_CH[1], G_MIN_MAX_CH[2]],\n [os.path.join(OPTICAL_PATH, 'S2_B04_' + str(res).zfill(3) + '.tif'), R_MIN_MAX_CH[0] , R_MIN_MAX_CH[1], R_MIN_MAX_CH[2]],\n [os.path.join(OPTICAL_PATH, 'S2_B08_' + str(res).zfill(3) + '.tif'), IR_MIN_MAX_CH[0] , IR_MIN_MAX_CH[1], IR_MIN_MAX_CH[2]] \n ]\n \n data_ndvi_path = os.path.join(OPTICAL_PATH, 'S2_NDVI_' + str(res).zfill(3) + '.tif')\n \n \n ####################### ############ #######################\n ####################### Rescale NDVI #######################\n ####################### ############ #######################\n \n image, geoT, proj = nx.readGDAL2numpy(data_ndvi_path, True)\n \n image = (image * 100).astype(np.int16)\n image[image <= 0 ] = 0\n image[image >= 80] = 80\n image = image.astype(np.uint16)\n \n image = nx.map_uint16_to_uint8(image, 0, 80)\n \n nx.writeNumpyArr2Geotiff(data_ndvi_path[:-4] + '_UINT8.tif', image, geoTransform = geoT, \\\n projection = proj, GDAL_dtype = gdal.GDT_Byte, noDataValue = None)\n \n del image, data_ndvi_path, geoT, proj\n \n \n ####################### ############# #######################\n ####################### Rescale bands #######################\n ####################### 
############# #######################\n for imPath, loVal, hiVal, ch in data:\n \n image, geoT, proj = nx.readGDAL2numpy(imPath, True)\n image = nx.map_uint16_to_uint8(image, loVal, hiVal)\n \n nx.writeNumpyArr2Geotiff(imPath[:-4] + '_UINT8.tif', image, geoTransform = geoT, \\\n projection = proj, GDAL_dtype = gdal.GDT_Byte, noDataValue = None)\n \n \n if ch == 'R':\n r = image.copy()\n geoT2 = geoT\n proj2 = proj\n if ch == 'G':\n g = image.copy()\n if ch == 'B':\n b = image.copy()\n \n \n createRGB = 0\n \n \n for imPath, loVal, hiVal, ch in data:\n if ch == 'R' or ch == 'B' or ch == 'G':\n createRGB += 1\n \n if CREATE_RGB_RESCALED: \n if createRGB == 3:\n createRGB = True\n print(\"RGB image will be created\")\n else:\n createRGB = False\n print(\"RBG channels not correctly defined. RGB image will not be created.\") \n else:\n createRGB = False\n \n \n \n if createRGB:\n nscn, npix = b.shape\n ds_new = gdal.GetDriverByName('GTiff').Create(os.path.join(OPTICAL_PATH, 'S2_RGB_' + str(res).zfill(3) + '_UINT8.tif'), npix, nscn, 3, gdal.GDT_Byte)\n \n if geoT2 != None:\n ds_new.SetGeoTransform(geoT2)\n \n if proj2 != None:\n ds_new.SetProjection(proj2) \n \n \n outBand = ds_new.GetRasterBand(1)\n outBand.WriteArray(r)\n # ds_new.GetRasterBand(1).SetNoDataValue(0)\n \n outBand = ds_new.GetRasterBand(2)\n outBand.WriteArray(g)\n # ds_new.GetRasterBand(2).SetNoDataValue(0)\n \n outBand = ds_new.GetRasterBand(3)\n outBand.WriteArray(b)\n # ds_new.GetRasterBand(3).SetNoDataValue(0)\n \n # Close dataset\n ds_new.FlushCache()\n ds_new = None\n outBand = None \n \n del createRGB", "_____no_output_____" ] ], [ [ "## Clean Optical Features", "_____no_output_____" ] ], [ [ "files = [os.path.join(OPTICAL_PATH, f) for f in os.listdir(OPTICAL_PATH) if (os.path.isfile(os.path.join(OPTICAL_PATH, f)) and os.path.join(OPTICAL_PATH, f) not in DL_INPUT_PATH)]\nfor file in files:\n os.remove(file)", "_____no_output_____" ] ], [ [ "## Wetness Threshold", "_____no_output_____" ] ], [ [ "from skimage import morphology", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Computing wetness feature\")\nfilename_template = 'DEM_{}_WETNESS{}'.format(str(WORK_RES).zfill(3), '{}')\n\nfile_id = '.sdat'\nfilename = filename_template.format(file_id)\nfile_path = os.path.join(FEATURES_PATH, filename)\n\nwetness, geoTransform, projection = nx.readGDAL2numpy(file_path, True)\nnscn, npix = wetness.shape\nwetness[np.isnan(wetness)] = 0\n\nwetness[wetness < WETNESS_T] = 0\nwetness[wetness >= WETNESS_T] = 1\nwetness = np.array(wetness, np.uint8)\n\nfile_id = '_threshold.tif'\nfilename = filename_template.format(file_id)\nfile_path = os.path.join(FEATURES_PATH, filename)\nnx.writeNumpyArr2Geotiff(file_path, wetness * 255, geoTransform = geoTransform, projection = projection, GDAL_dtype = gdal.GDT_Byte, noDataValue = 0)\n\nwetness = morphology.remove_small_objects(wetness.astype(np.bool), min_size = N_CONNECTED_PIXEL_MIN, connectivity=CENTRE_OF_PIXEL_DISTANCE).astype(np.uint8)\nwetness = nx.closeCV(wetness, CLOSE_T)\n\nfile_id = '_connected.tif'\nfilename = filename_template.format(file_id)\nfile_path = os.path.join(FEATURES_PATH, filename)\nnx.writeNumpyArr2Geotiff(file_path, wetness * 255, geoTransform = geoTransform, projection = projection, GDAL_dtype = gdal.GDT_Byte, noDataValue = 0)", "_____no_output_____" ] ], [ [ "## Classification parameters", "_____no_output_____" ] ], [ [ "region = model['value']\nif region == 'Oregon':\n RESOLUTION = 7\nelif region == 'Bhutan':\n RESOLUTION = 10\nOUTPUT_FOLDER_NAME = data_path 
#os.path.join(data_path, 'outputFolder')\nmask_template = \"{}_{}.tif\"\nBOUNDARY_MASK = os.path.join(BOUNDARY_PATH, mask_template.format(REGION_TYPE, str(RESOLUTION).zfill(3)))\nLOCAL_MODEL_PATH = \"./{}\".format(model_name)\nAPP_MODEL_PATH = \"/application/notebook/libexec/{}\".format(model_name)\nSCALE_FACTOR = 1\nTILE_SIZE = 224\nTHRESHOLD = 50", "_____no_output_____" ], [ "import itertools\nfrom tensorflow.keras import models\nfrom tensorflow.keras import backend as K\nfrom tensorflow.python.keras import optimizers\nfrom losses import BCE_F_TI_LOSS, MCC, PRED_AREA, POD, POFD ", "_____no_output_____" ], [ "def bbox(img):\n scn = np.any(img, axis=1)\n pix = np.any(img, axis=0)\n scnMin, scnMax = np.where(scn)[0][[0, -1]]\n pixMin, pixMax = np.where(pix)[0][[0, -1]]\n return [scnMin, scnMax, pixMin, pixMax]\n\n\ndef getBoundingBox(resolution, regionType = REGION_TYPE, returnBinaryMask = False):\n rasterPath = os.path.join(BOUNDARY_PATH, REGION_TYPE + '_' + str(resolution).zfill(3) + '.tif')\n maskImage = nx.readGDAL2numpy(rasterPath, False)\n\n yMin, yMax, xMin, xMax = bbox(maskImage)\n\n if returnBinaryMask:\n return yMin, yMax, xMin, xMax , maskImage[yMin : yMax, xMin : xMax]\n else:\n return yMin, yMax, xMin, xMax", "_____no_output_____" ] ], [ [ "### TODO: Check if all required pre-processed files are available", "_____no_output_____" ] ], [ [ "ciop.log (\"INFO\", \"Loading features into memory\")\ninputChannel = len(DL_INPUT_PATH)\n\nmask, geoTransform, projection = nx.readGDAL2numpy(BOUNDARY_MASK, return_geoInformation = True)\ninputImages = np.zeros([mask.shape[0], mask.shape[1], inputChannel], dtype = np.float32)\nfor i in range(inputChannel):\n inputImages[ : , : , i] = cv2.resize(nx.readGDAL2numpy(DL_INPUT_PATH[i], return_geoInformation = False), (mask.shape[1], mask.shape[0]), interpolation=cv2.INTER_CUBIC)\n ciop.log (\"INFO\", DL_INPUT_PATH[i] + \" loaded in Memory.\")\ndel i\n\nyMin, yMax, xMin, xMax, binaryMask = getBoundingBox(resolution = RESOLUTION, regionType = REGION_TYPE, returnBinaryMask = True)\n\ntestImage = inputImages[yMin : yMax, xMin : xMax, :]\n\ndel inputImages, mask", "_____no_output_____" ], [ "fetchSize = int(TILE_SIZE / 2)\n\nY = [y for y in range(fetchSize, testImage.shape[0] - fetchSize, fetchSize)]\nX = [x for x in range(fetchSize, testImage.shape[1] - fetchSize, fetchSize)]\n\n\nnewGeoTransform = nx.newGeoTransform(geoTransform, {'xMin' : xMin, 'yMin' : yMin})\nshrinkGeoTransform = nx.shrinkGeoTransform(newGeoTransform, 1/SCALE_FACTOR)", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Loading model into memory\")\nif os.path.isfile(LOCAL_MODEL_PATH):\n model = models.load_model(LOCAL_MODEL_PATH, compile=False)\nelif os.path.isfile(APP_MODEL_PATH):\n model = models.load_model(APP_MODEL_PATH, compile=False)\nelse:\n sys.exit()", "_____no_output_____" ], [ "make_loss = BCE_F_TI_LOSS()\nmodel_optimizer = optimizers.Adam()\n\n\nmodel.compile(optimizer=model_optimizer, loss=make_loss, metrics=[MCC, PRED_AREA, POD, POFD])\n \nshrinkLabelImageShape = [np.int(testImage.shape[0]/SCALE_FACTOR), np.int(testImage.shape[1]/SCALE_FACTOR)]\n\noutputTileSize = int(TILE_SIZE / 2 * SCALE_FACTOR)\noutputFetchSize = int(outputTileSize/2)\n\npredictMask = np.zeros(shrinkLabelImageShape, np.uint8)\npredictStable = np.zeros(shrinkLabelImageShape, np.uint8)", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Computing prediction\")\nimg_shape = (TILE_SIZE, TILE_SIZE, inputChannel)\n\n\nlocXY = itertools.product(X,Y)\nfor x, y in locXY:\n img = testImage[y - fetchSize : y + 
fetchSize, x - fetchSize : x + fetchSize, :] * DL_SCALE_PARAMETER\n img = img.reshape(1, img_shape[0], img_shape[1], inputChannel)\n predicted_label = model.predict(img)[0]\n \n predictMask[int(y/SCALE_FACTOR) - outputFetchSize : int(y/SCALE_FACTOR) + outputFetchSize, int(x/SCALE_FACTOR) - outputFetchSize : int(x/SCALE_FACTOR) + outputFetchSize] = predicted_label[ outputTileSize-outputFetchSize : outputTileSize+outputFetchSize , outputTileSize-outputFetchSize: outputTileSize+outputFetchSize, 0] * 100 \n", "_____no_output_____" ], [ "ciop.log (\"INFO\", \"Saving results into file\")\nfilename = \"{}_LS_PROBABILITY.tif\".format(IMG_STRING)\nnx.writeNumpyArr2Geotiff(filename, predictMask, shrinkGeoTransform, projection, GDAL_dtype = gdal.GDT_Byte, noDataValue = 0)\n\ndel model\nK.clear_session()\n\ndel testImage, predictMask, predictStable, binaryMask, img ", "_____no_output_____" ], [ "landslide_Predict = nx.readGDAL2numpy(filename, False)\nlandslide_Predict = np.where(landslide_Predict > THRESHOLD, 1, 0)\nfilename = \"{}_LS_PREDICTION.tif\".format(IMG_STRING)\nnx.writeNumpyArr2Geotiff(filename, landslide_Predict, shrinkGeoTransform, projection, GDAL_dtype = gdal.GDT_Byte, noDataValue = 0)", "_____no_output_____" ] ], [ [ "## Clean Workspace", "_____no_output_____" ] ], [ [ "CLEAN_PARENT_FOLDER = [FEATURES_PATH, AOI_SHP_PATH]\nfor folder in CLEAN_PARENT_FOLDER:\n shutil.rmtree(os.path.dirname(folder))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
e717ef709b3657e5de11bdc7405e4d8aadb712b4
8,317
ipynb
Jupyter Notebook
barcode_detection/annotate_images.ipynb
SavanK/BarcodeScanner
69d79aeb6d861b22cee159d37579a306b78c98a5
[ "Apache-2.0" ]
null
null
null
barcode_detection/annotate_images.ipynb
SavanK/BarcodeScanner
69d79aeb6d861b22cee159d37579a306b78c98a5
[ "Apache-2.0" ]
null
null
null
barcode_detection/annotate_images.ipynb
SavanK/BarcodeScanner
69d79aeb6d861b22cee159d37579a306b78c98a5
[ "Apache-2.0" ]
null
null
null
36.47807
146
0.48984
[ [ [ "Annotate images by clicking around the 1D barcodes in the images. \n1. User has to click on {top-left, top-right, bottom-right, bottom-left} corners (in any order) of the barcode to annotate the images\n2. Press key 'N' to move to next image\n3. Press key 'C' to clear the saved coordinates\n4. Press key 'S' to flush the saved data to 'annotations.csv' file\n5. Press key 'Q' to quit\n\nAnnotation includes - \n1. bounding boxes {top, left, bottom, right} in integers\n2. orientation angles in degrees w.r.t x-axis", "_____no_output_____" ], [ "Import statements", "_____no_output_____" ] ], [ [ "import cv2\nimport pandas as pd\nfrom os import walk\nfrom os import path\nimport numpy as np\nimport math\nfrom ipython_exit import exit", "_____no_output_____" ] ], [ [ "Function to collect the user clicked coordinates, convert them into bounding boxes and orientation angles and there by annotating the image.", "_____no_output_____" ] ], [ [ "def collect_barcodes_bbox_for(directory, file, window_name, next_image_callback, save_output_callback):\n def onclick(event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n #print('point: (', x, ',', y, ')')\n\n # displaying the coordinates on the image window\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.circle(img, (x, y), 1, (0, 0, 255), cv2.FILLED)\n cv2.putText(img, str(x) + ',' +\n str(y), (x + 2, y - 2), font,\n 0.5, (0, 0, 255), 1)\n cv2.imshow(window_name, img)\n\n # save coordinates\n saved_coords.append((x, y))\n\n saved_coords = []\n img = cv2.imread(directory + file, 1)\n cv2.imshow(window_name, img)\n cv2.setMouseCallback(window_name, onclick)\n\n while 1:\n key = cv2.waitKey(0)\n if key == ord('c'):\n # clear saved coordinates\n saved_coords.clear()\n img = cv2.imread(directory + file, 1)\n cv2.imshow(window_name, img)\n continue\n elif key == ord('n'):\n bboxes = ''\n angles = ''\n for b in range(int(len(saved_coords) / 4)):\n coords = np.array([saved_coords[b * 4], saved_coords[b * 4 + 1], saved_coords[b * 4 + 2],\n saved_coords[b * 4 + 3]])\n # calulate bbox\n x_sorted_coords = coords[coords[:, 0].argsort()]\n y_sorted_coords = coords[coords[:, 1].argsort()]\n bbox = [y_sorted_coords[0][1], x_sorted_coords[0][0], y_sorted_coords[3][1], x_sorted_coords[3][0]]\n\n # calculate orientation angle\n point_x1 = np.array([(x_sorted_coords[0][0] + x_sorted_coords[1][0]) / 2,\n (x_sorted_coords[0][1] + x_sorted_coords[1][1]) / 2])\n point_x2 = np.array([(x_sorted_coords[2][0] + x_sorted_coords[3][0]) / 2,\n (x_sorted_coords[2][1] + x_sorted_coords[3][1]) / 2])\n point_y1 = np.array([(y_sorted_coords[0][0] + y_sorted_coords[1][0]) / 2,\n (y_sorted_coords[0][1] + y_sorted_coords[1][1]) / 2])\n point_y2 = np.array([(y_sorted_coords[2][0] + y_sorted_coords[3][0]) / 2,\n (y_sorted_coords[2][1] + y_sorted_coords[3][1]) / 2])\n\n if np.linalg.norm(point_x1 - point_x2) > np.linalg.norm(point_y1 - point_y2):\n opp = point_x2[1] - point_x1[1]\n base = point_x2[0] - point_x1[0]\n else:\n opp = point_y2[1] - point_y1[1]\n base = point_y2[0] - point_y1[0]\n\n if base == 0:\n base = 0.0001\n angle = math.degrees(math.atan(opp / base))\n\n if bboxes == '':\n bboxes = str(bbox)\n else:\n bboxes = ',' + bboxes.join(str(bbox))\n\n if angles == '':\n angles = str(angle)\n else:\n angles = ',' + angles.join(str(angle))\n\n saved_coords.clear()\n data = pd.DataFrame([[file, bboxes, angles]], columns=['file', 'bounding_box', 'orientation_angle'])\n\n # send result and ask to load next image\n next_image_callback(data)\n continue\n elif key == ord('s'):\n 
print('save')\n\n save_output_callback()\n continue\n elif key == ord('q'):\n print('quit')\n\n # close the window\n cv2.destroyAllWindows()\n exit()", "_____no_output_____" ] ], [ [ "Main function to load the test images and save annotations result to file", "_____no_output_____" ] ], [ [ "window = 'test-image'\nimages_folder = 'Muenster_Barcode_Database/N95-2592x1944_scaledTo640x480bilinear/'\noutput: pd.DataFrame = None\noutput_file = 'Muenster_Barcode_Database/annotations.csv'\n_, _, image_files = next(walk(images_folder))\nfile_index = 0\n\n\ndef next_image(data: pd.DataFrame):\n print('next image')\n global output\n global file_index\n\n if output is None:\n output = data\n else:\n output = output.append(data, ignore_index=True)\n\n if file_index < len(image_files):\n file_index += 1\n collect_barcodes_bbox_for(images_folder, image_files[file_index], window, next_image, save_output)\n\n\ndef save_output():\n print('write output to file')\n global output\n\n output.to_csv(output_file, index_label='index')\n\n\n# if outfile already present, then reload the output\n# dataframe and continue from last saved index\nif path.exists(output_file):\n output = pd.read_csv(output_file, index_col=0)\n file_index = output.tail(1).index[0]\n\nif file_index < len(image_files):\n file_index += 1\n collect_barcodes_bbox_for(images_folder, image_files[file_index], window, next_image, save_output)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e717f6b74cf3c66787f5f2a234738ddbbc0be6c5
151,028
ipynb
Jupyter Notebook
Machine learning/RR_MatteoMoratello_1205720.ipynb
Morat96/Academic-Repository
de4fdc676bfa67d1a456e39f6a9cfb164061e966
[ "AFL-1.1" ]
null
null
null
Machine learning/RR_MatteoMoratello_1205720.ipynb
Morat96/Academic-Repository
de4fdc676bfa67d1a456e39f6a9cfb164061e966
[ "AFL-1.1" ]
null
null
null
Machine learning/RR_MatteoMoratello_1205720.ipynb
Morat96/Academic-Repository
de4fdc676bfa67d1a456e39f6a9cfb164061e966
[ "AFL-1.1" ]
null
null
null
116.086088
29,356
0.840374
[ [ [ "# Regression on House Pricing Dataset: Variable Selection & Regularization\nWe consider a reduced version of a dataset containing house sale prices for King County, which includes Seattle. It includes homes sold between May 2014 and May 2015.\n\n[https://www.kaggle.com/harlfoxem/housesalesprediction]\n\nFor each house we know 18 house features (e.g., number of bedrooms, number of bathrooms, etc.) plus its price, that is what we would like to predict.", "_____no_output_____" ], [ "## TO DO 1: insert your ID number (\"numero di matricola\") below", "_____no_output_____" ] ], [ [ "#put here your ``numero di matricola''\nnumero_di_matricola = 1205720", "_____no_output_____" ], [ "#import all packages needed\n%matplotlib inline\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "Load the data, remove data samples/points with missing values (NaN) and take a look at them.", "_____no_output_____" ] ], [ [ "#load the data\ndf = pd.read_csv('kc_house_data.csv', sep = ',')\n\n#remove the data samples with missing values (NaN)\ndf = df.dropna() \n\ndf.describe()", "_____no_output_____" ] ], [ [ "Extract input and output data. We want to predict the price by using features other than id as input.", "_____no_output_____" ] ], [ [ "Data = df.values\n\n# m = number of input samples\nm = 3164\nY = Data[:m,2]\nX = Data[:m,3:]\n", "_____no_output_____" ] ], [ [ "## Data Pre-Processing\n\nSplit the data into training set of $m_{train}=50$ samples, validation set of $m_{val}$ samples and a test set of $m_{test}:=m-m_{train}-m_{val}$ samples.", "_____no_output_____" ] ], [ [ "# Split data into train (50 samples) and test data (the rest)\nm_train = 20\n\nm_test = m - m_train \nfrom sklearn.model_selection import train_test_split\n\nXtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=m_test/m, random_state=numero_di_matricola)\n", "_____no_output_____" ] ], [ [ "Standardize the data.", "_____no_output_____" ] ], [ [ "# Data pre-processing\nfrom sklearn import preprocessing\nscaler = preprocessing.StandardScaler().fit(Xtrain)\nXtrain_scaled = scaler.transform(Xtrain)\nXtest_scaled = scaler.transform(Xtest)", "_____no_output_____" ] ], [ [ "## Linear Regression with Squared Loss Solution\n\nNow compute the solution for linear regression with squared loss (i.e., the Least-Squares estimate) using LinearRegression() in Scikit-learn, and print the corresponding average loss in training and test data.\n\nSince the average loss can be quite high, we also compute the coefficient of determination $R^2$ and look at $1 - R^{2}$ to have an idea of what the average loss amounts to. 
To compute the coefficient of determination you can use the \"score(...)\" function.", "_____no_output_____" ] ], [ [ "# Least-Squares\nfrom sklearn import linear_model \n#LR the linear regression model\nLR = linear_model.LinearRegression()\n\n#fit the model on training data\nLR.fit(Xtrain_scaled, Ytrain)\n\n#obtain predictions on training data\nYtrain_predicted = LR.predict(Xtrain_scaled)\n\n#obtain predictions on test data\nYtest_predicted = LR.predict(Xtest_scaled)\n\n#coefficients from the model\nw_LR = np.hstack((LR.intercept_, LR.coef_))\n\n#average error in training data\nloss_train = np.linalg.norm(Ytrain - Ytrain_predicted)**2/m_train\n\n#average error in test data\nloss_test = np.linalg.norm(Ytest - Ytest_predicted)**2/m_test\n\n#print average loss in training data and in test data\nprint(\"Average loss in training data:\"+str(loss_train))\nprint(\"Average loss in test data:\"+str(loss_test))\n\n#print 1 - coefficient of determination in training data and in test data\nprint(\"1 - coefficient of determination on training data:\"+str(1 - LR.score(Xtrain_scaled,Ytrain)))\nprint(\"1 - coefficient of determination on test data:\"+str(1 - LR.score(Xtest_scaled,Ytest)))", "Average loss in training data:2753423367.642774\nAverage loss in test data:305201847503.98944\n1 - coefficient of determination on training data:0.052346987946457646\n1 - coefficient of determination on test data:2.096129824603785\n" ] ], [ [ "### Confidence Intervals\n\nWe now compute the confidence interval for each coefficient.", "_____no_output_____" ] ], [ [ "# Least-Squares: Confidence Intervals\nfrom scipy.stats import t\n\nXtrain_intercept = np.hstack((np.ones((Xtrain_scaled.shape[0],1)), Xtrain_scaled))\n\n#alpha for confidence intervals\nalpha = 0.05\n\nd = Xtrain_scaled.shape[1]-1\n\n#quantile from t-student distribution\ntperc = t.ppf(1-alpha/2, m_train-d-1, loc=0, scale=1)\nsigma2 = np.linalg.norm(Ytrain-Ytrain_predicted)**2/(m_train-d-1)\n\nR = np.dot(Xtrain_intercept.transpose(),Xtrain_intercept)\nUr, Sr, Vr = np.linalg.svd(R, full_matrices=1, compute_uv=1)\n\n\nSri = 1/Sr\nSri = Sri*(Sri<1e10)\n\n#print(Sri)\n\nRi2 = np.dot(Ur,np.dot(np.diag(Sri),np.transpose(Ur)))\n\n#print(np.diag(Ri2))\n#print(sigma2)\nv = np.sqrt(np.diag(Ri2))\nDelta = np.sqrt(sigma2)*v*tperc\nCI = np.transpose(np.vstack((w_LR,w_LR))) + np.transpose(np.vstack((-Delta,+Delta) ))", "_____no_output_____" ] ], [ [ "Plot the LS coefficients and their confidence interval.", "_____no_output_____" ] ], [ [ "# Plot confidence intervals\nplt.figure(1)\nplt.plot(w_LR[1:], 'r', marker='o', ms=7.0)\nplt.plot(CI[1:,0], 'b--')\nplt.plot(CI[1:,1], 'b--')\nplt.plot(np.zeros(w_LR.shape[0],), 'k', linewidth=2.0)\nplt.xlabel('Coefficient Index')\nplt.ylabel('LR Coefficient')\nplt.title('Coefficients and Confidence Sets')\nplt.show()", "_____no_output_____" ] ], [ [ "### Question: based on the results above, if you had to choose at most 4 features for a linear regression model, which ones would you choose? 
Why?\nBased on the null hypothesis testing I would choose the features at index 3, 7, 14 and 16 because I consider them the only ones whose confidence intervals do not include the origin, and which are therefore relevant for the prediction.\nIn particular, the confidence intervals of the features with index 14, 16 and 3 (see code below) clearly do not include the origin, while the feature with index 7 seems the most relevant of the remaining ones, which all include 0 in their CI.\n### TO DO 2\nAnswer the question above (max 5 lines)", "_____no_output_____" ] ], [ [ "f = [4,8,15,17]\nfor w in range(len(f)):\n    print(\"feature %i\"%(f[w]-1))\n    print(CI[f[w]])", "feature 3\n[-1229794.32631503 1127671.89832095]\nfeature 7\n[-453017.85363505 517521.40591213]\nfeature 14\n[-265409.93518166 603491.34552557]\nfeature 16\n[-715546.95619975 553218.80710621]\n" ] ], [ [ "## Best-Subset Selection\n\nSplit the (previous) training data (i.e., the 20 samples chosen above) into a training dataset and a validation dataset to perform best-subset selection. For splitting, put 50% of the data into the validation set.\n\nFor $k$ going from 1 to $n_{sub}=4$:\n1. Compute the best model for all the possible subsets of $k$ features\n2. Compute the prediction error on the validation dataset\n\nFinally we choose the subset of $k^*$ features giving the lowest validation error.\n", "_____no_output_____" ] ], [ [ "import itertools\nimport math \n\nm_trainBSS=int(math.ceil(m_train/2))\nm_valBSS=m_train-m_trainBSS\n\n\nXtrain_BSS = Xtrain_scaled[:m_trainBSS,:]\nYtrain_BSS = Ytrain[:m_trainBSS]\nXval_BSS = Xtrain_scaled[m_trainBSS:,:]\nYval_BSS = Ytrain[m_trainBSS:,]\n\nnsub = 4\nfeatures_idx_dict = {}\nvalidation_err_dict = {}\nvalidation_err_min = np.zeros(nsub,)\nvalidation_err_min_idx = np.zeros(nsub, dtype=np.int64)\nfor k in range(1,nsub+1):\n    features_idx = list(itertools.combinations(range(Xtrain_BSS.shape[1]),k))\n    validation_error = np.zeros(len(features_idx),)\n    for j in range(len(features_idx)):\n        LR_subset = linear_model.LinearRegression()\n        LR_subset.fit(Xtrain_BSS[:,features_idx[j]], Ytrain_BSS)\n        validation_error[j] = np.linalg.norm(Yval_BSS - LR_subset.predict(Xval_BSS[:,features_idx[j]]))**2/m_valBSS \n    validation_err_min[k-1] = np.min(validation_error) \n    validation_err_min_idx[k-1] = np.argmin(validation_error)\n    features_idx_dict.update({k: features_idx})\n    validation_err_dict.update({k: validation_error})\n\nprint(\"Validation error as a function of k (starting at k=1): \"+str(validation_err_min))", "Validation error as a function of k (starting at k=1): [7.77449742e+10 6.91638219e+10 6.43047537e+10 5.24460525e+10]\n" ] ], [ [ "Plot the validation error as a function of the number of retained features.", "_____no_output_____" ] ], [ [ "# Plot\nplt.figure(2)\nfor k in range(1,nsub+1):\n    plt.scatter(k*np.ones(validation_err_dict[k].shape), validation_err_dict[k], color='k', alpha=0.5)\n    #plt.scatter(k, validation_err_min[k-1], color='r', alpha=0.8)\n    if k > 1:\n        plt.plot([k-1, k], [validation_err_min[k-2], validation_err_min[k-1]], color='r',marker='o', \n                 markeredgecolor='k', markerfacecolor = 'r', markersize = 10)\nplt.xlabel('Number of retained features')\nplt.ylabel('Avg. validation error')\nplt.title('Best-Subset Selection')\nplt.show()", "_____no_output_____" ] ], [ [ "Compute the model using the selected subset of features.", "_____no_output_____", "### TO DO 3: pick the number of features for the best subset according to the figure above, learn the model on the entire training data (i.e., the 20 samples chosen at the beginning), and compute score on training and on test data", "_____no_output_____" ] ], [ [ "LR_best_subset = linear_model.LinearRegression()\n\n# now pick the number of features according to best subset\nopt_num_features = np.argmin(validation_err_min)+1\n\n#opt_features_idx contains the indices of the features from best subset\nopt_features_idx = features_idx_dict[opt_num_features][validation_err_min_idx[opt_num_features-1]]\n\n#let's print the indices of the features from best subset\nprint(opt_features_idx)\n\n#fit the best subset on the entire training set\nLR_best_subset.fit(Xtrain_scaled[:,opt_features_idx], Ytrain)\n\n#obtain predictions on training data\nYtrain_predicted_best_subset = LR_best_subset.predict(Xtrain_scaled[:,opt_features_idx])\n\n#obtain predictions on test data\nYtest_predicted_best_subset = LR_best_subset.predict(Xtest_scaled[:,opt_features_idx])\n\n#average loss in training data\nloss_train_best_subset = np.linalg.norm(Ytrain - Ytrain_predicted_best_subset)**2/m_train\n\n#average loss in test data\nloss_test_best_subset = np.linalg.norm(Ytest - Ytest_predicted_best_subset)**2/m_test\n\n#print average loss in training data and in test data\nprint(\"Average loss in training data:\"+str(loss_train_best_subset))\nprint(\"Average loss in test data:\"+str(loss_test_best_subset))\n\n#now print 1- the coefficient of determination on training and on test data to get an idea of what the average\n#loss corresponds to\nprint(\"1 - coefficient of determination of best subset on training data: \"+str(1 - LR_best_subset.score(Xtrain_scaled[:,opt_features_idx],Ytrain)))\nprint(\"1 - coefficient of determination of best subset on test data: \"+str(1 - LR_best_subset.score(Xtest_scaled[:,opt_features_idx],Ytest)))", "(0, 2, 7, 14)\nAverage loss in training data:13177418592.58141\nAverage loss in test data:72744636871.77332\n1 - coefficient of determination of best subset on training data: 0.25052383165536496\n1 - coefficient of determination of best subset on test data: 0.49961100882557175\n" ] ], [ [ "### TO DO 4: do the features from best subset selection correspond to the ones you would have chosen based on confidence intervals for the linear regression coefficients? Comment (max 5 lines)", "_____no_output_____", "The features are very similar; the only different one is the first: 3 with confidence intervals and 6 with best subset selection. Using the features chosen based on confidence intervals leads to an average test loss that is a bit worse than using best subset selection. This means that the four features above describe the prediction of house prices very well. A better result could perhaps be obtained in terms of test loss using k-fold cross-validation instead of a simple validation technique, because the data we use is very limited.", "_____no_output_____", "## Lasso\n\n### TO DO 5\nUse the routine *lasso_path* from *sklearn.linear_model* to compute the \"lasso path\" for different values of the regularization parameter $\\lambda$. You should first fix a grid of possible values of lambda (the variable \"lasso_lams\"). 
For each entry of the vector \"lasso_lams\" you should compute the corresponding model (The i-th column of the vector \"lasso_coefs\" should contain the coefficients of the linear model computed using lasso_lams[i] as regularization parameter).\n\nBe careful that the grid should be chosen appropriately.\n\nNote that the parameter $\\lambda$ is called $\\alpha$ in the Lasso model from sklearn\n", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import lasso_path\n\n# select a grid of possible regularization parameters \n# (be carefull how this is chosen, you may have to refine the choice after having seen the results)\n\n#Note: lasso_lams is supposed to be a numpy array\nlasso_lams = np.logspace(1, 5, num = 150)\n# Use the function lasso_path to compute the \"lasso path\", passing in input the lambda values\n# you have specified in lasso_lams\nlasso_lams, lasso_coefs, _ = lasso_path(Xtrain_scaled,Ytrain,alphas=lasso_lams)", "_____no_output_____" ] ], [ [ "Evaluate the sparsity in the estimated coefficients as a function of the regularization parameter $\\lambda$: to this purpose, compute the number of non-zero entries in the estimated coefficient vector.", "_____no_output_____" ] ], [ [ "l0_coef_norm = np.zeros(len(lasso_lams),)\n\nfor i in range(len(lasso_lams)):\n l0_coef_norm[i] = sum(lasso_coefs[:,i]!=0)\n\n \nplt.figure(6)\nplt.plot(lasso_lams, l0_coef_norm, marker='o', markersize=5)\nplt.xlabel('Lambda')\nplt.ylabel('Number of non-zero coefficients')\nplt.title('Sparsity Degree')\nplt.show()", "_____no_output_____" ] ], [ [ "### TO DO 6: explain the results you observe in the figure above (max 5 lines)\n\nAs expected, increasing the value of lambda adds bias to the estimation and this leads to reduce the complexity of ||<b>w</b>|| decreasing the number of non-zero coefficients, thus simplifying the model. Using a logarithmic grid with 150 values of lambda allowed to find at least one lambda value for each number of non-zero coefficients.", "_____no_output_____" ], [ "### TO DO 7: Use k-fold Cross-Validation to fix the regularization parameter\n\nUse the scikit-learn built-in routine *Lasso* (from the *linear_regression* package) to compute the lasso coefficients.\n\nUse *KFold* from *sklearn.cross_validation* to split the data (i.e. 
Xtrain_scaled and Ytrain) into the desired number of folds.\n\nThen pick $lam\\_opt$ to be the chosen value for the regularization parameter.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import KFold\nfrom sklearn.linear_model import Lasso\n\nnum_folds = 5\n\nkf = KFold(n_splits = num_folds)\n\n#loss_lasso_kfold will contain the value of the loss\nloss_lasso_kfold = np.zeros(len(lasso_lams),)\n\nfor i in range(len(lasso_lams)):\n \n #define a lasso model using Lasso() for the i-th value of lam_values\n lasso_kfold = Lasso(alpha=lasso_lams[i],max_iter=20000)\n for train_index, validation_index in kf.split(Xtrain_scaled):\n Xtrain_kfold, Xval_kfold = Xtrain_scaled[train_index], Xtrain_scaled[validation_index]\n Ytrain_kfold, Yval_kfold = Ytrain[train_index], Ytrain[validation_index]\n \n #learn the model using the training data from the k-fold\n \n lasso_kfold.fit(Xtrain_kfold, Ytrain_kfold)\n \n #compute the loss using the validation data from the k-fold\n \n Yval_kfold_predicted = lasso_kfold.predict(Xval_kfold)\n loss_lasso_kfold[i] += np.linalg.norm(Yval_kfold - Yval_kfold_predicted)**2/Yval_kfold.shape[0]\n\n \n# loss_lasso_kfold should be the average loss observed in the folds\nloss_lasso_kfold /= num_folds\n\n#choose the regularization parameter that minimizes the loss\nlasso_lam_opt = lasso_lams[np.argmin(loss_lasso_kfold)]\nprint(\"Best value of the regularization parameter:\", lasso_lam_opt)", "/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/coordinate_descent.py:491: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Fitting data with very small alpha may cause precision problems.\n ConvergenceWarning)\n" ] ], [ [ "Plot the Cross-Validation estimate of the prediction error as a function of the regularization parameter", "_____no_output_____" ] ], [ [ "plt.figure(4)\nplt.xscale('log')\nplt.plot(lasso_lams, loss_lasso_kfold, color='b')\nplt.scatter(lasso_lams[np.argmin(loss_lasso_kfold)], loss_lasso_kfold[np.argmin(loss_lasso_kfold)], color='b', marker='o', linewidths=5)\nplt.xlabel('Lambda')\nplt.ylabel('Validation Error')\nplt.title('Lasso: choice of regularization parameter')\nplt.show()\nprint(\"Total number of coefficients:\"+str(len(lasso_kfold.coef_)))\nprint(\"Number of non-zero coefficients:\"+str(l0_coef_norm[np.nonzero(lasso_lams==lasso_lam_opt)[0][0]]))\nprint(\"Best value of regularization parameter:\"+str(lasso_lam_opt))", "_____no_output_____" ] ], [ [ "### TO DO 8 now estimate the lasso coefficients using all the training data and the optimal regularization parameter (chosen at previous step)", "_____no_output_____" ] ], [ [ "# Estimate Lasso Coefficients with all data (trainval) for the optimal value lasso_lam_opt of the regularization parameter\n\n#define the model\nlasso_reg = linear_model.Lasso(alpha=lasso_lam_opt)\n\n#fit using the training data\n\nlasso_reg.fit(Xtrain_scaled,Ytrain)\npredict_train = lasso_reg.predict(Xtrain_scaled)\npredict_test = lasso_reg.predict(Xtest_scaled)\n\n#average loss on training data\nloss_train_lasso = np.linalg.norm(Ytrain - predict_train)**2/m_train\n#average loss on test data\nloss_test_lasso = np.linalg.norm(Ytest - predict_test)**2/m_test\n\n#print average loss in training data and in test data\nprint(\"Average loss in training data:\"+str(loss_train_lasso))\nprint(\"Average loss in 
test data:\"+str(loss_test_lasso))\n\n#now print 1- the coefficient of determination on training and on test data to get an idea to what the average\n#loss corresponds to\nprint(\"1 - coefficient of determination on training data:\"+str(1 - lasso_reg.score(Xtrain_scaled,Ytrain)))\nprint(\"1 - coefficient of determination on test data:\"+str(1 - lasso_reg.score(Xtest_scaled,Ytest)))", "Average loss in training data:39688172913.18077\nAverage loss in test data:114620294261.59744\n1 - coefficient of determination on training data:0.754535729418834\n1 - coefficient of determination on test data:0.7872135089334813\n" ] ], [ [ "Compare the LR and the Lasso coefficients.", "_____no_output_____" ] ], [ [ "# Compare LR and lasso coefficients\nind = np.arange(1,len(LR.coef_)+1) # the x locations for the groups\nwidth = 0.35 # the width of the bars\nfig, ax = plt.subplots()\nrects1 = ax.bar(ind, LR.coef_, width, color='r')\nrects2 = ax.bar(ind + width, lasso_reg.coef_, width, color='y')\nax.legend((rects1[0], rects2[0]), ('LR', 'Lasso'))\nplt.xlabel('Coefficient Idx')\nplt.ylabel('Coefficient Value')\nplt.title('LR and Lasso Coefficient')\nplt.show()", "_____no_output_____" ] ], [ [ "## Ridge Regression\n\n## TO DO 9\n### Use Ridge regression with cross-validation\n\nWe perform Ridge regression (i.e., linear regression with squared loss and L2 regularization) for different values of the regularization parameter $\\alpha$ (called $\\lambda$ in class), and use the Scikit-learn function to perform cross-validation (CV).\n\nIn Ridge regression for scikit learn, the objective function is:\n\n$$\n ||y - Xw||^2_2 + \\alpha * ||w||^2_2\n$$\n\nIn the code below:\n- use RidgeCV() to select the best value of $\\alpha$ with a 5-fold CV with L2 penalty;\n- use Ridge() to learn the best model for the best $\\alpha$ for ridge regression using the entire training set (i.e., the 50 samples chosen at the beginning)\n\nNote that RidgeCV() picks some default values of $\\alpha$ to try, but we decide to pass the same values used for the Lasso.\n\n\n", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import RidgeCV\nfrom sklearn.linear_model import Ridge\n\n#let's define the values of alpha to use\nridge_alphas = lasso_lams = np.logspace(1, 5, num = 150)\n\n#define the model using RidgeCV passing the vector of alpha values and the cv value (= number of folds)\nridge = RidgeCV(alphas=ridge_alphas,cv=5)\n\n#fit the model on training data\n\nridge.fit(Xtrain_scaled,Ytrain)\n\n# the attribute 'alpha_' contains the best value of alpha as identified by cross-validation;\n# let's print it\n\nprint(\"Best value of parameter alpha according to 5-fold Cross-Validation: \"+str(ridge.alpha_))\n\n#define the model using the best alpha; note that various solvers are availalbe, choose\n# an appropriate one\nridge_final = Ridge(alpha=ridge.alpha_,solver='lsqr')\n\n#fit the model using the best C on the entire training set\n\nridge_final.fit(Xtrain_scaled,Ytrain)\npredict_train = ridge_final.predict(Xtrain_scaled)\npredict_test = ridge_final.predict(Xtest_scaled)\n\n#average loss on training data\nloss_train_ridge = np.linalg.norm(Ytrain - predict_train)**2/m_train\n\n#average loss on test data\nloss_test_ridge = np.linalg.norm(Ytest - predict_test)**2/m_test\n\n#print average loss in training data and in test data\nprint(\"Average loss in training data:\"+str(loss_train_ridge))\nprint(\"Average loss in test data:\"+str(loss_test_ridge))\n\n#now print 1- the coefficient of determination on training and on test data to get an idea 
of what the average\n#loss corresponds to\nprint(\"1 - coefficient of determination on training data:\"+str(1 - ridge_final.score(Xtrain_scaled,Ytrain)))\nprint(\"1 - coefficient of determination on test data:\"+str(1 - ridge_final.score(Xtest_scaled,Ytest)))", "Best value of parameter alpha according to 5-fold Cross-Validation: 100000.0\nAverage loss in training data:52551651640.45825\nAverage loss in test data:149407746551.92368\n1 - coefficient of determination on training data:0.9990910614463899\n1 - coefficient of determination on test data:1.0261341342969317\n" ] ], [ [ "Compare LR, Lasso, and Ridge regression coefficients", "_____no_output_____" ] ], [ [ "# Compare LR, Lasso, and Ridge coefficients\nind = np.arange(1,len(LR.coef_)+1) # the x locations for the groups\nwidth = 0.25 # the width of the bars\nfig, ax = plt.subplots()\nrects1 = ax.bar(ind, LR.coef_, width, color='r')\nrects2 = ax.bar(ind + width, lasso_reg.coef_, width, color='y')\nrects3 = ax.bar(ind + 2*width, ridge_final.coef_, width, color='b')\nax.legend((rects1[0], rects2[0], rects3[0]), ('LR', 'Lasso', 'Ridge'))\nplt.xlabel('Coefficient Idx')\nplt.ylabel('Coefficient Value')\nplt.title('LR, Lasso, and Ridge Coefficient')\nplt.show()", "_____no_output_____" ] ], [ [ "## TODO 10: comment on the coefficients obtained by the different methods and their comparison (max 5 lines)\nThe coefficients of the three methods are broadly consistent. Lasso performs feature selection: with the optimal lambda found by k-fold Cross-Validation, the predictor keeps nine features and works well in terms of test loss, much better than plain LR (though not better than LR with best subset here). The LR and Ridge coefficients behave in a similar way, but the Ridge model shrinks the weights, giving what appears to be a more regularized model, as can be checked below by examining the test losses.
", "_____no_output_____" ], [ "\n\n## Comparison of models: evaluation of the performance on the test set\n\n", "_____no_output_____" ] ], [ [ "print(\"Average loss of LR on test data:\"+str(loss_test))\nprint(\"Average loss of LR with subset selection on test data:\"+str(loss_test_best_subset))\nprint(\"Average loss of LASSO on test data:\"+str(loss_test_lasso))\nprint(\"Average loss of Ridge regression on test data:\"+str(loss_test_ridge))\n\nprint(\"1 - coefficient of determination of LR on test data:\"+str(1 - LR.score(Xtest_scaled,Ytest)))\nprint(\"1 - coefficient of determination of LR with best subset on test data: \"+str(1 - LR_best_subset.score(Xtest_scaled[:,opt_features_idx],Ytest)))\nprint(\"1 - coefficient of determination of LASSO on test data:\"+str(1 - lasso_reg.score(Xtest_scaled,Ytest)))\nprint(\"1 - coefficient of determination of Ridge regression on test data:\"+str(1 - ridge_final.score(Xtest_scaled,Ytest)))", "Average loss of LR on test data:305201847503.98944\nAverage loss of LR with subset selection on test data:72744636871.77332\nAverage loss of LASSO on test data:114620294261.59744\nAverage loss of Ridge regression on test data:149407746551.92368\n1 - coefficient of determination of LR on test data:2.096129824603785\n1 - coefficient of determination of LR with best subset on test data: 0.49961100882557175\n1 - coefficient of determination of LASSO on test data:0.7872135089334813\n1 - coefficient of determination of Ridge regression on test data:1.0261341342969317\n" ] ], [ [ "## TODO 11: comment and compare the results obtained by the different methods (max 5 lines)\nComparing these results, the worst model is plain LR, which does not cope well with the small amount of training data: the predictor struggles to generalize, even though its training error is very small. LASSO and LR with best subset work very well; they select a small subset of features that describes house prices effectively. Ridge regression also improves considerably on plain LR, although with this dataset its test loss remains higher than that of LASSO and of best subset selection.", "_____no_output_____" ], [ "## TODO 12: using your final model of choice (write which one you choose), what are the features that seem more relevant for the prices of houses? Does this match your intuition?\nThe final model of choice is Ridge Regression: even though LR with best subset attains the lowest test loss here, Ridge keeps all features with shrunken weights and is arguably less sensitive to the particular validation split. Considering the features with the highest weights, the relevant ones are 3, 9, 10, 16 and 18. Two of them, 3 and 16, are features I had already selected at the beginning, via hypothesis testing, as relevant for subset selection. Among the most relevant features are the overall grade of the house, the square footage of the house and the square footage of the lot, all factors that describe the condition of the house well and accordingly its price.", "_____no_output_____" ], [ "### SUGGESTION (not compulsory): repeat the entire analysis above using a different data size, and try to understand the differences that you observe\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
e717fdee011d7bc1581e7ae975a0d9e459956e36
19,890
ipynb
Jupyter Notebook
jupyter_notebooks/papermill/etl/Extract.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
2
2021-02-13T05:52:05.000Z
2022-02-08T09:52:35.000Z
papermill/etl/Extract.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
null
null
null
papermill/etl/Extract.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
null
null
null
44.496644
118
0.36189
[ [ [ "# Extract Task", "_____no_output_____" ] ], [ [ "import io\nimport pandas as pd\nimport requests\npd.options.display.max_rows=1000\npd.options.display.max_columns=100", "_____no_output_____" ] ], [ [ "### Download Excel file directly from website, keep it in memory via BytesIO, and then read it into a dataframe:", "_____no_output_____" ] ], [ [ "URL = 'http://www.tos.ohio.gov/Documents/Transparency/2017-2018-Teacher-Data-for-website.xlsx'\nresp = requests.get(URL)\nfile_obj = io.BytesIO()\nfile_obj.write(resp.content)\nfile_obj.seek(0) # Move file pointer to the beginning or else pandas will think the file is empty\ndf = pd.read_excel(file_obj, index=False)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ] ], [ [ "### I don't like spaces in column names, so I will replace them with underscores:", "_____no_output_____" ] ], [ [ "df.columns = [column.replace(' ','_') for column in df.columns]", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ] ], [ [ "### Let's see what job descriptions there are and get counts:", "_____no_output_____" ] ], [ [ "df['JOB_DESCRIPTION'].value_counts()", "_____no_output_____" ] ], [ [ "### Let's only do analysis on teachers (\"Teacher Assignment\"):", "_____no_output_____" ] ], [ [ "teachers = df.query(\"JOB_DESCRIPTION == 'Teacher Assignment'\")", "_____no_output_____" ] ], [ [ "### Save our filtered data set as a csv file:", "_____no_output_____" ] ], [ [ "teachers.to_csv('/home/pybokeh/Downloads/teachers.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e71809cbcd1f08af6c8475eb2cb2c84fdecd9150
970
ipynb
Jupyter Notebook
Practice.ipynb
Yash0411/SDL-Assignments-TE-Comp
95e1d4214d8cc810a244da669bf0e58646bab31d
[ "MIT" ]
null
null
null
Practice.ipynb
Yash0411/SDL-Assignments-TE-Comp
95e1d4214d8cc810a244da669bf0e58646bab31d
[ "MIT" ]
null
null
null
Practice.ipynb
Yash0411/SDL-Assignments-TE-Comp
95e1d4214d8cc810a244da669bf0e58646bab31d
[ "MIT" ]
null
null
null
23.658537
239
0.502062
[ [ [ "<a href=\"https://colab.research.google.com/github/Yash0411/SDL-Assignments-TE-Comp/blob/master/Practice.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
e7180f90359f5da83c8faf9c8913e9f428ce2781
30,005
ipynb
Jupyter Notebook
module08_advanced_programming_techniques/08_03_exceptions.ipynb
alan-turing-institute/rse-course
193cd0f497319751820ad720a04298554dd86d0c
[ "CC-BY-3.0" ]
43
2021-11-09T09:24:24.000Z
2022-02-16T12:45:07.000Z
module08_advanced_programming_techniques/08_03_exceptions.ipynb
alan-turing-institute/rse-course
193cd0f497319751820ad720a04298554dd86d0c
[ "CC-BY-3.0" ]
38
2021-11-08T18:02:03.000Z
2022-02-16T16:34:36.000Z
module08_advanced_programming_techniques/08_03_exceptions.ipynb
alan-turing-institute/rse-course
193cd0f497319751820ad720a04298554dd86d0c
[ "CC-BY-3.0" ]
29
2021-11-29T10:19:44.000Z
2022-02-20T10:43:12.000Z
30.277497
959
0.544743
[ [ [ "# Exceptions", "_____no_output_____" ], [ "\nWhen we learned about testing, we saw that Python complains when things go wrong by raising an \"Exception\" naming a type of error:\n\n\n", "_____no_output_____" ] ], [ [ "1 / 0", "_____no_output_____" ] ], [ [ "Exceptions are objects, forming a [class hierarchy](https://docs.python.org/3/library/exceptions.html#exception-hierarchy). We just raised an instance\nof the `ZeroDivisionError` class, making the program crash. If we want more\ninformation about where this class fits in the hierarchy, we can use [Python's\n`inspect` module](https://docs.python.org/3/library/inspect.html) to get a chain of classes, from `ZeroDivisionError` up to `object`:", "_____no_output_____" ] ], [ [ "import inspect\n\ninspect.getmro(ZeroDivisionError)", "_____no_output_____" ] ], [ [ "\n\nSo we can see that a zero division error is a particular kind of Arithmetic Error.\n\n\n", "_____no_output_____" ] ], [ [ "x = 1\n\nfor y in x:\n print(y)", "_____no_output_____" ], [ "inspect.getmro(TypeError)", "_____no_output_____" ] ], [ [ "## Create your own Exception", "_____no_output_____" ], [ "When we were looking at testing, we saw that it is important for code to crash with a meaningful exception type when something is wrong.\nWe raise an Exception with `raise`. Often, we can look for an appropriate exception from the standard set to raise. \n\nHowever, we may want to define our own exceptions. Doing this is as simple as inheriting from Exception (or one of its subclasses):", "_____no_output_____" ] ], [ [ "class MyCustomErrorType(ArithmeticError):\n pass\n\n\nraise (MyCustomErrorType(\"Problem\"))", "_____no_output_____" ] ], [ [ "\n\nYou can add custom data to your exception:\n\n\n", "_____no_output_____" ] ], [ [ "class MyCustomErrorType(Exception):\n def __init__(self, category=None):\n self.category = category\n\n def __str__(self):\n return f\"Error, category {self.category}\"\n\n\nraise (MyCustomErrorType(404))", "_____no_output_____" ] ], [ [ "\n\nThe real power of exceptions comes, however, not in letting them crash the program, but in letting your program handle them. We say that an exception has been \"thrown\" and then \"caught\".\n\n\n", "_____no_output_____" ] ], [ [ "import yaml\n\ntry:\n config = yaml.safe_load(open(\"datasource.yaml\"))\n user = config[\"userid\"]\n password = config[\"password\"]\n\nexcept FileNotFoundError:\n print(\"No password file found, using anonymous user.\")\n user = \"anonymous\"\n password = None\n\n\nprint(user)", "No password file found, using anonymous user.\nanonymous\n" ] ], [ [ "\n\nNote that we specify only the error we expect to happen and want to handle. Sometimes you see code that catches everything:\n\n\n", "_____no_output_____" ] ], [ [ "try:\n config = yaml.safe_lod(open(\"datasource.yaml\"))\n user = config[\"userid\"]\n password = config[\"password\"]\nexcept:\n user = \"anonymous\"\n password = None\n\nprint(user)", "anonymous\n" ] ], [ [ "This can be dangerous and can make it hard to find errors! There was a mistyped function name there ('`safe_lod`'), but we did not notice the error, as the generic except caught it. 
\nTherefore, we should be specific and catch only the type of error we want.", "_____no_output_____" ], [ "## Managing multiple exceptions", "_____no_output_____" ], [ "Let's create two credential files to read", "_____no_output_____" ] ], [ [ "with open(\"datasource2.yaml\", \"w\") as outfile:\n outfile.write(\"userid: eidle\\n\")\n outfile.write(\"password: secret\\n\")\n\nwith open(\"datasource3.yaml\", \"w\") as outfile:\n outfile.write(\"user: eidle\\n\")\n outfile.write(\"password: secret\\n\")", "_____no_output_____" ] ], [ [ "And create a function that reads credentials files and returns the username and password to use.", "_____no_output_____" ] ], [ [ "def read_credentials(source):\n try:\n datasource = open(source)\n config = yaml.safe_load(datasource)\n user = config[\"userid\"]\n password = config[\"password\"]\n datasource.close()\n except FileNotFoundError:\n print(\"Password file missing\")\n user = \"anonymous\"\n password = None\n except KeyError:\n print(\"Expected keys not found in file\")\n user = \"anonymous\"\n password = None\n return user, password", "_____no_output_____" ], [ "print(read_credentials(\"datasource2.yaml\"))", "('eidle', 'secret')\n" ], [ "print(read_credentials(\"datasource.yaml\"))", "Password file missing\n('anonymous', None)\n" ], [ "print(read_credentials(\"datasource3.yaml\"))", "Expected keys not found in file\n('anonymous', None)\n" ] ], [ [ "This last code has a flaw: the file was successfully opened, the missing key was noticed, but not explicitly closed. It's normally OK, as Python will close the file as soon as it notices there are no longer any references to datasource in memory, after the function exits. But this is not good practice, you should keep a file handle for as short a time as possible.", "_____no_output_____" ] ], [ [ "def read_credentials(source):\n try:\n datasource = open(source)\n config = yaml.safe_load(datasource)\n\n try:\n print(\"File loaded, trying to extract credentials\")\n user = config[\"userid\"]\n password = config[\"password\"]\n except KeyError:\n print(\"Expected keys not found in file\")\n user = \"anonymous\"\n password = None\n finally:\n # Runs irrespective of whether keys found\n print(\"Closing file\")\n datasource.close()\n\n except FileNotFoundError:\n print(\"Password file missing\")\n user = \"anonymous\"\n password = None\n\n return user, password", "_____no_output_____" ] ], [ [ "The `finally` clause is executed whether or not an exception occurs.\n\nThe last optional clause of a `try` statement, an `else` clause is called only if an exception is NOT raised. It can be a better place than the `try` clause to put code other than that which you expect to raise the error, and which you do not want to be executed if the error is raised. 
It is executed in the same circumstances as code put in the end of the `try` block, the only difference is that errors raised during the `else` clause are not caught.", "_____no_output_____" ] ], [ [ "def read_credentials(source):\n try:\n datasource = open(source)\n\n except FileNotFoundError:\n print(\"Password file missing\")\n user = \"anonymous\"\n password = None\n\n else:\n # Runs only if opening the file was successful\n config = yaml.safe_load(datasource)\n try:\n print(\"File loaded, trying to extract credentials\")\n user = config[\"userid\"]\n password = config[\"password\"]\n except KeyError:\n print(\"Expected keys not found in file\")\n user = \"anonymous\"\n password = None\n finally:\n # Runs irrespective of whether keys found\n print(\"Closing file\")\n datasource.close()\n\n return user, password", "_____no_output_____" ] ], [ [ "Don't worry if `else` seems useless to you; most languages' implementations of try/except don't support such a clause. An alternative way of avoiding leaving the file open in the original implementation (and without using `else` or `finally`) is to use a context manager:", "_____no_output_____" ] ], [ [ "def read_credentials(source):\n try:\n with open(source) as datasource: # closes the file when done\n config = yaml.safe_load(datasource)\n user = config[\"userid\"]\n password = config[\"password\"]\n except FileNotFoundError:\n print(\"Password file missing\")\n user = \"anonymous\"\n password = None\n except KeyError:\n print(\"Expected keys not found in file\")\n user = \"anonymous\"\n password = None\n return user, password", "_____no_output_____" ] ], [ [ "## Catching Exceptions Elsewhere\n\nExceptions do not have to be caught close to the part of the program calling\nthem. They can be caught anywhere \"above\" the calling point in\nthe call stack: control can jump arbitrarily far in the program: up to the `except` clause of the \"highest\" containing try statement.\n\n\n", "_____no_output_____" ] ], [ [ "def f4(x):\n if x == 0:\n return\n if x == 1:\n raise ArithmeticError()\n if x == 2:\n raise SyntaxError()\n if x == 3:\n raise TypeError()", "_____no_output_____" ], [ "def f3(x):\n try:\n print(\"F3Before\")\n f4(x)\n print(\"F3After\")\n except ArithmeticError:\n print(\"F3Except (💣)\")", "_____no_output_____" ], [ "def f2(x):\n try:\n print(\"F2Before\")\n f3(x)\n print(\"F2After\")\n except SyntaxError:\n print(\"F2Except (💣)\")", "_____no_output_____" ], [ "def f1(x):\n try:\n print(\"F1Before\")\n f2(x)\n print(\"F1After\")\n except TypeError:\n print(\"F1Except (💣)\")", "_____no_output_____" ], [ "f1(0)", "F1Before\nF2Before\nF3Before\nF3After\nF2After\nF1After\n" ], [ "f1(1)", "F1Before\nF2Before\nF3Before\nF3Except (💣)\nF2After\nF1After\n" ], [ "f1(2)", "F1Before\nF2Before\nF3Before\nF2Except (💣)\nF1After\n" ], [ "f1(3)", "F1Before\nF2Before\nF3Before\nF1Except (💣)\n" ] ], [ [ "## Design with Exceptions", "_____no_output_____" ], [ "\nNow we know how exceptions work, we need to think about the design implications... How best to use them.\n\nTraditional software design theory will tell you that they should only be used\nto describe and recover from **exceptional** conditions: things going wrong.\nNormal program flow shouldn't use them.\n\nPython's designers take a different view: use of exceptions in normal flow is\nconsidered OK. 
For example, all iterators raise a `StopIteration` exception to\nindicate the iteration is complete.\n\nA commonly recommended Python design pattern is to use exceptions to determine\nwhether an object implements a protocol (concept/interface), rather than testing\non type.\n\nFor example, we might want a function which can be supplied *either* a data\nseries *or* a path to a location on disk where data can be found. We can\nexamine the type of the supplied content:", "_____no_output_____" ] ], [ [ "import yaml\n\n\ndef analysis(source):\n if type(source) == dict:\n name = source[\"modelname\"]\n else:\n content = open(source)\n source = yaml.safe_load(content)\n name = source[\"modelname\"]\n print(name)", "_____no_output_____" ], [ "analysis({\"modelname\": \"Super\"})", "Super\n" ], [ "with open(\"example.yaml\", \"w\") as outfile:\n outfile.write(\"modelname: brilliant\\n\")", "_____no_output_____" ], [ "analysis(\"example.yaml\")", "brilliant\n" ] ], [ [ "\n\n\nHowever, we can also use the try-it-and-handle-exceptions approach to this. \n\n\n", "_____no_output_____" ] ], [ [ "def analysis(source):\n try:\n name = source[\"modelname\"]\n except TypeError:\n content = open(source)\n source = yaml.safe_load(content)\n name = source[\"modelname\"]\n print(name)\n\n\nanalysis(\"example.yaml\")", "brilliant\n" ] ], [ [ "This approach is more extensible, and **behaves properly if we give it some\nother data-source which responds like a dictionary or string.**", "_____no_output_____" ] ], [ [ "def analysis(source):\n try:\n name = source[\"modelname\"]\n except TypeError:\n # Source was not a dictionary-like object\n # Maybe it is a file path\n try:\n content = open(source)\n source = yaml.safe_load(content)\n name = source[\"modelname\"]\n except IOError:\n # Maybe it was already raw YAML content\n source = yaml.safe_load(source)\n name = source[\"modelname\"]\n print(name)\n\n\nanalysis(\"modelname: Amazing\")", "Amazing\n" ] ], [ [ "## Re-Raising Exceptions\n\nSometimes we want to catch an error, partially handle it, perhaps add some\nextra data to the exception, and then re-raise to be caught again further up\nthe call stack. \n\nThe keyword \"`raise`\" with no argument in an `except:` clause will cause the\ncaught error to be re-thrown. Doing this is the only circumstance where it is\nsafe to do `except:` without catching a specific type of error.", "_____no_output_____" ] ], [ [ "try:\n # Something\n pass\nexcept:\n # Do this code here if anything goes wrong\n raise", "_____no_output_____" ] ], [ [ "If you want to be more explicit about where the error came from, you can use the `raise from` syntax, which will create a chain of exceptions:", "_____no_output_____" ] ], [ [ "def lower_function():\n raise ValueError(\"Error in lower function!\")\n\n\ndef higher_function():\n try:\n lower_function()\n except ValueError as e:\n raise RuntimeError(\"Error in higher function!\") from e\n\n\nhigher_function()", "_____no_output_____" ] ], [ [ "\n\nIt can be useful to catch and re-throw an error as you go up the chain, doing any clean-up needed for each layer of a program.\n\nThe error will finally be caught and not re-thrown only at a higher program\nlayer that knows how to recover. This is known as the \"throw low catch high\"\nprinciple.\n\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e71814e779e261cb60ee4019a96d970ae768cc78
7,809
ipynb
Jupyter Notebook
examples/01_complex_ML_models.ipynb
tommartensen/tic
58a0b3b47d4536a242effa339fda114b68477dbb
[ "MIT" ]
2
2019-10-04T14:12:24.000Z
2022-02-28T12:09:01.000Z
examples/01_complex_ML_models.ipynb
tommartensen/tic
58a0b3b47d4536a242effa339fda114b68477dbb
[ "MIT" ]
11
2019-10-04T14:25:36.000Z
2021-11-15T17:49:25.000Z
examples/01_complex_ML_models.ipynb
tommartensen/tic
58a0b3b47d4536a242effa339fda114b68477dbb
[ "MIT" ]
null
null
null
28.922222
179
0.557946
[ [ [ "# Complex Machine Learning Models\n\nAdvanced machine learning models are not implicitly interpretable. \nA human with use-case specific knowledge cannot see the inner workings of a neural network, random forest, etc. with one glimpse. \nThis notebooks builds such \"complex machine learning models\" from the \n[Breast cancer wisconsin (diagnostic) dataset](https://scikit-learn.org/stable/datasets/index.html#breast-cancer-wisconsin-diagnostic-dataset) provided with scikit-learn.\n\nBecause this is just an example, this notebook contains two classifiers:\n\n* Logistic Regression\n* Random Forest Classifier\n* Multi-Layer Perceptron or Artificial Neural Network\n\nThese are trained on the aforementioned dataset that contains 30 features and 569 total samples.\nThe classifiers are stored on the local disk for downstream analysis.\nThese two tables contain general information on the dataset and value statistics for the features.\n\n| | |\n|-|-|\n| Classes | 2 |\n| Samples per class | 212(M),357(B) |\n| Samples total | 569 |\n| Dimensionality | 30 |\n| Features | real, positive |\n\n| Feature | Average | Deviation |\n|-|-|-|\n| radius (mean) | 6.981 | 28.11 |\n| texture (mean) | 9.71 | 39.28 |\n| perimeter (mean) | 43.79 | 188.5 |\n| area (mean) | 143.5 | 2501.0 |\n| smoothness (mean) | 0.053 | 0.163 |\n| compactness (mean) | 0.019 | 0.345 |\n| concavity (mean) | 0.0 | 0.427 |\n| concave points (mean) | 0.0 | 0.201 |\n| symmetry (mean) | 0.106 | 0.304 |\n| fractal dimension (mean) | 0.05 | 0.097 |\n| radius (standard error) | 0.112 | 2.873 |\n| texture (standard error) | 0.36 | 4.885 |\n| perimeter (standard error) | 0.757 | 21.98 |\n| area (standard error) | 6.802 | 542.2 |\n| smoothness (standard error) | 0.002 | 0.031 |\n| compactness (standard error) | 0.002 | 0.135 |\n| concavity (standard error) | 0.0 | 0.396 |\n| concave points (standard error) | 0.0 | 0.053 |\n| symmetry (standard error) | 0.008 | 0.079 |\n| fractal dimension (standard error) | 0.001 | 0.03 |\n| radius (worst) | 7.93 | 36.04 |\n| texture (worst) | 12.02 | 49.54 |\n| perimeter (worst) | 50.41 | 251.2 |\n| area (worst) | 185.2 | 4254.0 |\n| smoothness (worst) | 0.071 | 0.223 |\n| compactness (worst) | 0.027 | 1.058 |\n| concavity (worst) | 0.0 | 1.252 |\n| concave points (worst) | 0.0 | 0.291 |\n| symmetry (worst) | 0.156 | 0.664 |\n| fractal dimension (worst) | 0.055 | 0.208 |", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import os\nimport pickle\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import (\n precision_score, \n recall_score, \n accuracy_score, \n roc_auc_score\n)\nfrom sklearn.neural_network import MLPClassifier\nfrom tic import load_test_data", "_____no_output_____" ] ], [ [ "## Preparing the data\n\nHere, we load the data and create the datasets for training and testing.", "_____no_output_____" ] ], [ [ "data = load_test_data()\nX_train, X_test, y_train, y_test = data['dataset'].values()", "_____no_output_____" ] ], [ [ "## Building Classifiers\n\nIn this part, we train two classifiers:\n\n* Logistic Regression\n* Random Forest\n* Multi-Layer Perceptron\n\nto predict whether a breast cancer tumor is benign or malignant.", "_____no_output_____" ], [ "## Logistic Regression", "_____no_output_____" ] ], [ [ "clf_lr = LogisticRegression(solver='lbfgs', max_iter=10000)\nclf_lr.fit(X_train, y_train)\ny_pred_lr = clf_lr.predict(X_test)", "_____no_output_____" ] ], [ [ "### 
Random Forest Classifier", "_____no_output_____" ] ], [ [ "clf_rf = RandomForestClassifier(n_estimators=100)\nclf_rf.fit(X_train, y_train)\ny_pred_rf = clf_rf.predict(X_test)", "_____no_output_____" ] ], [ [ "### Multi-Layer Perceptron Classifier", "_____no_output_____" ] ], [ [ "clf_mlp = MLPClassifier()\nclf_mlp.fit(X_train, y_train)\ny_pred_mlp = clf_mlp.predict(X_test)", "_____no_output_____" ] ], [ [ "## Evaluation", "_____no_output_____" ] ], [ [ "print(f'''\nLogistic Regression:\n------------------------------\nAccuracy: {accuracy_score(y_test, y_pred_lr)}\nPrecision: {precision_score(y_test, y_pred_lr)}\nRecall: {recall_score(y_test, y_pred_lr)}\nAUROC: {roc_auc_score(y_test, y_pred_lr)}\n\nRandom Forest Classifier:\n------------------------------\nAccuracy: {accuracy_score(y_test, y_pred_rf)}\nPrecision: {precision_score(y_test, y_pred_rf)}\nRecall: {recall_score(y_test, y_pred_rf)}\nAUROC: {roc_auc_score(y_test, y_pred_rf)}\n\n\nMulti-Layer Perceptron Classifier:\n----------------------------------\nAccuracy: {accuracy_score(y_test, y_pred_mlp)}\nPrecision: {precision_score(y_test, y_pred_mlp)}\nRecall: {recall_score(y_test, y_pred_mlp)}\nAUROC: {roc_auc_score(y_test, y_pred_mlp)}\n''')", "_____no_output_____" ] ], [ [ "## Persisting the classifiers\n\nFor downstream interpretability analysis, the classifiers are persisted.", "_____no_output_____" ] ], [ [ "directory = '.classifiers'\nif not os.path.exists(directory):\n os.makedirs(directory)\n\npickle.dump(clf_lr, open(f'{directory}/logistic_regression.clf', 'wb'))\npickle.dump(clf_rf, open(f'{directory}/random_forest.clf', 'wb'))\npickle.dump(clf_mlp, open(f'{directory}/multi_layer_perceptron.clf', 'wb'))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e71815347e0e3900c264bf1aff16fa1dc120ea3d
3,408
ipynb
Jupyter Notebook
legacy/01_Introduction.ipynb
pvkraju80/leo
f6ed6f4ee6eb34c47581d40d4d3fea69f42140f3
[ "CNRI-Python" ]
99
2016-11-02T14:29:41.000Z
2022-03-02T23:53:02.000Z
legacy/01_Introduction.ipynb
pvkraju80/leo
f6ed6f4ee6eb34c47581d40d4d3fea69f42140f3
[ "CNRI-Python" ]
null
null
null
legacy/01_Introduction.ipynb
pvkraju80/leo
f6ed6f4ee6eb34c47581d40d4d3fea69f42140f3
[ "CNRI-Python" ]
86
2016-11-05T15:39:08.000Z
2022-03-14T04:37:17.000Z
21.846154
185
0.518486
[ [ [ "<img src=\"http://hilpisch.com/tpq_logo.png\" alt=\"The Python Quants\" width=\"35%\" align=\"right\" border=\"0\"><br><br><br>", "_____no_output_____" ], [ "# Listed Volatility and Variance Derivatives\n\n**Dr. Yves J. Hilpisch &mdash; Wiley Finance (2016)**\n\n<img src=\"http://hilpisch.com/images/lvvd_cover.png\" alt=\"Derivatives Analytics with Python\" width=\"30%\" align=\"left\" border=\"0\">", "_____no_output_____" ], [ "# Derivatives, Volatility and Variance", "_____no_output_____" ], [ "## Option Pricing and Hedging", "_____no_output_____" ], [ "## Notions of Volatility and Variance", "_____no_output_____" ], [ "## Listed Volatility and Variance Derivatives", "_____no_output_____" ], [ "### The US History", "_____no_output_____" ], [ "### The European History", "_____no_output_____" ], [ "### Volatility of Volatility Indexes", "_____no_output_____" ], [ "### Products Covered in this Book", "_____no_output_____" ], [ "## Volatility and Variance Trading", "_____no_output_____" ], [ "### Volatility Trading", "_____no_output_____" ], [ "### Variance Trading", "_____no_output_____" ], [ "## Python as Our Tool of Choice", "_____no_output_____" ], [ "## Quick Guide Through Rest of the Book", "_____no_output_____" ], [ "<img src=\"http://hilpisch.com/tpq_logo.png\" alt=\"The Python Quants\" width=\"35%\" align=\"right\" border=\"0\"><br>\n\n<a href=\"http://tpq.io\" target=\"_blank\">http://tpq.io</a> | <a href=\"http://twitter.com/dyjh\" target=\"_blank\">@dyjh</a> | <a href=\"mailto:[email protected]\">[email protected]</a>\n\n**DX Analytics** |\n<a href=\"http://dx-analytics.com\">http://dx-analytics.com</a>\n\n**Quant Platform** |\n<a href=\"http://quant-platform.com\">http://quant-platform.com</a>\n\n**Python for Finance Books** |\n<a href=\"http://books.tpq.io\" target=\"_blank\">http://books.tpq.io</a>\n\n**Python for Finance Training** |\n<a href=\"http://training.tpq.io\" target=\"_blank\">http://training.tpq.io</a>", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e71832f1c57e0c58694925f32900fec8602cffa4
13,558
ipynb
Jupyter Notebook
Code/News Article Topics Classifier/Data Processing/TopicFoundInText.ipynb
Sophia1998/Misinformation-Project---Microsoft
f4b868ad440bb4f290f9abad109c5c1cb6dd0266
[ "MIT" ]
null
null
null
Code/News Article Topics Classifier/Data Processing/TopicFoundInText.ipynb
Sophia1998/Misinformation-Project---Microsoft
f4b868ad440bb4f290f9abad109c5c1cb6dd0266
[ "MIT" ]
null
null
null
Code/News Article Topics Classifier/Data Processing/TopicFoundInText.ipynb
Sophia1998/Misinformation-Project---Microsoft
f4b868ad440bb4f290f9abad109c5c1cb6dd0266
[ "MIT" ]
null
null
null
42.905063
509
0.410311
[ [ [ "import pandas as pd\nimport numpy as np\nimport requests as rq\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\nimport os\nfrom google.colab import drive\ndrive.mount('/content/drive')\nos.chdir(\"/content/drive/MyDrive/Colab_Notebooks/\")", "Mounted at /content/drive\n" ], [ "# Enter file path for the GDI million links csv. The file is in GDI folder under partner data on SharePoint.\nfilepath = 'scraped_fulltext.csv'\n\n# Enter the search term list you'd like to find in texts\ntopics = ['voter fraud','white supremacy','anti-latinx','biden','big tech', \\\n 'anti-muslim','abortion','president biden','coronavirus',\\\n 'anti-lgbt','misogyny','anti-immigrant','anti-black',\\\n 'antivaxx','pseudoscience','qanon','5g','critical race theory','aliens']\n\n# Data transformation to return the 84785 urls that have GDI topics.\nGDI_links = pd.read_csv(filepath)\n# GDI_links = GDI_links[GDI_links['error'] != 'Error']\nGDI_links = GDI_links[['uid', 'url', 'keywords', 'classifiers', 'full_text']]\nGDI_links = GDI_links.dropna(axis = 0, how = 'any')", "_____no_output_____" ], [ "# Topics from the topic list found in the 'full_text' column, stored in the'topic_found' column.\ntexts = GDI_links['full_text']\nfinal = []\n\nfor i in texts:\n try:\n result = ''\n for t in topics:\n if i.lower().find(t.lower()) != -1:\n result = result + t + \"|\"\n result = result[:-1]\n final.append(result)\n except:\n final.append(np.nan)\n\nGDI_links['topic_found'] = final", "_____no_output_____" ], [ "# Enter filepath/name of output csv.\ndestination = 'TopicFoundInText.csv'\n\nGDI_links.to_csv(destination)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
e7183ae80ee667ddaeebf5559f634a910b7da4e0
62,298
ipynb
Jupyter Notebook
CompStats/effect_size.ipynb
sunny2309/scipy_conf_notebooks
30a85d5137db95e01461ad21519bc1bdf294044b
[ "MIT" ]
2
2021-01-09T15:57:26.000Z
2021-11-29T01:44:21.000Z
CompStats/effect_size.ipynb
sunny2309/scipy_conf_notebooks
30a85d5137db95e01461ad21519bc1bdf294044b
[ "MIT" ]
5
2019-11-15T02:00:26.000Z
2021-01-06T04:26:40.000Z
CompStats/effect_size.ipynb
sunny2309/scipy_conf_notebooks
30a85d5137db95e01461ad21519bc1bdf294044b
[ "MIT" ]
null
null
null
78.362264
21,370
0.823847
[ [ [ "Effect Size\n===\n\nExamples and exercises for a tutorial on statistical inference.\n\nCopyright 2016 Allen Downey\n\nLicense: [Creative Commons Attribution 4.0 International](http://creativecommons.org/licenses/by/4.0/)", "_____no_output_____" ] ], [ [ "from __future__ import print_function, division\n\nimport numpy\nimport scipy.stats\n\nimport matplotlib.pyplot as pyplot\n\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\n\n# seed the random number generator so we all get the same results\nnumpy.random.seed(17)\n\n# some nice colors from http://colorbrewer2.org/\nCOLOR1 = '#7fc97f'\nCOLOR2 = '#beaed4'\nCOLOR3 = '#fdc086'\nCOLOR4 = '#ffff99'\nCOLOR5 = '#386cb0'\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Part One\n\nTo explore statistics that quantify effect size, we'll look at the difference in height between men and women. I used data from the Behavioral Risk Factor Surveillance System (BRFSS) to estimate the mean and standard deviation of height in cm for adult women and men in the U.S.\n\nI'll use `scipy.stats.norm` to represent the distributions. The result is an `rv` object (which stands for random variable).", "_____no_output_____" ] ], [ [ "mu1, sig1 = 178, 7.7\nmale_height = scipy.stats.norm(mu1, sig1)\nmale_height", "_____no_output_____" ], [ "mu2, sig2 = 163, 7.3\nfemale_height = scipy.stats.norm(mu2, sig2)\nfemale_height", "_____no_output_____" ] ], [ [ "The following function evaluates the normal (Gaussian) probability density function (PDF) within 4 standard deviations of the mean. It takes and rv object and returns a pair of NumPy arrays.", "_____no_output_____" ] ], [ [ "def eval_pdf(rv, num=4):\n mean, std = rv.mean(), rv.std()\n xs = numpy.linspace(mean - num*std, mean + num*std, 100)\n ys = rv.pdf(xs)\n return xs, ys", "_____no_output_____" ] ], [ [ "Here's what the two distributions look like.", "_____no_output_____" ] ], [ [ "xs, ys = eval_pdf(male_height)\npyplot.plot(xs, ys, label='male', linewidth=4, color=COLOR2)\n\nxs, ys = eval_pdf(female_height)\npyplot.plot(xs, ys, label='female', linewidth=4, color=COLOR3)\npyplot.xlabel('height (cm)')\nNone", "_____no_output_____" ] ], [ [ "Let's assume for now that those are the true distributions for the population.\n\nI'll use `rvs` to generate random samples from the population distributions. Note that these are totally random, totally representative samples, with no measurement error!", "_____no_output_____" ] ], [ [ "male_sample = male_height.rvs(1000)\nscipy.stats.norm?", "_____no_output_____" ], [ "female_sample = female_height.rvs(1000)", "_____no_output_____" ] ], [ [ "Both samples are NumPy arrays. Now we can compute sample statistics like the mean and standard deviation.", "_____no_output_____" ] ], [ [ "mean1, std1 = male_sample.mean(), male_sample.std()\nmean1, std1", "_____no_output_____" ] ], [ [ "The sample mean is close to the population mean, but not exact, as expected.", "_____no_output_____" ] ], [ [ "mean2, std2 = female_sample.mean(), female_sample.std()\nmean2, std2", "_____no_output_____" ] ], [ [ "And the results are similar for the female sample.\n\nNow, there are many ways to describe the magnitude of the difference between these distributions. An obvious one is the difference in the means:", "_____no_output_____" ] ], [ [ "difference_in_means = male_sample.mean() - female_sample.mean()\ndifference_in_means # in cm", "_____no_output_____" ] ], [ [ "On average, men are 14--15 centimeters taller. 
For some applications, that would be a good way to describe the difference, but there are a few problems:\n\n* Without knowing more about the distributions (like the standard deviations) it's hard to interpret whether a difference like 15 cm is a lot or not.\n\n* The magnitude of the difference depends on the units of measure, making it hard to compare across different studies.\n\nThere are a number of ways to quantify the difference between distributions. A simple option is to express the difference as a percentage of the mean.\n\n**Exercise 1**: what is the relative difference in means, expressed as a percentage?", "_____no_output_____" ] ], [ [ "# Solution goes here\nrelative_diff1 = (difference_in_means/male_sample.mean())*100\n#print relative_diff1\n#relative_diff2 = (difference_in_means/female_sample.mean())*100\n#print relative_diff2", "_____no_output_____" ] ], [ [ "**STOP HERE**: We'll regroup and discuss before you move on.", "_____no_output_____" ], [ "## Part Two\n\nAn alternative way to express the difference between distributions is to see how much they overlap. To define overlap, we choose a threshold between the two means. The simple threshold is the midpoint between the means:", "_____no_output_____" ] ], [ [ "simple_thresh = (mean1 + mean2) / 2\nsimple_thresh", "_____no_output_____" ] ], [ [ "A better, but slightly more complicated threshold is the place where the PDFs cross.", "_____no_output_____" ] ], [ [ "thresh = (std1 * mean2 + std2 * mean1) / (std1 + std2)\nthresh", "_____no_output_____" ] ], [ [ "In this example, there's not much difference between the two thresholds.\n\nNow we can count how many men are below the threshold:", "_____no_output_____" ] ], [ [ "male_below_thresh = sum(male_sample < thresh)\nmale_below_thresh", "_____no_output_____" ] ], [ [ "And how many women are above it:", "_____no_output_____" ] ], [ [ "female_above_thresh = sum(female_sample > thresh)\nfemale_above_thresh", "_____no_output_____" ] ], [ [ "The \"overlap\" is the area under the curves that ends up on the wrong side of the threshold.", "_____no_output_____" ] ], [ [ "male_overlap = male_below_thresh / len(male_sample)\nfemale_overlap = female_above_thresh / len(female_sample)\nmale_overlap, female_overlap", "_____no_output_____" ] ], [ [ "In practical terms, you might report the fraction of people who would be misclassified if you tried to use height to guess sex, which is the average of the male and female overlap rates:", "_____no_output_____" ] ], [ [ "misclassification_rate = (male_overlap + female_overlap) / 2\nmisclassification_rate", "_____no_output_____" ] ], [ [ "Another way to quantify the difference between distributions is what's called \"probability of superiority\", which is a problematic term, but in this context it's the probability that a randomly-chosen man is taller than a randomly-chosen woman.\n\n**Exercise 2**: Suppose I choose a man and a woman at random. 
What is the probability that the man is taller?\n\nHINT: You can `zip` the two samples together and count the number of pairs where the male is taller, or use NumPy array operations.", "_____no_output_____" ] ], [ [ "# Solution goes here\nnumpy.sum(male_sample > female_sample)/male_sample.size", "_____no_output_____" ], [ "# Solution goes here\n(male_sample>female_sample).mean()", "_____no_output_____" ] ], [ [ "Overlap (or misclassification rate) and \"probability of superiority\" have two good properties:\n\n* As probabilities, they don't depend on units of measure, so they are comparable between studies.\n\n* They are expressed in operational terms, so a reader has a sense of what practical effect the difference makes.\n\n### Cohen's effect size\n\nThere is one other common way to express the difference between distributions. Cohen's $d$ is the difference in means, standardized by dividing by the standard deviation. Here's the math notation:\n\n$ d = \\frac{\\bar{x}_1 - \\bar{x}_2} s $\n\nwhere $s$ is the pooled standard deviation:\n\n$s = \\sqrt{\\frac{(n_1-1)s^2_1 + (n_2-1)s^2_2}{n_1+n_2 - 2}}$\n\nHere's a function that computes it:\n", "_____no_output_____" ] ], [ [ "def CohenEffectSize(group1, group2):\n \"\"\"Compute Cohen's d.\n\n group1: Series or NumPy array\n group2: Series or NumPy array\n\n returns: float\n \"\"\"\n diff = group1.mean() - group2.mean()\n\n n1, n2 = len(group1), len(group2)\n var1 = group1.var()\n var2 = group2.var()\n\n pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)\n d = diff / numpy.sqrt(pooled_var)\n return d", "_____no_output_____" ] ], [ [ "Computing the denominator is a little complicated; in fact, people have proposed several ways to do it. This implementation uses the \"pooled standard deviation\", which is a weighted average of the standard deviations of the two groups.\n\nAnd here's the result for the difference in height between men and women.", "_____no_output_____" ] ], [ [ "CohenEffectSize(male_sample, female_sample)", "_____no_output_____" ] ], [ [ "Most people don't have a good sense of how big $d=1.9$ is, so let's make a visualization to get calibrated.\n\nHere's a function that encapsulates the code we already saw for computing overlap and probability of superiority.", "_____no_output_____" ] ], [ [ "def overlap_superiority(control, treatment, n=1000):\n \"\"\"Estimates overlap and superiority based on a sample.\n \n control: scipy.stats rv object\n treatment: scipy.stats rv object\n n: sample size\n \"\"\"\n control_sample = control.rvs(n)\n treatment_sample = treatment.rvs(n)\n thresh = (control.mean() + treatment.mean()) / 2\n \n control_above = sum(control_sample > thresh)\n treatment_below = sum(treatment_sample < thresh)\n overlap = (control_above + treatment_below) / n\n \n superiority = (treatment_sample > control_sample).mean()\n return overlap, superiority", "_____no_output_____" ] ], [ [ "Here's the function that takes Cohen's $d$, plots normal distributions with the given effect size, and prints their overlap and superiority.", "_____no_output_____" ] ], [ [ "def plot_pdfs(cohen_d=2):\n \"\"\"Plot PDFs for distributions that differ by some number of stds.\n \n cohen_d: number of standard deviations between the means\n \"\"\"\n control = scipy.stats.norm(0, 1)\n treatment = scipy.stats.norm(cohen_d, 1)\n xs, ys = eval_pdf(control)\n pyplot.fill_between(xs, ys, label='control', color=COLOR3, alpha=0.7)\n\n xs, ys = eval_pdf(treatment)\n pyplot.fill_between(xs, ys, label='treatment', color=COLOR2, alpha=0.7)\n \n o, s = 
overlap_superiority(control, treatment)\n pyplot.text(0, 0.05, 'overlap ' + str(o))\n pyplot.text(0, 0.15, 'superiority ' + str(s))\n pyplot.show()\n #print('overlap', o)\n #print('superiority', s)", "_____no_output_____" ] ], [ [ "Here's an example that demonstrates the function:", "_____no_output_____" ] ], [ [ "plot_pdfs(2)", "_____no_output_____" ] ], [ [ "And an interactive widget you can use to visualize what different values of $d$ mean:", "_____no_output_____" ] ], [ [ "slider = widgets.FloatSlider(min=0, max=4, value=2)\ninteract(plot_pdfs, cohen_d=slider)\nNone", "_____no_output_____" ] ], [ [ "Cohen's $d$ has a few nice properties:\n\n* Because mean and standard deviation have the same units, their ratio is dimensionless, so we can compare $d$ across different studies.\n\n* In fields that commonly use $d$, people are calibrated to know what values should be considered big, surprising, or important.\n\n* Given $d$ (and the assumption that the distributions are normal), you can compute overlap, superiority, and related statistics.", "_____no_output_____" ], [ "In summary, the best way to report effect size depends on the audience and your goals. There is often a tradeoff between summary statistics that have good technical properties and statistics that are meaningful to a general audience.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e71847cfe78ebd1b7c23922a1c07dd0bbc78551e
61,260
ipynb
Jupyter Notebook
catboost/old_notebooks/catboost_por_reg.ipynb
ThomasMGeo/LewisML
1d1ae36bea2bef5d74e1b3db1e30454c2736139c
[ "Apache-2.0" ]
1
2022-01-24T22:46:26.000Z
2022-01-24T22:46:26.000Z
catboost/old_notebooks/catboost_por_reg.ipynb
ThomasMGeo/LewisML
1d1ae36bea2bef5d74e1b3db1e30454c2736139c
[ "Apache-2.0" ]
null
null
null
catboost/old_notebooks/catboost_por_reg.ipynb
ThomasMGeo/LewisML
1d1ae36bea2bef5d74e1b3db1e30454c2736139c
[ "Apache-2.0" ]
null
null
null
61,260
61,260
0.572249
[ [ [ "!pip install catboost\n!pip install scikit-learn --upgrade", "Collecting catboost\n Downloading catboost-0.26.1-cp37-none-manylinux1_x86_64.whl (67.4 MB)\n\u001b[K |████████████████████████████████| 67.4 MB 49 kB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from catboost) (1.19.5)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from catboost) (1.15.0)\nRequirement already satisfied: graphviz in /usr/local/lib/python3.7/dist-packages (from catboost) (0.10.1)\nRequirement already satisfied: pandas>=0.24.0 in /usr/local/lib/python3.7/dist-packages (from catboost) (1.1.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from catboost) (1.4.1)\nRequirement already satisfied: plotly in /usr/local/lib/python3.7/dist-packages (from catboost) (4.4.1)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from catboost) (3.2.2)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->catboost) (2.8.2)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->catboost) (2018.9)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->catboost) (0.10.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->catboost) (1.3.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->catboost) (2.4.7)\nRequirement already satisfied: retrying>=1.3.3 in /usr/local/lib/python3.7/dist-packages (from plotly->catboost) (1.3.3)\nInstalling collected packages: catboost\nSuccessfully installed catboost-0.26.1\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (0.22.2.post1)\nCollecting scikit-learn\n Downloading scikit_learn-0.24.2-cp37-cp37m-manylinux2010_x86_64.whl (22.3 MB)\n\u001b[K |████████████████████████████████| 22.3 MB 4.6 MB/s \n\u001b[?25hRequirement already satisfied: scipy>=0.19.1 in /usr/local/lib/python3.7/dist-packages (from scikit-learn) (1.4.1)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.7/dist-packages (from scikit-learn) (1.19.5)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn) (1.0.1)\nCollecting threadpoolctl>=2.0.0\n Downloading threadpoolctl-2.2.0-py3-none-any.whl (12 kB)\nInstalling collected packages: threadpoolctl, scikit-learn\n Attempting uninstall: scikit-learn\n Found existing installation: scikit-learn 0.22.2.post1\n Uninstalling scikit-learn-0.22.2.post1:\n Successfully uninstalled scikit-learn-0.22.2.post1\nSuccessfully installed scikit-learn-0.24.2 threadpoolctl-2.2.0\n" ] ], [ [ "# Dataframes", "_____no_output_____" ] ], [ [ "# If you have installation questions, please reach out\nimport seaborn as sns\nimport pandas as pd # data storage\nimport catboost as cats # graident boosting \n\nfrom catboost import CatBoostRegressor, Pool\n\nimport numpy as np # math and stuff\nimport matplotlib.pyplot as plt # plotting utility\nimport sklearn # ML and stats\n\nprint('catboost ver:', cats.__version__)\nprint('scikit ver:', sklearn.__version__)\n\nimport datetime\n\nfrom sklearn.preprocessing import MinMaxScaler, RobustScaler\nfrom sklearn.model_selection import cross_val_score, KFold, train_test_split\nfrom 
sklearn.utils.class_weight import compute_sample_weight\nfrom sklearn.metrics import max_error, mean_squared_error, median_absolute_error", "catboost ver: 0.26.1\nscikit ver: 0.24.2\n" ], [ "df = pd.read_csv('drive/My Drive/1_lewis_research/core_to_wl_merge/OS0_Merged_dataset_imputed_08_23_2021.csv')\n\ndf2 = pd.read_csv('drive/My Drive/1_lewis_research/core_to_wl_merge/OS1_Merged_dataset_imputed_08_23_2021.csv')\n\ndf3 = pd.read_csv('drive/My Drive/1_lewis_research/core_to_wl_merge/OS2_Merged_dataset_imputed_08_23_2021.csv')", "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py:2718: DtypeWarning: Columns (17) have mixed types.Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n" ], [ "df = df.drop(['Unnamed: 0', 'Unnamed: 0.1', 'LiveTime2','ScanTime2', 'LiveTime1','ScanTime1',\n 'ref_num', 'API', 'well_name', 'sample_num' ], axis=1)\n\nprint(df.columns.values) # printing all column names\n\ndf.describe()", "['depth_ft' 'USGS_ID' 'CAL' 'GR' 'DT' 'SP' 'DENS' 'PE' 'RESD' 'PHIN'\n 'PHID' 'PE_smooth' 'GR_smooth' 'gz_linear_interp' 'gz_pchip_interp'\n 'perm_air_md' 'perm_klink_md' 'He_por' 'horz_perm_md' 'effective_por'\n 'por_percent' 'stress_swanson_perm_md' 'sample' 'Na' 'Na Err' 'Mg'\n 'Mg Err' 'Al' 'Al Err' 'Si' 'Si Err' 'P' 'P Err' 'S' 'S Err' 'K' 'K Err'\n 'Ca' 'Ca Err' 'Ti' 'Ti Err' 'Cr' 'Cr Err' 'Mn' 'Mn Err' 'Fe' 'Fe Err'\n 'Co' 'Co Err' 'Ni' 'Ni Err' 'Cu' 'Cu Err' 'Zn' 'Zn Err' 'Ba' 'Ba Err']\n" ], [ "df = df[df.He_por >= 0]\n\ndf = df[df.USGS_ID != 'E997']", "_____no_output_____" ], [ "dataset = df[[\n 'CAL', 'GR', 'DT', 'SP', 'DENS', 'PE',\n 'RESD', 'PHIN', 'PHID', \n 'GR_smooth', \n 'PE_smooth','He_por'\n]]", "_____no_output_____" ], [ "dataset.replace('NaN',np.nan, regex=True, inplace=True)# \n#dataset = dataset.dropna()\nnp.shape(dataset)", "/usr/local/lib/python3.7/dist-packages/pandas/core/frame.py:4389: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n method=method,\n" ], [ "X = dataset[['CAL', 'GR', 'DT', 'SP', 'DENS', 'PE',\n 'RESD', 'PHIN', 'PHID', \n 'GR_smooth', \n 'PE_smooth']]\n\nY = dataset[['He_por']]\n\nY_array = np.array(Y.values)", "_____no_output_____" ], [ "seed = 42 # random seed is only used if you want to compare exact answers with friends \ntest_size = 0.25 # how much data you want to withold, .15 - 0.3 is a good starting point\n\nX_train, X_test, y_train, y_test = train_test_split(X.values, Y_array, test_size=test_size)", "_____no_output_____" ], [ "preds = []\npreds2 = []\n\ndef catboost_he_por(X_train, X_test, y_train, y_test, export_name='TEST', max_iter = 200):\n model = CatBoostRegressor(objective='RMSE', iterations=max_iter)\n\n model.fit(X_train, y_train, verbose=max_iter )\n\n preds = model.predict(X_test)\n\n rmse = mean_squared_error(y_test, preds, squared=False)\n print(\"Root Mean Squared Error: %f\" % (rmse))\n max = max_error(y_test, preds)\n print(\"Max Error: %f\" % (max))\n MAE = median_absolute_error(y_test, preds)\n print(\"Median Abs Error: %f\" % (MAE))\n\n grid = {'learning_rate': [ 0.05, 0.1, 0.2, 0.3],\n 'depth': [ 2, 4, 6, 8, 10],\n 'l2_leaf_reg': [1, 2, 3, 4, 5, 7]}\n\n model_grid = CatBoostRegressor(objective='RMSE', \n iterations=max_iter, \n verbose=False)\n\n # Grid Search\n grid_search_result = model_grid.grid_search(grid, \n X=X_train, \n y=y_train, \n 
cv=5,\n verbose=False)\n \n model2 = CatBoostRegressor(objective='RMSE',\n depth=grid_search_result['params']['depth'],\n l2_leaf_reg=grid_search_result['params']['l2_leaf_reg'],\n learning_rate=grid_search_result['params']['learning_rate'],\n iterations=max_iter)\n\n model2.fit(X_train, y_train, verbose=500 )\n\n preds2 = model2.predict(X_test)\n\n rmse2 = mean_squared_error(y_test, preds2, squared=False)\n print(\"Root Mean Squared Error: %f\" % (rmse2))\n max2 = max_error(y_test, preds2)\n print(\"Max Error: %f\" % (max2))\n MAE2= median_absolute_error(y_test, preds2)\n print(\"Median Abs Error: %f\" % (MAE2))\n\n x = datetime.datetime.now()\n\n d = {'target': [Y.columns.values, Y.columns.values],\n 'MSE': [rmse, rmse2],\n 'MAE': [MAE, MAE2],\n 'MaxError': [max, max2], \n 'iter':[max_iter, max_iter],\n 'day': [x.day, x.day], \n 'month':[x.month, x.month], \n 'year':[x.year, x.year],\n 'model':['catboost', 'catboost'],\n 'version':[cats.__version__, cats.__version__ ]}\n \n filepath = 'drive/My Drive/1_lewis_research/analysis/experiments/catboost/catboost_results/'\n\n results = pd.DataFrame(data=d)\n\n results.to_csv(filepath+export_name)\n\n return results", "_____no_output_____" ] ], [ [ "# Iterations", "_____no_output_____" ] ], [ [ "iter = 250", "_____no_output_____" ] ], [ [ "#Offset 0", "_____no_output_____" ] ], [ [ "catboost_he_por(X_train, X_test, y_train, y_test,'OS0_por_cat.csv', iter)", "Learning rate set to 0.088488\n0:\tlearn: 1.8305738\ttotal: 54.1ms\tremaining: 13.5s\n249:\tlearn: 0.4084589\ttotal: 933ms\tremaining: 0us\nRoot Mean Squared Error: 1.977440\nMax Error: 6.111462\nMedian Abs Error: 0.735670\n\nbestTest = 1.565874484\nbestIteration = 235\n\n\nbestTest = 1.537453096\nbestIteration = 174\n\n\nbestTest = 1.450125548\nbestIteration = 87\n\n\nbestTest = 1.621591809\nbestIteration = 139\n\n\nbestTest = 1.586922401\nbestIteration = 242\n\n\nbestTest = 1.504840932\nbestIteration = 162\n\n\nbestTest = 1.522671841\nbestIteration = 106\n\n\nbestTest = 1.540544781\nbestIteration = 59\n\n\nbestTest = 1.573682193\nbestIteration = 249\n\n\nbestTest = 1.540012964\nbestIteration = 156\n\n\nbestTest = 1.444953007\nbestIteration = 127\n\n\nbestTest = 1.517337576\nbestIteration = 108\n\n\nbestTest = 1.573236715\nbestIteration = 238\n\n\nbestTest = 1.526911959\nbestIteration = 241\n\n\nbestTest = 1.45610791\nbestIteration = 142\n\n\nbestTest = 1.514175891\nbestIteration = 58\n\n\nbestTest = 1.600336711\nbestIteration = 238\n\n\nbestTest = 1.553106732\nbestIteration = 232\n\n\nbestTest = 1.464750031\nbestIteration = 183\n\n\nbestTest = 1.542277218\nbestIteration = 42\n\n\nbestTest = 1.568486324\nbestIteration = 249\n\n\nbestTest = 1.538577193\nbestIteration = 183\n\n\nbestTest = 1.515803936\nbestIteration = 196\n\n\nbestTest = 1.513612003\nbestIteration = 128\n\n\nbestTest = 1.513229078\nbestIteration = 187\n\n\nbestTest = 1.492352546\nbestIteration = 74\n\n\nbestTest = 1.497408214\nbestIteration = 39\n\n\nbestTest = 1.51624123\nbestIteration = 58\n\n\nbestTest = 1.497816575\nbestIteration = 175\n\n\nbestTest = 1.446218284\nbestIteration = 120\n\n\nbestTest = 1.514984858\nbestIteration = 36\n\n\nbestTest = 1.552297506\nbestIteration = 17\n\n\nbestTest = 1.495735234\nbestIteration = 165\n\n\nbestTest = 1.53924161\nbestIteration = 90\n\n\nbestTest = 1.532013893\nbestIteration = 84\n\n\nbestTest = 1.53962114\nbestIteration = 42\n\n\nbestTest = 1.526128667\nbestIteration = 192\n\n\nbestTest = 1.516030115\nbestIteration = 97\n\n\nbestTest = 1.525474217\nbestIteration = 141\n\n\nbestTest 
= 1.495882512\nbestIteration = 131\n\n\nbestTest = 1.535286008\nbestIteration = 243\n\n\nbestTest = 1.467093006\nbestIteration = 175\n\n\nbestTest = 1.556501526\nbestIteration = 62\n\n\nbestTest = 1.576567165\nbestIteration = 33\n\n\nbestTest = 1.547757112\nbestIteration = 232\n\n\nbestTest = 1.49518787\nbestIteration = 245\n\n\nbestTest = 1.57584396\nbestIteration = 81\n\n\nbestTest = 1.488202857\nbestIteration = 86\n\n\nbestTest = 1.496081592\nbestIteration = 147\n\n\nbestTest = 1.426657448\nbestIteration = 225\n\n\nbestTest = 1.516913495\nbestIteration = 42\n\n\nbestTest = 1.567341475\nbestIteration = 47\n\n\nbestTest = 1.516531295\nbestIteration = 248\n\n\nbestTest = 1.445052395\nbestIteration = 159\n\n\nbestTest = 1.482011348\nbestIteration = 182\n\n\nbestTest = 1.588196739\nbestIteration = 52\n\n\nbestTest = 1.520829121\nbestIteration = 242\n\n\nbestTest = 1.456034125\nbestIteration = 170\n\n\nbestTest = 1.550144184\nbestIteration = 158\n\n\nbestTest = 1.576685499\nbestIteration = 74\n\n\nbestTest = 1.509360074\nbestIteration = 243\n\n\nbestTest = 1.512587481\nbestIteration = 249\n\n\nbestTest = 1.445592208\nbestIteration = 248\n\n\nbestTest = 1.608083834\nbestIteration = 38\n\n\nbestTest = 1.523100313\nbestIteration = 249\n\n\nbestTest = 1.471651974\nbestIteration = 233\n\n\nbestTest = 1.514861194\nbestIteration = 88\n\n\nbestTest = 1.559538444\nbestIteration = 54\n\n\nbestTest = 1.548444324\nbestIteration = 239\n\n\nbestTest = 1.493572377\nbestIteration = 215\n\n\nbestTest = 1.484320355\nbestIteration = 162\n\n\nbestTest = 1.628981706\nbestIteration = 39\n\n\nbestTest = 1.63992718\nbestIteration = 216\n\n\nbestTest = 1.571255222\nbestIteration = 202\n\n\nbestTest = 1.665280048\nbestIteration = 228\n\n\nbestTest = 1.678735275\nbestIteration = 56\n\n\nbestTest = 1.649577551\nbestIteration = 243\n\n\nbestTest = 1.571553377\nbestIteration = 233\n\n\nbestTest = 1.622918425\nbestIteration = 60\n\n\nbestTest = 1.614734952\nbestIteration = 213\n\n\nbestTest = 1.666070497\nbestIteration = 248\n\n\nbestTest = 1.691899727\nbestIteration = 204\n\n\nbestTest = 1.596426403\nbestIteration = 247\n\n\nbestTest = 1.595884494\nbestIteration = 227\n\n\nbestTest = 1.698669265\nbestIteration = 246\n\n\nbestTest = 1.660075208\nbestIteration = 219\n\n\nbestTest = 1.614617866\nbestIteration = 135\n\n\nbestTest = 1.51257535\nbestIteration = 221\n\n\nbestTest = 1.706587406\nbestIteration = 249\n\n\nbestTest = 1.753357135\nbestIteration = 236\n\n\nbestTest = 1.568415084\nbestIteration = 122\n\n\nbestTest = 1.558782782\nbestIteration = 95\n\n\nbestTest = 1.700348685\nbestIteration = 248\n\n\nbestTest = 1.712679157\nbestIteration = 217\n\n\nbestTest = 1.732038347\nbestIteration = 97\n\n\nbestTest = 1.770506349\nbestIteration = 88\n\n\nbestTest = 1.870299797\nbestIteration = 186\n\n\nbestTest = 1.880168432\nbestIteration = 144\n\n\nbestTest = 1.824304395\nbestIteration = 125\n\n\nbestTest = 1.675209284\nbestIteration = 20\n\n\nbestTest = 1.894638366\nbestIteration = 249\n\n\nbestTest = 1.852178386\nbestIteration = 249\n\n\nbestTest = 1.88668361\nbestIteration = 247\n\n\nbestTest = 1.878135046\nbestIteration = 91\n\n\nbestTest = 1.867744613\nbestIteration = 249\n\n\nbestTest = 1.861648235\nbestIteration = 248\n\n\nbestTest = 1.742402941\nbestIteration = 154\n\n\nbestTest = 1.941351386\nbestIteration = 221\n\n\nbestTest = 1.888752063\nbestIteration = 249\n\n\nbestTest = 1.841069562\nbestIteration = 220\n\n\nbestTest = 1.930808056\nbestIteration = 235\n\n\nbestTest = 1.846550175\nbestIteration = 201\n\n\nbestTest = 
1.920254446\nbestIteration = 248\n\n\nbestTest = 1.890177285\nbestIteration = 246\n\n\nbestTest = 1.908693057\nbestIteration = 248\n\n\nbestTest = 1.761281884\nbestIteration = 249\n\n\nbestTest = 1.956317825\nbestIteration = 249\n\n\nbestTest = 1.902209242\nbestIteration = 249\n\n\nbestTest = 1.85061143\nbestIteration = 249\n\n\nbestTest = 1.998059023\nbestIteration = 246\n\n0:\tlearn: 1.8105858\ttotal: 3.25ms\tremaining: 810ms\n249:\tlearn: 0.2065582\ttotal: 786ms\tremaining: 0us\nRoot Mean Squared Error: 1.920989\nMax Error: 5.958920\nMedian Abs Error: 0.697805\n" ] ], [ [ "# Offset 1", "_____no_output_____" ] ], [ [ "df2 = df2.drop(['Unnamed: 0', 'Unnamed: 0.1', 'LiveTime2','ScanTime2', 'LiveTime1','ScanTime1',\n 'ref_num', 'API', 'well_name', 'sample_num' ], axis=1)\n\ndf2 = df2[df2.He_por >= 0]\ndf2 = df2[df2.USGS_ID != 'E997'] # removing E997\n\ndataset2 = df2[[\n 'CAL', 'GR', 'DT', 'SP', 'DENS', 'PE',\n 'RESD', 'PHIN', 'PHID', \n 'GR_smooth', \n 'PE_smooth','He_por']]\n\n# Features we will use for prediction\nX2 = dataset2[['CAL', 'GR', 'DT', 'SP', 'DENS', 'PE',\n 'RESD', 'PHIN', 'PHID', \n 'GR_smooth', \n 'PE_smooth']]\n\n# What we are trying to predict\nY = dataset2[['He_por']]\n\nY_array2 = np.array(Y.values)\n\nX_train2, X_test2, y_train2, y_test2 = train_test_split(X2.values, Y_array2, test_size=test_size)", "_____no_output_____" ], [ "catboost_he_por(X_train2, X_test2, y_train2, y_test2,'OS1_por_cat2.csv', iter)", "Learning rate set to 0.088488\n0:\tlearn: 1.8107742\ttotal: 4ms\tremaining: 996ms\n249:\tlearn: 0.3487992\ttotal: 811ms\tremaining: 0us\nRoot Mean Squared Error: 1.753633\nMax Error: 5.642539\nMedian Abs Error: 0.595588\n\nbestTest = 1.365663226\nbestIteration = 249\n\n\nbestTest = 1.351988521\nbestIteration = 158\n\n\nbestTest = 1.308408043\nbestIteration = 85\n\n\nbestTest = 1.288816689\nbestIteration = 66\n\n\nbestTest = 1.31785958\nbestIteration = 249\n\n\nbestTest = 1.291730184\nbestIteration = 200\n\n\nbestTest = 1.348195193\nbestIteration = 94\n\n\nbestTest = 1.405906665\nbestIteration = 51\n\n\nbestTest = 1.410938079\nbestIteration = 249\n\n\nbestTest = 1.296229625\nbestIteration = 210\n\n\nbestTest = 1.30185777\nbestIteration = 101\n\n\nbestTest = 1.226083874\nbestIteration = 65\n\n\nbestTest = 1.431585845\nbestIteration = 249\n\n\nbestTest = 1.23690651\nbestIteration = 230\n\n\nbestTest = 1.350011927\nbestIteration = 98\n\n\nbestTest = 1.219725107\nbestIteration = 95\n\n\nbestTest = 1.427227825\nbestIteration = 248\n\n\nbestTest = 1.261447453\nbestIteration = 249\n\n\nbestTest = 1.29206355\nbestIteration = 132\n\n\nbestTest = 1.250443756\nbestIteration = 76\n\n\nbestTest = 1.49258307\nbestIteration = 249\n\n\nbestTest = 1.276183173\nbestIteration = 246\n\n\nbestTest = 1.256921988\nbestIteration = 121\n\n\nbestTest = 1.261813575\nbestIteration = 97\n\n\nbestTest = 1.235175379\nbestIteration = 248\n\n\nbestTest = 1.273549022\nbestIteration = 115\n\n\nbestTest = 1.296438502\nbestIteration = 63\n\n\nbestTest = 1.284697019\nbestIteration = 38\n\n\nbestTest = 1.241000932\nbestIteration = 239\n\n\nbestTest = 1.251364805\nbestIteration = 115\n\n\nbestTest = 1.247385708\nbestIteration = 101\n\n\nbestTest = 1.450062102\nbestIteration = 59\n\n\nbestTest = 1.224111162\nbestIteration = 249\n\n\nbestTest = 1.228631168\nbestIteration = 177\n\n\nbestTest = 1.274723434\nbestIteration = 87\n\n\nbestTest = 1.417190773\nbestIteration = 61\n\n\nbestTest = 1.257352085\nbestIteration = 249\n\n\nbestTest = 1.293757559\nbestIteration = 182\n\n\nbestTest = 
1.303108342\nbestIteration = 102\n\n\nbestTest = 1.424825053\nbestIteration = 59\n\n\nbestTest = 1.278076844\nbestIteration = 247\n\n\nbestTest = 1.330641207\nbestIteration = 185\n\n\nbestTest = 1.354542949\nbestIteration = 115\n\n\nbestTest = 1.320886172\nbestIteration = 71\n\n\nbestTest = 1.330778295\nbestIteration = 249\n\n\nbestTest = 1.317037838\nbestIteration = 202\n\n\nbestTest = 1.336071814\nbestIteration = 149\n\n\nbestTest = 1.453051759\nbestIteration = 159\n\n\nbestTest = 1.311902123\nbestIteration = 163\n\n\nbestTest = 1.364466795\nbestIteration = 102\n\n\nbestTest = 1.299194888\nbestIteration = 38\n\n\nbestTest = 1.311049492\nbestIteration = 24\n\n\nbestTest = 1.333217991\nbestIteration = 229\n\n\nbestTest = 1.332357043\nbestIteration = 90\n\n\nbestTest = 1.313589016\nbestIteration = 46\n\n\nbestTest = 1.344157656\nbestIteration = 24\n\n\nbestTest = 1.32514877\nbestIteration = 248\n\n\nbestTest = 1.318363833\nbestIteration = 137\n\n\nbestTest = 1.323192923\nbestIteration = 36\n\n\nbestTest = 1.393734423\nbestIteration = 45\n\n\nbestTest = 1.310088368\nbestIteration = 248\n\n\nbestTest = 1.381872501\nbestIteration = 110\n\n\nbestTest = 1.356992786\nbestIteration = 76\n\n\nbestTest = 1.405728724\nbestIteration = 27\n\n\nbestTest = 1.316139564\nbestIteration = 249\n\n\nbestTest = 1.38567418\nbestIteration = 153\n\n\nbestTest = 1.337549699\nbestIteration = 89\n\n\nbestTest = 1.308003783\nbestIteration = 67\n\n\nbestTest = 1.343001284\nbestIteration = 248\n\n\nbestTest = 1.325406418\nbestIteration = 227\n\n\nbestTest = 1.27468594\nbestIteration = 120\n\n\nbestTest = 1.304589888\nbestIteration = 62\n\n\nbestTest = 1.460085583\nbestIteration = 185\n\n\nbestTest = 1.38505903\nbestIteration = 101\n\n\nbestTest = 1.449082754\nbestIteration = 28\n\n\nbestTest = 1.60267072\nbestIteration = 18\n\n\nbestTest = 1.456512989\nbestIteration = 166\n\n\nbestTest = 1.393923717\nbestIteration = 108\n\n\nbestTest = 1.410305105\nbestIteration = 33\n\n\nbestTest = 1.39249563\nbestIteration = 28\n\n\nbestTest = 1.4665878\nbestIteration = 234\n\n\nbestTest = 1.459902859\nbestIteration = 146\n\n\nbestTest = 1.4501301\nbestIteration = 31\n\n\nbestTest = 1.434016044\nbestIteration = 37\n\n\nbestTest = 1.489164469\nbestIteration = 242\n\n\nbestTest = 1.445196139\nbestIteration = 137\n\n\nbestTest = 1.479631693\nbestIteration = 36\n\n\nbestTest = 1.360808016\nbestIteration = 37\n\n\nbestTest = 1.571043167\nbestIteration = 248\n\n\nbestTest = 1.427899008\nbestIteration = 182\n\n\nbestTest = 1.447676391\nbestIteration = 75\n\n\nbestTest = 1.447511366\nbestIteration = 29\n\n\nbestTest = 1.52673674\nbestIteration = 247\n\n\nbestTest = 1.509424848\nbestIteration = 176\n\n\nbestTest = 1.579489754\nbestIteration = 122\n\n\nbestTest = 1.658003613\nbestIteration = 72\n\n\nbestTest = 1.48602237\nbestIteration = 169\n\n\nbestTest = 1.567057882\nbestIteration = 79\n\n\nbestTest = 1.65058395\nbestIteration = 58\n\n\nbestTest = 1.60043505\nbestIteration = 29\n\n\nbestTest = 1.580135537\nbestIteration = 171\n\n\nbestTest = 1.553041595\nbestIteration = 80\n\n\nbestTest = 1.632655211\nbestIteration = 53\n\n\nbestTest = 1.642549694\nbestIteration = 23\n\n\nbestTest = 1.612450957\nbestIteration = 211\n\n\nbestTest = 1.620144559\nbestIteration = 78\n\n\nbestTest = 1.592873433\nbestIteration = 51\n\n\nbestTest = 1.77736712\nbestIteration = 39\n\n\nbestTest = 1.58837579\nbestIteration = 177\n\n\nbestTest = 1.649141221\nbestIteration = 93\n\n\nbestTest = 1.65907089\nbestIteration = 61\n\n\nbestTest = 1.776759694\nbestIteration = 
42\n\n\nbestTest = 1.598369296\nbestIteration = 235\n\n\nbestTest = 1.640583962\nbestIteration = 114\n\n\nbestTest = 1.66565265\nbestIteration = 66\n\n\nbestTest = 1.786401802\nbestIteration = 89\n\n\nbestTest = 1.644732743\nbestIteration = 244\n\n\nbestTest = 1.643668789\nbestIteration = 113\n\n\nbestTest = 1.620348491\nbestIteration = 160\n\n\nbestTest = 1.704796488\nbestIteration = 70\n\n0:\tlearn: 1.7995317\ttotal: 668us\tremaining: 166ms\n249:\tlearn: 0.4986972\ttotal: 137ms\tremaining: 0us\nRoot Mean Squared Error: 1.864200\nMax Error: 5.688028\nMedian Abs Error: 0.816273\n" ] ], [ [ "# Offset 2", "_____no_output_____" ] ], [ [ "df3 = df3.drop(['Unnamed: 0', 'Unnamed: 0.1', 'LiveTime2','ScanTime2', 'LiveTime1','ScanTime1',\n 'ref_num', 'API', 'well_name', 'sample_num' ], axis=1)\n\ndf3 = df3[df3.He_por >= 0]\ndf3 = df3[df3.USGS_ID != 'E997'] # removing E997\n\ndataset3 = df3[[\n 'CAL', 'GR', 'DT', 'SP', 'DENS', 'PE',\n 'RESD', 'PHIN', 'PHID', \n 'GR_smooth', \n 'PE_smooth','He_por']]\n\n# Features we will use for prediction\nX3 = dataset3[['CAL', 'GR', 'DT', 'SP', 'DENS', 'PE',\n 'RESD', 'PHIN', 'PHID', \n 'GR_smooth', \n 'PE_smooth']]\n\n# What we are trying to predict\nY3 = dataset3[['He_por']]\n\nY_array3 = np.array(Y3.values)\n\nX_train3, X_test3, y_train3, y_test3 = train_test_split(X3.values, Y_array3, test_size=test_size)", "_____no_output_____" ], [ "catboost_he_por(X_train3, X_test3, y_train3, y_test3,'OS2_por_cat2.csv', iter)", "Learning rate set to 0.088488\n0:\tlearn: 1.8714361\ttotal: 3.13ms\tremaining: 778ms\n249:\tlearn: 0.3593508\ttotal: 914ms\tremaining: 0us\nRoot Mean Squared Error: 1.490842\nMax Error: 5.296719\nMedian Abs Error: 0.736927\n\nbestTest = 1.373358257\nbestIteration = 232\n\n\nbestTest = 1.250193082\nbestIteration = 242\n\n\nbestTest = 1.315228685\nbestIteration = 162\n\n\nbestTest = 1.282903537\nbestIteration = 125\n\n\nbestTest = 1.360849376\nbestIteration = 248\n\n\nbestTest = 1.223036118\nbestIteration = 242\n\n\nbestTest = 1.251215647\nbestIteration = 247\n\n\nbestTest = 1.31271285\nbestIteration = 99\n\n\nbestTest = 1.413093245\nbestIteration = 248\n\n\nbestTest = 1.229949326\nbestIteration = 249\n\n\nbestTest = 1.353474607\nbestIteration = 143\n\n\nbestTest = 1.246314248\nbestIteration = 75\n\n\nbestTest = 1.38039621\nbestIteration = 244\n\n\nbestTest = 1.241719384\nbestIteration = 238\n\n\nbestTest = 1.34799675\nbestIteration = 195\n\n\nbestTest = 1.255615065\nbestIteration = 73\n\n\nbestTest = 1.440808115\nbestIteration = 248\n\n\nbestTest = 1.281205269\nbestIteration = 242\n\n\nbestTest = 1.343976838\nbestIteration = 156\n\n\nbestTest = 1.236655037\nbestIteration = 101\n\n\nbestTest = 1.506212811\nbestIteration = 249\n\n\nbestTest = 1.301891659\nbestIteration = 247\n\n\nbestTest = 1.26788334\nbestIteration = 245\n\n\nbestTest = 1.285892816\nbestIteration = 166\n\n\nbestTest = 1.308117224\nbestIteration = 248\n\n\nbestTest = 1.337126875\nbestIteration = 231\n\n\nbestTest = 1.226957886\nbestIteration = 54\n\n\nbestTest = 1.298505608\nbestIteration = 53\n\n\nbestTest = 1.280474782\nbestIteration = 241\n\n\nbestTest = 1.26776741\nbestIteration = 213\n\n\nbestTest = 1.265162858\nbestIteration = 169\n\n\nbestTest = 1.379566245\nbestIteration = 49\n\n\nbestTest = 1.25316488\nbestIteration = 249\n\n\nbestTest = 1.202083347\nbestIteration = 220\n\n\nbestTest = 1.295132729\nbestIteration = 249\n\n\nbestTest = 1.282389715\nbestIteration = 72\n\n\nbestTest = 1.308209295\nbestIteration = 245\n\n\nbestTest = 1.295913307\nbestIteration = 225\n\n\nbestTest = 
1.210702397\nbestIteration = 150\n\n\nbestTest = 1.306105345\nbestIteration = 181\n\n\nbestTest = 1.279854944\nbestIteration = 249\n\n\nbestTest = 1.256586855\nbestIteration = 248\n\n\nbestTest = 1.315485878\nbestIteration = 170\n\n\nbestTest = 1.244561938\nbestIteration = 76\n\n\nbestTest = 1.363276436\nbestIteration = 249\n\n\nbestTest = 1.271710938\nbestIteration = 249\n\n\nbestTest = 1.266809358\nbestIteration = 227\n\n\nbestTest = 1.420263159\nbestIteration = 246\n\n\nbestTest = 1.313804204\nbestIteration = 236\n\n\nbestTest = 1.236707398\nbestIteration = 233\n\n\nbestTest = 1.305940539\nbestIteration = 135\n\n\nbestTest = 1.464256531\nbestIteration = 50\n\n\nbestTest = 1.324514957\nbestIteration = 234\n\n\nbestTest = 1.278661154\nbestIteration = 227\n\n\nbestTest = 1.278023201\nbestIteration = 246\n\n\nbestTest = 1.326588185\nbestIteration = 229\n\n\nbestTest = 1.345763062\nbestIteration = 243\n\n\nbestTest = 1.278705574\nbestIteration = 238\n\n\nbestTest = 1.251067331\nbestIteration = 205\n\n\nbestTest = 1.38045731\nbestIteration = 209\n\n\nbestTest = 1.292501068\nbestIteration = 249\n\n\nbestTest = 1.27607756\nbestIteration = 243\n\n\nbestTest = 1.334189144\nbestIteration = 243\n\n\nbestTest = 1.37193557\nbestIteration = 121\n\n\nbestTest = 1.341485178\nbestIteration = 247\n\n\nbestTest = 1.285949655\nbestIteration = 206\n\n\nbestTest = 1.376818101\nbestIteration = 245\n\n\nbestTest = 1.29365644\nbestIteration = 249\n\n\nbestTest = 1.378896122\nbestIteration = 249\n\n\nbestTest = 1.287893469\nbestIteration = 249\n\n\nbestTest = 1.246502611\nbestIteration = 242\n\n\nbestTest = 1.376469078\nbestIteration = 121\n\n\nbestTest = 1.428416116\nbestIteration = 249\n\n\nbestTest = 1.470291526\nbestIteration = 198\n\n\nbestTest = 1.596878809\nbestIteration = 52\n\n\nbestTest = 1.35558894\nbestIteration = 94\n\n\nbestTest = 1.503059292\nbestIteration = 249\n\n\nbestTest = 1.412907289\nbestIteration = 242\n\n\nbestTest = 1.450492528\nbestIteration = 99\n\n\nbestTest = 1.34403743\nbestIteration = 63\n\n\nbestTest = 1.400978205\nbestIteration = 246\n\n\nbestTest = 1.450375127\nbestIteration = 127\n\n\nbestTest = 1.515778194\nbestIteration = 88\n\n\nbestTest = 1.369359797\nbestIteration = 248\n\n\nbestTest = 1.435227358\nbestIteration = 249\n\n\nbestTest = 1.42830543\nbestIteration = 248\n\n\nbestTest = 1.491426781\nbestIteration = 95\n\n\nbestTest = 1.437413065\nbestIteration = 249\n\n\nbestTest = 1.399213903\nbestIteration = 249\n\n\nbestTest = 1.425323264\nbestIteration = 238\n\n\nbestTest = 1.50377863\nbestIteration = 126\n\n\nbestTest = 1.459449318\nbestIteration = 100\n\n\nbestTest = 1.397655958\nbestIteration = 249\n\n\nbestTest = 1.388740118\nbestIteration = 239\n\n\nbestTest = 1.487237894\nbestIteration = 120\n\n\nbestTest = 1.284834839\nbestIteration = 99\n\n\nbestTest = 1.643862092\nbestIteration = 236\n\n\nbestTest = 1.730523417\nbestIteration = 243\n\n\nbestTest = 1.861841454\nbestIteration = 57\n\n\nbestTest = 2.007534043\nbestIteration = 62\n\n\nbestTest = 1.605349388\nbestIteration = 231\n\n\nbestTest = 1.641761403\nbestIteration = 239\n\n\nbestTest = 1.822922114\nbestIteration = 172\n\n\nbestTest = 1.885916342\nbestIteration = 91\n\n\nbestTest = 1.589114804\nbestIteration = 249\n\n\nbestTest = 1.694644626\nbestIteration = 240\n\n\nbestTest = 1.759890511\nbestIteration = 248\n\n\nbestTest = 1.924587481\nbestIteration = 248\n\n\nbestTest = 1.597845732\nbestIteration = 249\n\n\nbestTest = 1.710749235\nbestIteration = 244\n\n\nbestTest = 1.794875135\nbestIteration = 227\n\n\nbestTest 
= 1.880479528\nbestIteration = 152\n\n\nbestTest = 1.622406722\nbestIteration = 248\n\n\nbestTest = 1.725784934\nbestIteration = 240\n\n\nbestTest = 1.775648933\nbestIteration = 218\n\n\nbestTest = 1.831362292\nbestIteration = 201\n\n\nbestTest = 1.607231542\nbestIteration = 249\n\n\nbestTest = 1.70097982\nbestIteration = 249\n\n\nbestTest = 1.715093344\nbestIteration = 214\n\n\nbestTest = 1.860859819\nbestIteration = 242\n\n0:\tlearn: 1.8686049\ttotal: 1.32ms\tremaining: 329ms\n249:\tlearn: 0.4901016\ttotal: 337ms\tremaining: 0us\nRoot Mean Squared Error: 1.437098\nMax Error: 4.612079\nMedian Abs Error: 0.631690\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e71848fa367f32c37433e27ae9fd31ebbe33db94
150,220
ipynb
Jupyter Notebook
.ipynb_checkpoints/baseline_new-0729-checkpoint.ipynb
songxxiao/predict-future-sales
3a8af516c0ea4e27cca14c0a998b8a4a8810802d
[ "MIT" ]
2
2021-06-17T08:28:18.000Z
2021-12-24T06:58:54.000Z
old_versions/baseline_new-0729.ipynb
songxxiao/predict-future-sales
3a8af516c0ea4e27cca14c0a998b8a4a8810802d
[ "MIT" ]
2
2020-09-09T03:05:16.000Z
2020-09-12T03:12:56.000Z
old_versions/baseline_new-0729.ipynb
songxxiao/predict-future-sales
3a8af516c0ea4e27cca14c0a998b8a4a8810802d
[ "MIT" ]
null
null
null
40.567108
353
0.357369
[ [ [ "sales_train.csv - the training set. Daily historical data from January 2013 to October 2015. \ntest.csv - the test set. You need to forecast the sales for these shops and products for November 2015. \nsample_submission.csv - a sample submission file in the correct format. \nitems.csv - supplemental information about the items/products. \nitem_categories.csv - supplemental information about the items categories. \nshops.csv- supplemental information about the shops.\n\n教训,将数据类型改为int8会导致负数的出现\n\n<style>\ncode, kbd, pre, samp {\n font-family:'consolas', Lucida Console, SimSun, Fira Code, Monaco !important;\n font-size: 11pt !important;\n}\n\ndiv.output_area pre {\n font-family: 'consolas', Lucida Console, SimSun, Fira Code, Monaco !important;\n font-size: 10pt !important;\n}\n\ndiv.output_area img, div.output_area svg {\n background-color: #FFFFFF !important;\n}\n</style>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport time\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 100)\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\nfrom time_series_pipeline import *\nfrom tqdm import tqdm\nfrom scipy import stats\nfrom catboost import CatBoostRegressor\nimport lightgbm as lgb\nfrom sklearn import preprocessing, metrics\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom itertools import product\n\nitems, shops, cats, train, test = load_data()", "items has 22170 rows and 3 columns\nshops has 60 rows and 2 columns\ncats has 84 rows and 2 columns\ntrain has 2935849 rows and 6 columns\ntest has 214200 rows and 2 columns\nloading data costs 1.49 seconds\n" ], [ "def data_transform(items, shops, cats, train, test):\n '''data transformation\n '''\n start = time.time()\n train = train[(train['item_price'] < 300000 ) & (train['item_cnt_day'] < 1000)]\n train = train[train['item_price'] > 0]\n median = train[(train['shop_id'] == 32)&(train['item_id'] == 2973)&(train['date_block_num'] == 4)&(train['item_price'] > 0)].item_price.median()\n train.loc[train['item_price'] < 0, 'item_price'] = median\n train.loc[train['item_cnt_day'] < 1, 'item_cnt_day'] = 0\n train.loc[train['shop_id'] == 0, 'shop_id'] = 57\n test.loc[test['shop_id'] == 0, 'shop_id'] = 57\n # Якутск ТЦ \"Центральный\"\n train.loc[train['shop_id'] == 1, 'shop_id'] = 58\n test.loc[test['shop_id'] == 1, 'shop_id'] = 58\n # Жуковский ул. 
Чкалова 39м²\n    train.loc[train['shop_id'] == 10, 'shop_id'] = 11\n    test.loc[test['shop_id'] == 10, 'shop_id'] = 11\n    test['id'] = test['shop_id'].astype(str) + '_' + test['item_id'].astype(str)\n\n    shops.loc[shops['shop_name'] == 'Сергиев Посад ТЦ \"7Я\"', 'shop_name'] = 'СергиевПосад ТЦ \"7Я\"'\n    shops['city'] = shops['shop_name'].str.split(' ').transform(lambda x: x[0])\n    shops.loc[shops['city'] == '!Якутск', 'city'] = 'Якутск'\n    shops['city_code'] = LabelEncoder().fit_transform(shops['city'])\n    shops = shops[['shop_id','city_code']]\n\n    cats['split'] = cats['item_category_name'].str.split('-')\n    cats['type'] = cats['split'].transform(lambda x: x[0].strip())\n    cats['type_code'] = LabelEncoder().fit_transform(cats['type']) # category type\n    cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())\n    cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype']) # category subtype\n    cats = cats[['item_category_id','type_code', 'subtype_code']]\n    items.drop(['item_name'], axis = 1, inplace = True)\n    ##################### data augmentation: build the full shop x item grid\n    matrix = [] \n    cols = ['date_block_num','shop_id','item_id']\n    for i in range(34):\n        sales = train[train.date_block_num==i]\n        matrix.append(np.array(list(product([i], \n                                            sales.shop_id.unique(), \n                                            sales.item_id.unique())), \n                               dtype = 'int16'))\n    \n    matrix = pd.DataFrame(np.vstack(matrix), columns=cols)\n    matrix.sort_values(cols,inplace = True)\n    matrix['id'] = matrix['shop_id'].astype(str) + '_' + matrix['item_id'].astype(str)\n    ########### append the test set\n    test['date_block_num'] = 34\n    test['date_block_num'] = test['date_block_num'].astype(np.int8)\n    test['shop_id'] = test['shop_id'].astype(np.int8)\n    test['item_id'] = test['item_id'].astype(np.int16)\n    matrix = pd.concat([matrix, test], ignore_index = True, sort = False)\n    #matrix.fillna(0, inplace = True)\n    # aggregate the daily data into monthly data\n    df = pd.DataFrame() \n    grouped = train.groupby(['date_block_num','shop_id','item_id'])\n    df['item_cnt_month'] = grouped['item_cnt_day'].sum()\n    df.reset_index(inplace = True) \n    matrix = pd.merge(matrix, df, on = cols, how = 'left')\n    matrix['item_cnt_month'] = (matrix['item_cnt_month']\n                                .fillna(0)\n                                .clip(0,20) \n                                .astype(np.float16))\n    \n    matrix = pd.merge(matrix, shops, on = ['shop_id'], how = 'left')\n    matrix = pd.merge(matrix, items, on = ['item_id'], how = 'left')\n    matrix = pd.merge(matrix, cats, on = ['item_category_id'], how = 'left')\n    \n    grouped = train.groupby(['date_block_num','shop_id','item_id'])['item_price'].mean()\n    grouped = pd.DataFrame(grouped)\n    grouped.reset_index(inplace = True)\n    matrix = pd.merge(matrix, grouped, on = ['date_block_num','shop_id','item_id'], how = 'left')\n    matrix['item_price'] = matrix.groupby(['id'])['item_price'].transform(lambda x: x.fillna(x.median()))\n    matrix['item_price'] = matrix['item_price'].astype(np.float32)\n    del cats, grouped, items, sales, shops, test, train\n    gc.collect()\n    print('data has {} rows and {} columns'.format(df.shape[0], df.shape[1]))\n    print('The program costs %.2f seconds'%(time.time() - start))\n    return matrix", "_____no_output_____" ], [ "df = data_transform(items, shops, cats, train, test)\ndel items, shops, cats, train, test\ngc.collect()\ndf", "data has 1609122 rows and 4 columns\nThe program costs 562.76 seconds\n" ], [ "df.sort_values(by = ['shop_id','item_id','date_block_num'], inplace = True)\ndf.reset_index(inplace = True)", "_____no_output_____" ], [ "#df.reset_index(inplace=True)\ndef groupby_shift(df, col, groupcol, shift_n, fill_na = np.nan):\n    '''\n    apply fast groupby shift\n    df: data \n    col: column 
to be shifted \n    shift_n: number of periods to shift\n    fill_na: value used to fill the leading missing entries\n    '''\n    rown = df.groupby(groupcol).size().cumsum()\n    rowno = list(df.groupby(groupcol).size().cumsum()) # cumulative group sizes: each entry is the index of the first element of the next group\n    lagged_col = df[col].shift(shift_n) # shift without grouping\n    na_rows = [i for i in range(shift_n)] # indices to initialize as missing values\n    #print(na_rows)\n    for i in rowno:\n        if i == rowno[len(rowno)-1]: # skip the last index directly, otherwise it would exceed the maximum index\n            continue \n        else:\n            new = [i + j for j in range(shift_n)] # turn the first shift_n values of each group into NaN\n            na_rows.extend(new) # append to the list\n    na_rows = list(set(na_rows)) # remove duplicate values\n    na_rows = [i for i in na_rows if i <= len(lagged_col) - 1] # guard against exceeding the maximum index\n    #print(na_rows)\n    lagged_col.iloc[na_rows] = fill_na # set to NaN\n    return lagged_col\n\nstart = time.time()\n\ndf['lag_1'] = groupby_shift(df, 'item_cnt_month', ['shop_id','item_id'], 1)\ndf['lag_2'] = groupby_shift(df, 'item_cnt_month', ['shop_id','item_id'], 2)\ndf['lag_3'] = groupby_shift(df, 'item_cnt_month', ['shop_id','item_id'], 3)\ndf['lag_6'] = groupby_shift(df, 'item_cnt_month', ['shop_id','item_id'], 6)\ndf['lag_12'] = groupby_shift(df, 'item_cnt_month', ['shop_id','item_id'], 12)\n\n#df['shift_3_roll_avg_3'] = df['shift_3'].rolling(3).mean().astype(np.float32)\n#df['shift_3_roll_avg_6'] = df['shift_3'].rolling(6).mean().astype(np.float32)\n#df['shift_12_roll_avg_6'] = df['shift_12'].rolling(6).mean().astype(np.float32)\n\ndf['price_shift_1'] = groupby_shift(df, 'item_price', ['shop_id','item_id'], 1)\ndf['price_shift_2'] = groupby_shift(df, 'item_price', ['shop_id','item_id'], 2)\ndf['price_shift_3'] = groupby_shift(df, 'item_price', ['shop_id','item_id'], 3)\ndf['price_shift_6'] = groupby_shift(df, 'item_price', ['shop_id','item_id'], 6)\ndf['price_shift_12'] = groupby_shift(df, 'item_price', ['shop_id','item_id'], 12)\n\n'''\ndf['mon_avg_item_cnt'] = groupby_shift(df, 'item_cnt_month', 'date_block_num', 1)\ndf['mon_avg_item_cnt'] = df.groupby(['date_block_num'])['mon_avg_item_cnt'].transform(lambda x: x.mean())\n\ngroup = ['date_block_num', 'item_id']\ndf['mon_item_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1)\ndf['mon_item_avg_1'] = df.groupby(group)['mon_item_avg_1'].transform(lambda x: x.mean())\ndf['mon_item_avg_2'] = groupby_shift(df, 'item_cnt_month', group, 2)\ndf['mon_item_avg_2'] = df.groupby(group)['mon_item_avg_2'].transform(lambda x: x.mean())\ndf['mon_item_avg_6'] = groupby_shift(df, 'item_cnt_month', group, 6)\ndf['mon_item_avg_6'] = df.groupby(group)['mon_item_avg_6'].transform(lambda x: x.mean())\n\ngroup = ['date_block_num', 'shop_id']\ndf['mon_shop_1'] = groupby_shift(df, 'item_cnt_month', group, 1)\ndf['mon_shop_1'] = df.groupby(group)['mon_shop_1'].transform(lambda x: x.mean())\n\ngroup = ['date_block_num', 'shop_id', 'item_category_id']\ndf['mon_shop_item_1'] = groupby_shift(df, 'item_cnt_month', group, 1)\ndf['mon_shop_item_1'] = df.groupby(group)['mon_shop_item_1'].transform(lambda x: x.mean())\n\ngroup = ['date_block_num', 'shop_id', 'subtype_code']\ndf['mon_shop_sub_1'] = groupby_shift(df, 'item_cnt_month', group, 1)\ndf['mon_shop_sub_1'] = df.groupby(group)['mon_shop_sub_1'].transform(lambda x: x.mean())\n\ngroup = ['date_block_num', 'city_code']\ndf['mon_city_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1)\ndf['mon_city_avg_1'] = df.groupby(group)['mon_city_avg_1'].transform(lambda x: x.mean())\n\ngroup = ['date_block_num', 'item_id', 'city_code']\ndf['mon_item_city_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1)\ndf['mon_item_city_avg_1'] = df.groupby(group)['mon_item_city_avg_1'].transform(lambda x: x.mean())\n\ngroup = 
['date_block_num', 'type_code']\ndf['mon_type_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1)\ndf['mon_type_avg_1'] = df.groupby(group)['mon_type_avg_1'].transform(lambda x: x.mean())\n\ngroup = ['date_block_num', 'subtype_code']\ndf['mon_subtype_avg_1'] = groupby_shift(df, 'item_cnt_month', group, 1)\ndf['mon_subtype_avg_1'] = df.groupby(group)['mon_subtype_avg_1'].transform(lambda x: x.mean())\n'''\n#df['shift6_rolling6_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(6).rolling(6).mean())\n#df['shift2_rolling2_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(2).rolling(2).mean())\n#df['shift3_rolling1_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(3).rolling(1).mean()) \n#df['shift3_rolling2_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(3).rolling(2).mean())\n#df['shift6_rolling6_mean'] = df.groupby(['id'])['item_cnt_month'].transform(lambda x: x.shift(6).rolling(6).mean())\nprint('The program costs %.2f seconds'%(time.time() - start))", "The program costs 103.85 seconds\n" ], [ "def lag_feature(df, lags, col):\n tmp = df[['date_block_num','shop_id','item_id',col]]\n for i in lags:\n shifted = tmp.copy()\n shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)]\n shifted['date_block_num'] += i\n df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')\n return df\n\ngroup = df.groupby(['date_block_num']).agg({'item_cnt_month': ['mean']})\ngroup.columns = ['date_cnt']\ngroup.reset_index(inplace = True)\n\ndf = pd.merge(df, group, on = ['date_block_num'], how = 'left')\ndf['date_cnt'] = df['date_cnt'].astype(np.float16)\ndf = lag_feature(df, [1], 'date_cnt')\ndf.drop(['date_cnt'], axis = 1, inplace = True)", "_____no_output_____" ], [ "group = df.groupby(['date_block_num', 'item_id']).agg({'item_cnt_month': ['mean']})\ngroup.columns = ['date_item']\ngroup.reset_index(inplace = True)\n\ndf = pd.merge(df, group, on = ['date_block_num','item_id'], how = 'left')\ndf['date_item'] = df['date_item'].astype(np.float16)\ndf = lag_feature(df, [1,2,3,6,12], 'date_item')\ndf.drop(['date_item'], axis = 1, inplace = True)\n\ngroup = df.groupby(['date_block_num', 'shop_id']).agg({'item_cnt_month': ['mean']})\ngroup.columns = ['date_shop']\ngroup.reset_index(inplace=True)\n\ndf = pd.merge(df, group, on=['date_block_num','shop_id'], how='left')\ndf['date_shop'] = df['date_shop'].astype(np.float16)\ndf = lag_feature(df, [1,2,3,6,12], 'date_shop')\ndf.drop(['date_shop'], axis = 1, inplace = True)\n\ngroup = df.groupby(['date_block_num', 'item_category_id']).agg({'item_cnt_month': ['mean']})\ngroup.columns = ['date_cat']\ngroup.reset_index(inplace=True)\n\ndf = pd.merge(df, group, on=['date_block_num','item_category_id'], how='left')\ndf['date_cat'] = df['date_cat'].astype(np.float16)\ndf = lag_feature(df, [1], 'date_cat')\ndf.drop(['date_cat'], axis = 1, inplace=True)", "_____no_output_____" ], [ "def fill_na(df):\n for col in df.columns:\n if ('_lag_' in col):\n df[col].fillna(0, inplace=True) \n if ('price' in col):\n df[col] = df[col].transform(lambda x: x.fillna(x.median())) \n return df\n\ndf = fill_na(df)", "_____no_output_____" ], [ "df[df['id'] == '59_22088']", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "\n#group = ['item_id','date_block_num']\n#df.sort_values(by = group, inplace = True)\n#df\n#df['mon_item_avg_1'] = groupby_shift(df, 'item_cnt_month', 'item_id', 1)", "_____no_output_____" ], [ 
"'''\ndf['price_shift_1'] = groupby_shift(df, 'item_price', 'id', 1)\ndf['price_shift_2'] = groupby_shift(df, 'item_price', 'id', 2)\ndf['price_shift_3'] = groupby_shift(df, 'item_price', 'id', 3)\ndf['price_shift_6'] = groupby_shift(df, 'item_price', 'id', 6)\ndf['price_shift_12'] = groupby_shift(df, 'item_price', 'id', 12)\n'''", "_____no_output_____" ], [ "\n'''\ndef recode_na(df, cols):\n #recode na value by grouped average\n #\n for i in tqdm(cols):\n df[i] = df[i].transform(lambda x: x.fillna(x.median()))\n return df\n\ncolz = ['shift_1', 'shift_2', 'shift_3', 'shift_6', 'shift_12',\n 'shift_3_roll_avg_3', 'shift_3_roll_avg_6', 'shift_12_roll_avg_6',\n 'price_lag_1', 'price_lag_2', 'price_lag_3', 'price_lag_6',\n 'price_lag_12']\n\ndf = recode_na(df, colz)\n'''\n \ndf['month'] = df['date_block_num'] % 12\ndf = df[df['date_block_num'] > 11]\nprint('data has {} rows and {} columns'.format(df.shape[0], df.shape[1]))", "data has 6639294 rows and 36 columns\n" ], [ "df.columns", "_____no_output_____" ], [ "features = ['date_block_num',\n 'month',\n 'shop_id',\n 'item_id',\n 'city_code', \n 'item_category_id', \n 'type_code', \n 'subtype_code',\n 'lag_1', 'lag_2', 'lag_3', 'lag_6', 'lag_12',\n 'price_shift_1', 'price_shift_2', 'price_shift_3', 'price_shift_6',\n 'price_shift_12', 'date_cnt_x', 'date_cnt_y', 'date_cnt_lag_1',\n 'date_item_lag_1', 'date_item_lag_2', 'date_item_lag_3',\n 'date_item_lag_6', 'date_item_lag_12', 'date_shop_lag_1',\n 'date_shop_lag_2', 'date_shop_lag_3', 'date_shop_lag_6',\n 'date_shop_lag_12', 'date_cat_lag_1']\ncat_features = ['month', 'shop_id','item_id','city_code', 'item_category_id', 'type_code', 'subtype_code']", "_____no_output_____" ] ], [ [ "https://catboost.ai/docs/concepts/parameter-tuning.html\nhttps://catboost.ai/docs/concepts/python-reference_catboostregressor.html", "_____no_output_____" ] ], [ [ "def train_catboost(df):\n '''train a catboost\n '''\n df.sort_values(['date_block_num'], inplace = True)\n x_train = df[df['date_block_num'] < 34]\n y_train = x_train['item_cnt_month'].astype(np.float32)\n test = df[df['date_block_num'] == 34]\n \n folds = TimeSeriesSplit(n_splits = 3) # use TimeSeriesSplit cv\n splits = folds.split(x_train, y_train)\n val_pred = np.zeros(len(x_train))\n test_pred = np.zeros(len(test))\n for fold, (trn_idx, val_idx) in enumerate(splits):\n print(f'Training fold {fold + 1}')\n \n train_set = x_train.iloc[trn_idx][features]\n y_tra = y_train.iloc[trn_idx]\n val_set = x_train.iloc[val_idx][features]\n y_val = y_train.iloc[val_idx]\n\n model = CatBoostRegressor(iterations = 1500,\n learning_rate = 0.01,\n depth = 4,\n loss_function = 'RMSE',\n eval_metric = 'RMSE',\n random_seed = 42,\n bagging_temperature = 0.3,\n od_type = 'Iter',\n metric_period = 50,\n od_wait = 20)\n model.fit(train_set, y_tra, \n eval_set = (val_set, y_val),\n use_best_model = True, \n cat_features = cat_features,\n verbose = 50)\n \n val_pred[val_idx] = model.predict(x_train.iloc[val_idx][features]) # prediction\n test_pred += model.predict(test[features]) / 3 # calculate mean prediction value of 3 models\n print('-' * 50)\n print('\\n')\n \n val_rmse = np.sqrt(metrics.mean_squared_error(y_train, val_pred))\n print('Our out of folds rmse is {:.4f}'.format(val_rmse))\n return test_pred\n\ndef train_lightgbm(df):\n '''train a lightgbm\n '''\n df.sort_values(['date_block_num','shop_id','item_id'], inplace = True)\n x_train = df[df['date_block_num'] < 34]\n y_train = x_train['item_cnt_month'].astype(np.float32)\n test = df[df['date_block_num'] == 34]\n \n 
folds = TimeSeriesSplit(n_splits = 3) # use TimeSeriesSplit cv\n    splits = folds.split(x_train, y_train)\n    val_pred = np.zeros(len(x_train))\n    test_pred = np.zeros(len(test))\n    params = {\n        'boosting_type': 'gbdt',\n        'metric': 'rmse',\n        'objective': 'rmse', # loss function\n        'seed': 225,\n        'learning_rate': 0.07,\n        'lambda': 0.4, # l2 regularization\n        #'reg_alpha': 0.4, # l1 regularization\n        'max_depth': 5, # max depth of decision trees\n        'num_leaves': 68, # number of leaves\n        'bagging_fraction': 0.7, # bootstrap sampling\n        'bagging_freq' : 1,\n        'colsample_bytree': 0.7 # feature sampling\n    }\n    for fold, (trn_idx, val_idx) in enumerate(splits):\n        print(f'Training fold {fold + 1}')\n        \n        train_set = lgb.Dataset(x_train.iloc[trn_idx][features], \n                                y_train.iloc[trn_idx], \n                                categorical_feature = cat_features)\n        \n        val_set = lgb.Dataset(x_train.iloc[val_idx][features], \n                              y_train.iloc[val_idx], \n                              categorical_feature = cat_features)\n\n        model = lgb.train(params, train_set, \n                          num_boost_round = 1500, \n                          early_stopping_rounds = 100, \n                          valid_sets = [val_set], \n                          verbose_eval = 50)\n        \n        val_pred[val_idx] = model.predict(x_train.iloc[val_idx][features]) # prediction\n        #test_pred += model.predict(test[features]) / 3 # calculate mean prediction value of 3 models\n        print('-' * 50)\n        print('\\n')\n    test_pred = model.predict(test[features]) \n    val_rmse = np.sqrt(metrics.mean_squared_error(y_train, val_pred))\n    print('Our out of folds rmse is {:.4f}'.format(val_rmse))\n    return test_pred", "_____no_output_____" ], [ "test_pred_lgb = train_lightgbm(df)", "_____no_output_____" ], [ "#test_pred_cat = train_catboost(df)", "_____no_output_____" ], [ "def make_output(test_pred):\n    '''make prediction\n    '''\n    submission = pd.DataFrame({'ID': range(0,len(test_pred)),'item_cnt_month': test_pred.clip(0,20)})\n    print(submission.head(15))\n    submission.to_csv('../output/submission.csv', index = False)\n    #submission\nmake_output(test_pred_lgb)", "    ID  item_cnt_month\n0    0        0.058111\n1    1        0.200661\n2    2        0.168596\n3    3        0.189572\n4    4        0.051888\n5    5        0.051888\n6    6        0.051888\n7    7        0.051888\n8    8        0.055734\n9    9        0.051888\n10  10        0.055734\n11  11        0.127311\n12  12        0.054265\n13  13        0.083640\n14  14        0.071546\n" ] ], [ [ "```\nIndex               67512848\ndate                67512848\ndate_block_num       8439106\nshop_id              8439106\nitem_id             16878212\nitem_price          33756424\nitem_cnt_day         8439106\ncity_code            8439106\nitem_category_id     8439106\ntype_code            8439106\nsubtype_code         8439106\nlag_1               67512848\ndtype: int64\n```", "_____no_output_____" ] ], [ [ "df['lag_t1'] = df.groupby(['shop_id','item_id'])['item_cnt_day'].transform(lambda x: x.shift(30))\n\n", "_____no_output_____" ], [ "date = df.groupby(['shop_id','item_id'])['date']\ndate = pd.DataFrame(date)\ndate", "_____no_output_____" ], [ "df['lag_t1_rolling'] = df.groupby(['shop_id','item_id'])['item_cnt_day'].transform(lambda x: x.shift(30).rolling(30).mean())", "_____no_output_____" ], [ "df['lag_3'].isna().value_counts()", "_____no_output_____" ], [ "df['lag_t7'] = df.groupby(['shop_id', 'item_id'])['item_cnt_day'].transform(lambda x: x.shift(7))\ndf\ndf['lag_t7'].isna().value_counts()", "_____no_output_____" ] ], [ [ "The test set is the cross product of certain shops and certain items in month 34: 5100 items * 42 shops = 214200 pairs. Compared with the training set there are 363 new items, so for most items in the test set the target value should be zero. The training set, on the other hand, only contains shop-item pairs that were actually sold or returned in the past. The main idea is to compute monthly sales and, within each month, expand the grid so that every unique pair with no sales gets an explicit zero. This way the training data will resemble the test data.", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "data = pd.read_pickle('../data/data.pkl')\ndata\n# 6639294 rows × 40 columns", "_____no_output_____" ], [ "del data\ngc.collect()", "_____no_output_____" ], [ "data.columns\nfeatures = [\n    
'date_block_num',\n 'shop_id',\n 'item_id',\n #'item_cnt_month',\n 'city_code',\n 'item_category_id',\n 'type_code',\n 'subtype_code',\n 'item_cnt_month_lag_1',\n 'item_cnt_month_lag_2',\n 'item_cnt_month_lag_3',\n 'item_cnt_month_lag_6',\n 'item_cnt_month_lag_12',\n 'date_avg_item_cnt_lag_1',\n 'date_item_avg_item_cnt_lag_1',\n 'date_item_avg_item_cnt_lag_2',\n 'date_item_avg_item_cnt_lag_3',\n 'date_item_avg_item_cnt_lag_6',\n 'date_item_avg_item_cnt_lag_12',\n 'date_shop_avg_item_cnt_lag_1',\n 'date_shop_avg_item_cnt_lag_2',\n 'date_shop_avg_item_cnt_lag_3',\n 'date_shop_avg_item_cnt_lag_6',\n 'date_shop_avg_item_cnt_lag_12',\n 'date_cat_avg_item_cnt_lag_1',\n 'date_shop_cat_avg_item_cnt_lag_1',\n 'date_shop_type_avg_item_cnt_lag_1',\n 'date_shop_subtype_avg_item_cnt_lag_1',\n 'date_city_avg_item_cnt_lag_1',\n 'date_item_city_avg_item_cnt_lag_1',\n 'date_type_avg_item_cnt_lag_1',\n 'date_subtype_avg_item_cnt_lag_1',\n 'delta_price_lag',\n 'month',\n 'days',\n 'item_shop_last_sale',\n 'item_last_sale',\n 'item_shop_first_sale',\n 'item_first_sale',\n]\ncat_features = ['date_block_num',\n 'month', \n 'shop_id',\n 'item_id',\n 'city_code',\n 'item_category_id',\n 'type_code', \n 'subtype_code']\n#data\n#data['id'] = data['shop_id'].astype(str) + '_' + test_indicate['item_id'].astype(str)\ndata.sort_values(['date_block_num','shop_id','item_id'],inplace = True)", "_____no_output_____" ], [ "'''\nx_train = data[data['date_block_num'] < 34]\ny_train = x_train['item_cnt_month'].astype(np.float32)\ntest = data[data['date_block_num'] == 34]\n\n#need_to_remove = ['item_cnt_day','city_code','item_category_id',\n # 'type_code','subtype_code', 'shop_id', 'item_id', 'id']\n#features = [i for i in list(df.columns) if i not in need_to_remove]\n#n_fold = 3 #3 for timely purpose of the kernel\nfolds = TimeSeriesSplit(n_splits = 3) # use TimeSeriesSplit cv\nsplits = folds.split(x_train, y_train)\nval_pred = np.zeros(len(x_train))\ntest_pred = np.zeros(len(test))\nfor fold, (trn_idx, val_idx) in enumerate(splits):\n print(f'Training fold {fold + 1}')\n \n train_set = x_train.iloc[trn_idx][features]\n y_tra = y_train.iloc[trn_idx]\n val_set = x_train.iloc[val_idx][features]\n y_val = y_train.iloc[val_idx]\n\n model = CatBoostRegressor(iterations = 500,\n learning_rate = 0.05,\n depth = 6,\n eval_metric = 'RMSE',\n random_seed = 42,\n bagging_temperature = 0.2,\n od_type = 'Iter',\n metric_period = 50,\n od_wait = 20)\n model.fit(train_set, y_tra, \n eval_set = (val_set, y_val),\n use_best_model = True, \n cat_features = cat_features,\n verbose = 50)\n \n val_pred[val_idx] = model.predict(x_train.iloc[val_idx][features]) # prediction\n test_pred += model.predict(test[features]) / 3 # calculate mean prediction value of 3 models\n print('-' * 50)\n print('\\n')\n \nval_rmse = np.sqrt(metrics.mean_squared_error(y_train, val_pred))\nprint('Our out of folds rmse is {:.4f}'.format(val_rmse))\n'''", "_____no_output_____" ], [ "test_pred_lgb = train_lightgbm(data)", "Training fold 1\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
e718536674e92d4977741f5c600a0a9132b4fecc
1,288
ipynb
Jupyter Notebook
Camera_Image_Processing.ipynb
sumit15bt/SuperVisedML
5a368a655b98d541f14b716f4a7c3b8b35edb15d
[ "Apache-2.0" ]
null
null
null
Camera_Image_Processing.ipynb
sumit15bt/SuperVisedML
5a368a655b98d541f14b716f4a7c3b8b35edb15d
[ "Apache-2.0" ]
null
null
null
Camera_Image_Processing.ipynb
sumit15bt/SuperVisedML
5a368a655b98d541f14b716f4a7c3b8b35edb15d
[ "Apache-2.0" ]
null
null
null
20.774194
48
0.51941
[ [ [ "import cv2\n\ncapture=cv2.VideoCapture(0)\nimage1=cv2.imread(\"boy.jpeg\")\nwhile capture.isOpened():\n status,image2=capture.read()\n cv2.imshow(\"Camera Image\",image2)\n image_part=image2[100:200,300:400]\n image1[100:200,300:400]=image_part\n cv2.imshow(\"Pc Image\",image1)\n if cv2.waitKey(1) & 0xFF==ord('q'):\n break\ncv2.destroyAllWindows()\ncapture.release()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
e718551a98499a936cae0e8cf0ac5fc00496f455
45,706
ipynb
Jupyter Notebook
data/Untitled.ipynb
data301-2021-winter1/solo-avaneendrasathivada
ac86e8d937a19dc9900f534a2b851eda99322f91
[ "MIT" ]
null
null
null
data/Untitled.ipynb
data301-2021-winter1/solo-avaneendrasathivada
ac86e8d937a19dc9900f534a2b851eda99322f91
[ "MIT" ]
null
null
null
data/Untitled.ipynb
data301-2021-winter1/solo-avaneendrasathivada
ac86e8d937a19dc9900f534a2b851eda99322f91
[ "MIT" ]
null
null
null
36.978964
337
0.408524
[ [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline \n%reload_ext autoreload\n%autoreload 2\n\n", "_____no_output_____" ], [ "df = pd.read_csv(\"startup_funding.csv\")\ndf", "_____no_output_____" ], [ "df = df.copy().drop(['Remarks', 'Sr No'], axis=1)\ndf = df.dropna(axis=0)", "_____no_output_____" ], [ "for i in df[\"Date dd/mm/yyyy\"]:\n if(i.count(\"/\")<2):\n if(i.count(\".\")>0):\n df[df[\"Date dd/mm/yyyy\"]==i] = str(i).replace(\".\",\"/\")\n elif(i.count(\"/\")>0):\n df[df[\"Date dd/mm/yyyy\"]==i] = i[0:5] + \"/\" + i[5:]\n\n", "_____no_output_____" ], [ "df[\"Date\"] = pd.to_datetime(df[\"Date dd/mm/yyyy\"], errors = \"coerce\")\ndf[\"Year\"] = df[\"Date\"].dt.year", "_____no_output_____" ], [ "df=df.rename(columns={\"Amount in USD\":\"Amount\",\"Industry Vertical\":\"Industry_type\",\"City Location\":\"City\"})\ndf.columns=[x.replace(' ','_') for x in df.columns]\ndf[\"Amount\"] = [str(x).replace(',','') for x in df[\"Amount\"]]\ndf[\"Amount\"] = [str(x).replace('.','') for x in df[\"Amount\"]]\ndf[\"Amount\"] = [str(x).replace('+','') for x in df[\"Amount\"]]\nA = df.index[df['Amount'] == \"unknown\"].tolist()\nB = df.index[df['Amount'] == \"Undisclosed\"].tolist()\nC= df.index[df['Amount'] == \"undisclosed\"].tolist()\ndf1 = A + B +C\ndf1", "_____no_output_____" ], [ "df.drop(labels=df1,axis=0,inplace=True)", "_____no_output_____" ], [ "df[\"Year\"] = df[\"Year\"].astype(int)", "_____no_output_____" ], [ "df[\"Industry_type\"].unique()", "_____no_output_____" ], [ "df[\"City__Location\"].unique()", "_____no_output_____" ], [ "df=df.replace(\"Food-Tech\",\"FoodTech\")\ndf=df.replace(\"B2B Marketing\",\"B2B\")\ndf=df.replace(\"B2B Platform\",\"B2B\")\ndf=df.replace(\"B2B-focused foodtech startup\",\"B2B\")\ndf=df.replace(\"Consumer internet\",\"Consumer Internet\")\ndf=df.replace(\"Consumer Technology\",\"Consumer Internet\")\ndf=df.replace(\"Consumer Portal\",\"Consumer Internet\")\ndf=df.replace(\"Consumer Goods\",\"Consumer Internet\")\ndf=df.replace(\"Customer Service Platform\",\"Customer Service\")\ndf=df.replace(\"E-Tech\",\"E-commerce\")\ndf=df.replace(\"ECommerce\",\"E-commerce\")\ndf=df.replace(\"Ecommerce'\",\"E-commerce\")\ndf=df.replace(\"Ed-Tech\",\"E-commerce\")\ndf=df.replace(\"Ed-tech\",\"E-commerce\")\ndf=df.replace(\"EdTech\",\"E-commerce\")\ndf=df.replace(\"Edtech'\",\"E-commerce\")\ndf=df.replace(\"E-Commerce\",\"E-commerce\")\ndf=df.replace(\"Ecommerce\",\"E-commerce\")\ndf=df.replace(\"Edtech\",\"E-commerce\")\ndf=df.replace(\"Deep-Tech\",\"E-commerce\")\ndf=df.replace(\"Fiinance\",\"Finance\")\ndf=df.replace(\"Financial Tech\",\"Finance\")\ndf=df.replace(\"Fintech\",\"Finance\")\ndf=df.replace(\"Fin-Tech\",\"Finance\")\ndf=df.replace(\"Financial Tech\",\"Finance\")\ndf=df.replace(\"FinTech\",\"Finance\")\ndf=df.replace(\"Food\",\"Food & Beverage\")\ndf=df.replace(\"Food & Beverages\",\"Food & Beverage\")\ndf=df.replace(\"FoodTech\",\"Food & Beverage\")\ndf=df.replace(\"Food Tech\",\"Food & Beverage\")\ndf=df.replace(\"Food and Beverages\",\"Food & Beverage\")\ndf=df.replace(\"eCommece\",\"E-commerce\")\ndf=df.replace(\"eCommerce\",\"E-commerce\")\ndf=df.replace(\"ecommerce\",\"E-commerce\")\ndf=df.replace(\"Digital Media\",\"E-commerce\")\ndf=df.replace(\"Food and Beverage\",\"Food & Beverage\")\ndf=df.replace('healthcare','Health and wellness')\ndf=df.replace('Healthcare','Health and wellness')\ndf=df.replace('Health and Wellness','Health and wellness')\ndf=df.replace('Tech', 
'Technology')\ndf=df.replace('Travel Tech','Technology')\ndf=df.replace('Software','Technology')\ndf=df.replace('Ecommerce','E-commerce')\ndf=df.replace('Agtech','Agriculture')\ndf=df.replace(\"AI\",'Artificial Intelligence')\ndf=df.replace (\"Automation\",'Automobile')\ndf=df.replace('Automotive','Automobile')\ndf=df.replace(\"Nanotechnology\",'Artificial Intelligence')\ndf=df.replace('Health Care','Health and wellness')\ndf=df.replace('Hospitality','Health and wellness')\ndf=df.replace('Online Education', 'Education')\ndf=df.replace('Logistics Tech','Technology')\ndf=df.replace('Clean-tech','Technology')\ndf=df.replace('FMCG','Technology')\ndf=df.replace('Video','Technology')\ndf=df.replace('Video Games','Technology')\ndf=df.replace('Services Platform','Customer Service')\ndf=df.replace('Transport','Transportation')\ndf=df.replace('SaaS','Others')\ndf=df.replace('Reality','Others')\ndf=df.replace('Last Mile Transportation','Transportation')\ndf=df.replace('Saas','Transportation')\ndf=df.replace('Publishing','Transportation')\ndf=df.replace('SaaS','Others')\ndf=df.replace('Services','Customer Service')\ndf=df.replace('IT', 'Information Technology')\ndf=df.replace('Inspiration', 'Investment')\ndf=df.replace('IoT','Investment')\ndf=df.replace('NBFC','Others')\ndf=df.replace('SaaS','Others')\ndf=df.replace('Social Media','Social Network')\ndf=df.replace('SaaS, Ecommerce','E-commerce')\ndf=df.replace('Online Marketplace','E-commerce')\ndf=df.replace('Luxury Label','Others')\ndf=df.replace('Media', 'Others')\ndf=df.replace('Advertising, Marketing','E-commerce')\ndf=df.replace('Artificial Intelligence','Technology')\ndf=df.replace(\"Bangalore\",\"Bengaluru\")\ndf=df.replace(\"Bangalore/ Bangkok\",\"Bengaluru\")\ndf=df.replace(\"Bangalore / SFO\",\"Bengaluru\")\ndf=df.replace(\"Pune/Seattle\",\"Pune\")\ndf=df.replace(\"New York, Bengaluru\",\"Bengaluru\")\ndf=df.replace(\"Pune / Dubai\",\"Pune\")\ndf=df.replace(\"New Delhi / US\",\"New Delhi\")\ndf=df.replace(\"Mumbai / UK\",\"Mumbai\")\ndf=df.replace(\"SFO / Bangalore\",\"Bengaluru\")\ndf=df.replace(\"Bengaluru and Gurugram\",\"Bengaluru\")\ndf=df.replace('India/US',\"Bengaluru\")\ndf=df.replace('Delhi & Cambridge',\"New Delhi\")\ndf=df.replace('Mumbai/Bengaluru',\"Bengaluru\")\ndf=df.replace(\"India/Singapore\",\"Singapore\")\ndf=df.replace(\"Sinagpore\",\"Singapore\")\n\n", "_____no_output_____" ], [ "len(df[\"Industry_type\"].unique())", "_____no_output_____" ], [ "df.columns=[x.replace(' ','_') for x in df.columns]", "_____no_output_____" ], [ "df2=df[[\"Date\",\"Year\",\"Industry_type\",\"Amount\",\"City__Location\"]]", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "df2 = df2[df2.Amount != \"05/07/2018\"]", "_____no_output_____" ], [ "df2.to_csv('mf2.csv')", "_____no_output_____" ], [ "df3= pd.read_csv(\"mf2.csv\")\ndf3", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7185a19dfabd8a0804e45048b92ab6093abaf05
312,828
ipynb
Jupyter Notebook
Assignments/Assignment_6.ipynb
niranjana1997/RIT-DSCI-633-FDS
1e9a70a9fbfae1b87c8e0e9665160eeec44a8403
[ "MIT" ]
null
null
null
Assignments/Assignment_6.ipynb
niranjana1997/RIT-DSCI-633-FDS
1e9a70a9fbfae1b87c8e0e9665160eeec44a8403
[ "MIT" ]
null
null
null
Assignments/Assignment_6.ipynb
niranjana1997/RIT-DSCI-633-FDS
1e9a70a9fbfae1b87c8e0e9665160eeec44a8403
[ "MIT" ]
null
null
null
313.769308
103,490
0.908212
[ [ [ "**Importing required libraries**", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nimport seaborn as sns\nfrom sklearn.cluster import KMeans", "_____no_output_____" ] ], [ [ "**Loading the Dataset**", "_____no_output_____" ] ], [ [ "data = datasets.load_iris()\ndf = pd.DataFrame(data.data, columns = data.feature_names)\ndf['target'] = data.target\n# replacing label classes with numerical value\ndf['label'] = df.target.replace(dict(enumerate(data.target_names)))", "_____no_output_____" ], [ "# prints the first 10 rows in the dataframe\ndf.head(10)", "_____no_output_____" ] ], [ [ "**Checking the correlation between each feature**", "_____no_output_____" ] ], [ [ "# shows correlation between and all features\nplt.figure(1)\nsns.heatmap(df.corr())\nplt.title(\"Correlation of iris classes\") ", "_____no_output_____" ], [ "# statistical summary for the given dataset\ndf.describe() ", "_____no_output_____" ], [ "# Checking for null values\ndf.isnull().sum()", "_____no_output_____" ], [ "# Removing duplicates\ndf = df.drop_duplicates()", "_____no_output_____" ], [ "# Distribution of each class is found\ndf.value_counts('label')", "_____no_output_____" ], [ "''' Comparing petal length with petal width '''\nsns.scatterplot(x = 'petal length (cm)', y = 'petal width (cm)', hue = 'label', data = df)\n'''The plot below shows that 'Setosa' has smaller petal length and petal width\nThe 'Versicolor' lies in the middle with petal length(3 - 5 cm) and petal width (1 - 1.5 cm)\nThe 'Viginica' has larger petal length and petal width\n'''", "_____no_output_____" ], [ "''' Comparing sepal length with sepal width '''\nsns.scatterplot(x = 'sepal length (cm)', y = 'sepal width (cm)', hue = 'label', data = df )\n'''The plot below shows that 'Setosa' has lower sepal length but higher sepal width\nIts harder to comment about the other species\n'''", "_____no_output_____" ] ], [ [ "**Using GMM to separate the clusters using all 4 features**", "_____no_output_____" ] ], [ [ "# GMM is imported\nfrom sklearn.mixture import GaussianMixture\n\n# the model is fit with the iris dataset\ngm = GaussianMixture(3, random_state = 1).fit(data.data)\n\n# predict method is called with dataset\ngm_cluster = gm.predict(data.data)\n\n# prints gm_cluster\nprint(gm_cluster)", "[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 2 0 2 0 2\n 2 2 2 0 2 2 2 2 2 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0]\n" ], [ "# Report value of np.sum(y_pred==y) / len(y_pred)\nprint(np.sum(gm_cluster) / len(gm_cluster))", "0.9333333333333333\n" ], [ "# make_blobs is imported\nfrom sklearn.datasets import make_blobs\n\n# Values of the parameters are set\nblob_centers = np.array(\n [[ 0.2, 2.3],\n [-1.5 , 2.3],\n [-2.8, 1.8],\n [-2.8, 2.8],\n [-2.8, 1.3]])\nblob_std = np.array([0.4, 0.3, 0.1, 0.1, 0.1])\n\n# generating blobs\nX, y = make_blobs(n_samples = 2000, centers = blob_centers, cluster_std = blob_std)\n\n\n# Plotting blobs\nplt.figure(1)\nplt.title(\"Plotting Blobs\")\nplt.scatter(X[:,0], X[:,1], c = y, edgecolor = 'k')\n\n# Showing the plot\nplt.show()", "_____no_output_____" ] ], [ [ "**K-Means clustering**", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans", "_____no_output_____" ], [ "# training k-means to train on this dataset\nkmeans = KMeans(5)\ny_pred 
= kmeans.fit_predict(X)", "_____no_output_____" ], [ "# Printing values of centroid\nprint(\"Centroid values: \")\nprint(kmeans.cluster_centers_)", "Centroid values: \n[[-2.80497198 1.30471148]\n [-1.48227869 2.29875792]\n [ 0.21472882 2.26942721]\n [-2.79844179 2.79369933]\n [-2.79554235 1.80300259]]\n" ], [ "# X_new Test set is declared\nX_new = np.array([[0, 2], [3, 2], [-3, 3], [-3, 2.5]])\n\n# Predicting with X_new\nkmeans.predict(X_new)", "_____no_output_____" ], [ "# importing libraries for vornoi\nfrom scipy.spatial import Voronoi, voronoi_plot_2d\n\nval = kmeans.cluster_centers_\n\n# Plotting using Voronoi \nvoronoi = Voronoi(val)\nplt.figure(1)\nvoronoi_plot_2d(voronoi, line_colors='purple', point_size = 20)\nplt.title(\"Voronoi plot using data, centroids, decision boundaries\")\nplt.scatter(X[:,0], X[:,1], c = kmeans.labels_)\n\n# displaying the plot\nplt.show()", "_____no_output_____" ], [ "# The value of K is set to 3\nkmeans_3 = KMeans(3)\ny_pred_3 = kmeans_3.fit_predict(X)\n\n# Printing values of centroid when K = 3\nprint(\"Centroid values when K = 3: \")\nprint(kmeans_3.cluster_centers_)", "Centroid values when K = 3: \n[[-2.1900089 2.55736009]\n [ 0.14766325 2.26323372]\n [-2.80285839 1.55246954]]\n" ], [ "# The value of K is set to 8\nkmeans_8 = KMeans(8)\ny_pred_8 = kmeans_8.fit_predict(X)\n\n# Printing values of centroid when K = 8\nprint(\"Centroid values when K = 8: \")\nprint(kmeans_8.cluster_centers_)", "Centroid values when K = 8: \n[[-2.80497198 1.30471148]\n [ 0.07712767 1.85731924]\n [-2.80018272 2.79392601]\n [-1.71317481 2.18272424]\n [ 0.5897612 2.3478273 ]\n [-1.25757194 2.42672942]\n [-2.80317962 1.8007378 ]\n [-0.0113229 2.58528663]]\n" ], [ "# Print values of inertia when K = 3, K = 5 and K = 8\nprint(\"Inertia when K = 3 : \", kmeans_3.inertia_)\nprint(\"Inertia when K = 5 : \", kmeans.inertia_)\nprint(\"Inertia when K = 8: \", kmeans_8.inertia_)\n\n# plotting the inertia as a function of k\nnum_clusters = []\ninertia = []\nfor i in range(1,11):\n kmeans_iter = KMeans(i)\n kmeans_iter.fit(X)\n num_clusters.append(i)\n inertia.append(kmeans_iter.inertia_)\n \n# printing the values of inertia\nprint(\"\\nValues of inertia when K =\", num_clusters, \":\\n\",inertia)\nprint(\"\\nResult: With the help of the inertia calculated, we can observe that inertia decreases with increase in the value of K. \\nThe decrease is steep when the value of K is small and the steepness gradually decreases\")\n\n# Elbow method\nplt.figure(1)\nplt.plot(num_clusters, inertia, 'bx-')\nplt.xlabel('K')\nplt.ylabel('Inertia')\nplt.title('The Elbow Method')\nplt.show()\n\n\nprint(\"From the graph, it is evident that the optimal elbow value for K is 4.\")\nprint(\"The value of inertia after K = 4 does not change a lot.\")", "Inertia when K = 3 : 634.606726856782\nInertia when K = 5 : 214.3741484421692\nInertia when K = 8: 125.1376156056921\n\nValues of inertia when K = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] :\n [3512.6332288717017, 1138.2690008468387, 634.606726856782, 264.26464932438427, 214.37681898442708, 174.2692677731677, 147.26740483176485, 125.2122293557486, 109.68571448024501, 98.72045688792772]\n\nResult: With the help of the inertia calculated, we can observe that inertia decreases with increase in the value of K. \nThe decrease is steep when the value of K is small and the steepness gradually decreases\n" ] ] ]
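One caution on the GMM cells above: the printed value is `np.sum(gm_cluster) / len(gm_cluster)`, which sums the raw cluster IDs rather than computing the `np.sum(y_pred==y) / len(y_pred)` agreement named in the comment, and GMM cluster IDs are arbitrary in any case. A sketch of a label-matched accuracy, assuming `gm_cluster` and `data.target` from the notebook:

```python
import numpy as np

# Map each GMM cluster to its majority true class, then score agreement.
y_true = data.target
y_mapped = np.zeros_like(gm_cluster)
for k in np.unique(gm_cluster):
    members = gm_cluster == k
    y_mapped[members] = np.bincount(y_true[members]).argmax()
print(np.sum(y_mapped == y_true) / len(y_true))
```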
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7186a80202da79053050e75fc741e5227e5ab71
586,655
ipynb
Jupyter Notebook
code/spacy_ner_postprocessing.ipynb
k-woitas/e-rara
928a4bcba0a637608183028b98cb4f17e5d13d49
[ "MIT" ]
null
null
null
code/spacy_ner_postprocessing.ipynb
k-woitas/e-rara
928a4bcba0a637608183028b98cb4f17e5d13d49
[ "MIT" ]
null
null
null
code/spacy_ner_postprocessing.ipynb
k-woitas/e-rara
928a4bcba0a637608183028b98cb4f17e5d13d49
[ "MIT" ]
null
null
null
586,655
586,655
0.812355
[ [ [ "# Enable save and load to local machine\nfrom google.colab import files\nimport io", "_____no_output_____" ], [ "# Enable save and load to Google Drive\nfrom google.colab import drive\ndrive.mount('/content/drive', force_remount=True)", "Mounted at /content/drive\n" ], [ "# Import general modules for data processing\nimport string\nimport re\nimport numpy as np\nimport pandas as pd\nprint(\"Succesfully imported necessary modules\")", "Succesfully imported necessary modules\n" ], [ "pd. __version__", "_____no_output_____" ], [ "# Install and import SpaCy library for NLP\n\n!pip install -U spacy==3.0 # to force the latest version\nimport spacy \n!pip install -U pip setuptools wheel\n\n#!python -m pip install --upgrade spacy # 2.3.5 \nprint(\"SpaCy installed and imported successfully\") ", "Collecting spacy==3.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/8b/62/a98c61912ea57344816dd4886ed71e34d8aeec55b79e5bed05a7c2a1ae52/spacy-3.0.0-cp37-cp37m-manylinux2014_x86_64.whl (12.7MB)\n\u001b[K |████████████████████████████████| 12.7MB 273kB/s \n\u001b[?25hCollecting pathy\n Downloading https://files.pythonhosted.org/packages/a2/53/97dc0197cca9357369b3b71bf300896cf2d3604fa60ffaaf5cbc277de7de/pathy-0.4.0-py3-none-any.whl\nCollecting srsly<3.0.0,>=2.4.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/19/54/76982427ceb495dd19ff982c966708c624b85e03c45bf1912feaf60c7b2d/srsly-2.4.0-cp37-cp37m-manylinux2014_x86_64.whl (456kB)\n\u001b[K |████████████████████████████████| 460kB 38.6MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (1.0.5)\nCollecting thinc<8.1.0,>=8.0.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e3/08/20e707519bcded1a0caa6fd024b767ac79e4e5d0fb92266bb7dcf735e338/thinc-8.0.2-cp37-cp37m-manylinux2014_x86_64.whl (1.1MB)\n\u001b[K |████████████████████████████████| 1.1MB 15.2MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: blis<0.8.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (0.4.1)\nRequirement already satisfied, skipping upgrade: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (2.0.5)\nRequirement already satisfied, skipping upgrade: wasabi<1.1.0,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (0.8.2)\nRequirement already satisfied, skipping upgrade: importlib-metadata>=0.20; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (3.8.1)\nCollecting catalogue<2.1.0,>=2.0.1\n Downloading https://files.pythonhosted.org/packages/48/5c/493a2f3bb0eac17b1d48129ecfd251f0520b6c89493e9fd0522f534a9e4a/catalogue-2.0.1-py3-none-any.whl\nCollecting typer<0.4.0,>=0.3.0\n Downloading https://files.pythonhosted.org/packages/90/34/d138832f6945432c638f32137e6c79a3b682f06a63c488dcfaca6b166c64/typer-0.3.2-py3-none-any.whl\nRequirement already satisfied, skipping upgrade: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (20.9)\nCollecting pydantic<1.8.0,>=1.7.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/b3/0a/52ae1c659fc08f13dd7c0ae07b88e4f807ad83fb9954a59b0b0a3d1a8ab6/pydantic-1.7.3-cp37-cp37m-manylinux2014_x86_64.whl (9.1MB)\n\u001b[K |████████████████████████████████| 9.1MB 34.0MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (54.2.0)\nRequirement already satisfied, skipping upgrade: 
tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (4.41.1)\nRequirement already satisfied, skipping upgrade: jinja2 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (2.11.3)\nCollecting spacy-legacy<3.1.0,>=3.0.0\n Downloading https://files.pythonhosted.org/packages/78/d8/e25bc7f99877de34def57d36769f0cce4e895b374cdc766718efc724f9ac/spacy_legacy-3.0.2-py2.py3-none-any.whl\nRequirement already satisfied, skipping upgrade: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (1.19.5)\nRequirement already satisfied, skipping upgrade: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (2.23.0)\nRequirement already satisfied, skipping upgrade: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (3.0.5)\nRequirement already satisfied, skipping upgrade: typing-extensions>=3.7.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from spacy==3.0) (3.7.4.3)\nCollecting smart-open<4.0.0,>=2.2.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/11/9a/ba2d5f67f25e8d5bbf2fcec7a99b1e38428e83cb715f64dd179ca43a11bb/smart_open-3.0.0.tar.gz (113kB)\n\u001b[K |████████████████████████████████| 122kB 41.3MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->spacy==3.0) (3.4.1)\nRequirement already satisfied, skipping upgrade: click<7.2.0,>=7.1.1 in /usr/local/lib/python3.7/dist-packages (from typer<0.4.0,>=0.3.0->spacy==3.0) (7.1.2)\nRequirement already satisfied, skipping upgrade: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->spacy==3.0) (2.4.7)\nRequirement already satisfied, skipping upgrade: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->spacy==3.0) (1.1.1)\nRequirement already satisfied, skipping upgrade: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==3.0) (1.24.3)\nRequirement already satisfied, skipping upgrade: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==3.0) (2.10)\nRequirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==3.0) (2020.12.5)\nRequirement already satisfied, skipping upgrade: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==3.0) (3.0.4)\nBuilding wheels for collected packages: smart-open\n Building wheel for smart-open (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for smart-open: filename=smart_open-3.0.0-cp37-none-any.whl size=107098 sha256=694fc04e425d2cc5c1e26a5447c7fe5a5c1dca4b3341fc4325657e5a717d44d2\n Stored in directory: /root/.cache/pip/wheels/18/88/7c/f06dabd5e9cabe02d2269167bcacbbf9b47d0c0ff7d6ebcb78\nSuccessfully built smart-open\nInstalling collected packages: typer, smart-open, pathy, catalogue, srsly, pydantic, thinc, spacy-legacy, spacy\n Found existing installation: smart-open 4.2.0\n Uninstalling smart-open-4.2.0:\n Successfully uninstalled smart-open-4.2.0\n Found existing installation: catalogue 1.0.0\n Uninstalling catalogue-1.0.0:\n Successfully uninstalled catalogue-1.0.0\n Found existing installation: srsly 1.0.5\n Uninstalling srsly-1.0.5:\n Successfully uninstalled srsly-1.0.5\n Found existing installation: thinc 7.4.0\n Uninstalling thinc-7.4.0:\n Successfully uninstalled thinc-7.4.0\n Found existing installation: spacy 2.2.4\n Uninstalling spacy-2.2.4:\n Successfully uninstalled spacy-2.2.4\nSuccessfully installed catalogue-2.0.1 pathy-0.4.0 pydantic-1.7.3 smart-open-3.0.0 spacy-3.0.0 spacy-legacy-3.0.2 srsly-2.4.0 thinc-8.0.2 typer-0.3.2\nCollecting pip\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/fe/ef/60d7ba03b5c442309ef42e7d69959f73aacccd0d86008362a681c4698e83/pip-21.0.1-py3-none-any.whl (1.5MB)\n\u001b[K |████████████████████████████████| 1.5MB 6.5MB/s \n\u001b[?25hRequirement already up-to-date: setuptools in /usr/local/lib/python3.7/dist-packages (54.2.0)\nRequirement already up-to-date: wheel in /usr/local/lib/python3.7/dist-packages (0.36.2)\nInstalling collected packages: pip\n Found existing installation: pip 19.3.1\n Uninstalling pip-19.3.1:\n Successfully uninstalled pip-19.3.1\nSuccessfully installed pip-21.0.1\nSpaCy installed and imported successfully\n" ], [ "# Download and load German language model\n# '...sm' = small, '...md' = medium, '...lg' = large language model\n\n!python -m spacy download de_core_news_lg \nimport de_core_news_lg\nprint(\"SpaCy language model imported successfully\") ", "2021-04-08 05:41:53.498066: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\nCollecting de-core-news-lg==3.0.0\n Downloading https://github.com/explosion/spacy-models/releases/download/de_core_news_lg-3.0.0/de_core_news_lg-3.0.0-py3-none-any.whl (573.0 MB)\n\u001b[K |████████████████████████████████| 573.0 MB 2.5 kB/s \n\u001b[?25hRequirement already satisfied: spacy<3.1.0,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from de-core-news-lg==3.0.0) (3.0.0)\nRequirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (1.19.5)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (54.2.0)\nRequirement already satisfied: blis<0.8.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (0.4.1)\nRequirement already satisfied: typing-extensions>=3.7.4 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (3.7.4.3)\nRequirement already satisfied: wasabi<1.1.0,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (0.8.2)\nRequirement already satisfied: spacy-legacy<3.1.0,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (3.0.2)\nRequirement already 
satisfied: importlib-metadata>=0.20 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (3.8.1)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (20.9)\nRequirement already satisfied: typer<0.4.0,>=0.3.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (0.3.2)\nRequirement already satisfied: pathy in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (0.4.0)\nRequirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (2.0.5)\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (2.23.0)\nRequirement already satisfied: srsly<3.0.0,>=2.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (2.4.0)\nRequirement already satisfied: catalogue<2.1.0,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (2.0.1)\nRequirement already satisfied: pydantic<1.8.0,>=1.7.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (1.7.3)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (2.11.3)\nRequirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (1.0.5)\nRequirement already satisfied: thinc<8.1.0,>=8.0.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (8.0.2)\nRequirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (4.41.1)\nRequirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (3.0.5)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (3.4.1)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (2.4.7)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (2020.12.5)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (3.0.4)\nRequirement already satisfied: click<7.2.0,>=7.1.1 in /usr/local/lib/python3.7/dist-packages (from typer<0.4.0,>=0.3.0->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (7.1.2)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (1.1.1)\nRequirement already satisfied: 
smart-open<4.0.0,>=2.2.0 in /usr/local/lib/python3.7/dist-packages (from pathy->spacy<3.1.0,>=3.0.0->de-core-news-lg==3.0.0) (3.0.0)\nInstalling collected packages: de-core-news-lg\nSuccessfully installed de-core-news-lg-3.0.0\n\u001b[38;5;2m✔ Download and installation successful\u001b[0m\nYou can now load the package via spacy.load('de_core_news_lg')\nSpaCy language model imported successfully\n" ], [ "# Implement the language model\n\nnlp = de_core_news_lg.load() # Alternative: nlp = spacy.load('de_core_news_lg') \nprint(\"SpaCy language model implemented successfully\")", "SpaCy language model implemented successfully\n" ], [ "spacy.info()", "_____no_output_____" ], [ "# load corpus file\ninfile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_unprocessed.csv'\nwith open(infile, 'r') as f: # rb opens file for reading in binary mode\n corpus = pd.read_csv(f, encoding=\"UTF-8\")\ncorpus", "_____no_output_____" ] ], [ [ "### Basic fulltext cleaning ", "_____no_output_____" ] ], [ [ "# VERWORFEN\n# Remove special characters and symbols, EXCLUDING diacritics\n# optionally also remove numbers and/or sentence endings\n\ndef remove_special_chars(text, remove_digits=False, remove_sent_ends=False):\n '''\n Function to remove special characters, numbers and splitlines, but keep diacritic characters.\n See https://www.codetable.net/\n To remove numbers also, set argument 'remove_digits = True'.\n To remove sentence endings (points) also set argument 'remove_sent_ends = True'.\n '''\n if remove_sent_ends:\n pattern = r'[^a-zA-Z0-9\\x7f-\\xff\\s]' if not remove_digits else r'[^a-zA-Z\\x7f-\\xff\\s]'\n else:\n pattern = r'[^a-zA-Z0-9.\\x7f-\\xff\\s]' if not remove_digits else r'[^a-zA-Z.\\x7f-\\xff\\s]'\n text = re.sub(pattern, '', text).splitlines()\n text = ''.join(text) # get rid of line splits\n text = ' '.join(text.split()) # get rid of extra whitespaces\n return text", "_____no_output_____" ], [ "# Preprocess the raw text (remove punctuation)\nimport nltk\nnltk.download('punkt') # \"Punkt\" = standard classifier for sentence segmentation \nfrom nltk import word_tokenize \n\nprint(\"Succesfully imported necessary modules\")\n\ndef remove_punctuation(wordlist):\n punctuation = [',', ';', ':', '(', ')', '[', ']', '{', '}', '\\\"', '\\'','\\'\\'', '\\`', '\\`\\`', '\\-', '«', '»', '£', '\\^', '~', '*', '®', '•', '■', '♦', '§']\n wordlist_stripped = [w for w in wordlist if w not in punctuation]\n wordlist_stripped = [w for w in wordlist if len(w) > 2]\n return wordlist_stripped\n\ndef preprocess_nltk(text):\n wordlist = nltk.word_tokenize(text, language='german')\n wordlist = remove_punctuation(wordlist)\n wordlist = ' '.join(wordlist)\n return wordlist", "[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\nSuccesfully imported necessary modules\n" ], [ "corpus['clean_text'] = ''\nfor index in corpus.index:\n clean_text = preprocess_nltk(corpus['fulltext'][index])\n corpus['clean_text'][index] = clean_text\n \ncorpus.head", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n after removing the cwd from sys.path.\n" ], [ "corpus.fulltext[133]", "_____no_output_____" ], [ "corpus.clean_text[133]", "_____no_output_____" ], [ "text = nlp(corpus['clean_text'][147])", 
"_____no_output_____" ], [ "[print(s) for s in text.sents]", "PllWß und Große Räche der Stadt und Reß\npublik\nBern entbieten allen und jeden Unsern lieben und getreuen Bürgern und Angehörigen Stadt und Land Unsern gnädigen und wohlgeneigten Willen und geben ihnen dabey vernehmen\nDemnach\nUns die Ungleichheit vorgestellt worden welche Unsern deutschen Landen bey Verschreibung des Zinses Kaufbriefen und andern das Unterpfandsrecht mit sich führenden Instrumenten beobachtet wird die einten sich hierbey nach dem Inhalt der Satzung des XVI Tit des Theils die andern aber nach Ausweis der Satzung des XXV Tit des Theils der erneuerten Gerichtssatzung verhalten und Wir dabey den großen Nachtheil und Schaden landesvä- terlich beherziget vielen Unserer lieben und geteuen Angehörigen dadurch zufließen dörfte haben Wir Abänderung der obangeregten Satz sowohl für das Vergangene als das Zukünftige erkennt und verordnet\nDaß äußert dem Gültbrief deßenthalb Wir bey denen darauf sich beziehenden Satzungen bewenden lassen sonsten alle mögliche bereits aufgerichtete und noch Zukunft aufzurichtende zinstragende und das Unter- pfandsrecht mit sich führende Instrument wie die immer Namen haben mögen bey dem darinn verschriebenen oder Zukunft verschreibenden Unterpfandsrecht verbleiben und demnach Geldstagen darauf angewiesen und collo- cirt werden sollen obgleich der Zins nicht nach Gült- brief- /-/ XXX/ briefrechtens/ und mithin unter fünf vom Hundert verschrieben seyn wird Welchem nach sich Mäniglich vornemlich aber\nUnsere sämtliche geschworne Schreiber und die Geltsverordnete\nVorfallenheiten verhalten wißen werden Geben Unser Großen Rathsversammlung den May 1776\nKanzler Bern -i-'X-ch- X-tt-Xch-\nch-\n" ] ], [ [ "### SpaCy large german model (de_core_news_lg) NER", "_____no_output_____" ] ], [ [ "text_lengths = [len(corpus['clean_text'][index]) for index in corpus.index]\ntext_lengths = pd.Series(text_lengths)\ntext_lengths.plot()", "_____no_output_____" ], [ "corpus['clean_text_length'] = ''\nfor index in corpus.index:\n corpus['clean_text_length'][index] = len(corpus['clean_text'][index])", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "corpus.sort_values('clean_text_length', ascending=False)", "_____no_output_____" ], [ "corpus['clean_text'][161][0:100]", "_____no_output_____" ], [ "from nltk import sent_tokenize\ntext = nltk.sent_tokenize(corpus['clean_text'][161])", "_____no_output_____" ], [ "for i in range(0, corpus['clean_text_length'][161], 1000000):\n text = corpus['clean_text'][index][0:i]\n ", "_____no_output_____" ], [ "text_lengths = [len(corpus['clean_text'][index]) for index in corpus.index]\nprint(\"Maximum length of clean texts in corpus: \", max(text_lengths), \" characters\")\nif max(text_lengths) > 1000000:\n nlp.max_length = max(text_lengths) + 1\n print('nlp.max_length increased to {} due to max text length to process.'.format(max(text_lengths) + 1))", "Maximum length of clean texts in corpus: 16820451 characters\nnlp.max_length increased to 16820452 due to max text length to process.\n" ], [ "# Applying large Spacy german model on clean_text and export LOC tags\nfrom nltk import sent_tokenize\nfor index in corpus.index[158:176]: \n text = 
nltk.sent_tokenize(corpus['clean_text'][index])\n for s in text:\n doc = nlp(s)\n %cd /content/drive/My\\ Drive/e_rara_iob/bernensia/spacy\n id = corpus['e_rara_id'][index]\n outfile = \"./spacy_iob_\" + str(id) + \".txt\"\n with open(outfile, \"a\") as f:\n for token in doc:\n if token.ent_type_=='LOC':\n f.write(str([token.i, token.text, token.ent_iob_, token.ent_type_]))\n %cd /content/\n", "_____no_output_____" ], [ "# Applying large Spacy german model on clean_text and write LOC entities per clean text\nfrom nltk import sent_tokenize\n\ncorpus['spacy'] = ''\nfor index in corpus.index:\n loc_ents = [] \n sentences = nltk.sent_tokenize(corpus['clean_text'][index], language='german')\n for s in sentences:\n doc = nlp(s)\n loc_ents.append([ent.text for ent in doc.ents if ent.label_ == 'LOC'])\n corpus['spacy'][index] = loc_ents\n", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:10: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n # Remove the CWD from sys.path while we load stuff.\n" ] ], [ [ "#### Small Sample", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "test = 'Ansieht von viel von Büöen. Flach einer 3eicbnung aus der Kieler Lkronik des Verresius, 162b. Das alle viel und seine Umgebung. Erläuternder Text lMLi von Dr. ß. Türler, Staatsarcbivar in Bern. Einleitung. ^)as alte Biel und seine Umgebung“ soll eine Beimatkunöe in Bildern für dos gange Seelanö sein. Es ist gewidmet seinen Bewohnern und seinen freunden, jedem, dessen Wiege in einer der alt- ehrwürdigen Ortschaften stand, jedem, dem das Land zur neuen Beimat geworden ist, oder der es um seiner Vorzüge willen lieh gewonnen hat. Heue Zeiten haDer Name und das Nller der Orlsckafl viel. 4 » bisherigen Versuche, hie Etymologie des T Ortsnamens ßiel herzustellen, sind alle wertlos. Sie mutzten alle zu verkehrten ^ Resultaten führen, weil nicht die ältesten Bamensformen, sondern nur die heutige Form zur Grundlage der Untersuchung gemacht wurden. Sowohl die Erklärung des Verresius in seiner Bieler- Lhronik aus dem lateinischen Bipennae ----- Seil, als auch die neuestens von Dr. Zimmerli (Sprachgrenze etc.) wiederholte Ableitung von Bühl, als endlich die im geographischen Lexikon von Attinger durch Abbe Daucourt ausgestellte ßerleitung vorn gallischen buwil = Beil sind unhaltbar. Die älteste 1142 vorkommende Bamensform Belna ist identisch mit der mittelalterlichen Schreibung des Ortsnamens Beaune (im Coiret, Frankreich), und da als Etymon des letztem Damens Belena, Belenus, der Dame eines keltischen Gottes, festgestellt ist, liegt die Vermutung nahe, dieses auch für den ersten Ortsnamen anzunehmen. Die philologische Untersuchung des ßerrn Dr. Josef Stadelmann, Bibliothekar in Freiburg, bestätigte diese Vermutung des Verfassers genau. Die ältesten romanischen Damensformen für Biel sind: Belna 1142, Bielne 1184, Biene 1218, Beene 1225, Beenna 1233, Biena 1251, Bienna 1260. Sämtliche Glieder dieser Reibe, schreibt ßerr Stadelmann, führen unabweisbar auf die romanische Grundform Beelna zurück und diese seihst mutz zurückgehen auf die vulgärlateinische Form Belena. Damit steht man vor dem keltischen Götternamen Belenus, Belinus. Dieser Gott wurde in verschiedenen keltischen Ländern verehrt, so auch in Gallien. Er war der Bauptgott in Dorikum (Österreich bis Brain). 
In Aquileja bat man nickt weniger als 22 Inschriften zu seinen Ehren gefunden, wovon 6 seinen Damen als Beinamen des Apollo tragen, dessen Stelle er hei den selten wirklick vertrat. Zwei jener ln- sckriften sind für uns besonders interessant, weil sie uns melden, datz dem Belenus ein Brunnen geweiht war und sick daraus eine direkte Beziehung zu Biel ergiebt. In der Grotte der Brunnenquelle, die sick unmittelbar hinter der Stadt Biel befindet, fand man nämlick im Jahre 1846 nicht weniger als 300 — 400 Kupfermünzen römiscker Baiser, von Julius Eaesar bis Valentinian 1 (4 375). Dieser denkwürdige LDünzfund ist, wie A. John betont bat, ein neuer Beleg für die bekannte heidnische Sitte des Geldspendens an bedeutende Quellen; sie vergegenwärtigt uns ferner die Dauer der römischen Berrsckaft in ßelvetien bis zu deren Verfall und verrät uns endlich die ersten sickern Spuren römiscker Ansiedlung in der nächsten Umgebung, da diese Opferspenden als Gaben des Überflusses von Anwohnern ersckeinen. Wir geben noch einen Schritt weiter, indem wir aus dem Damen Belena erkennen, datz die Quelle dem Belenus geweiht war und die nahe romiscke Ansiedlung hinwiederum den Damen vorn Gatte der Quelle erhalten hat. Auf die hesprockene romaniscke Damensform, die sckon im 13. Jahrhundert zur heutigen Form Bienne geworden ist, gebt auch der deutsche Dame Biel zurück. Die ältesten alamanniscken Sckreihungen sind: de Beine 1160, de Bielno 1179, Bielle 1254, Bein 1259 (auf dem Stadtsiegel), Bieln 1260, Biel 1260, in Biello 1265. Die Alamannen haben wahrscheinlich den Damen übernommen, als im CDunöe der Belveto-Romanen das e der ersten Silbe sckon Doppellaut angenommen hatte. Aus Beelna entstand zunächst Beine, dann, durch Spaltung des e zu ie, Bielna. Der Endvokal a wurde abgeworfen, indem der Dame als Deutrum behandelt und dekliniert wurde, und endlick verlor sick auck das sckwerfällige Scklutz-n. Aus dieser Etymologie ergiebt sick die sckon berührte wicktige latsacke, datz Biel eine romiscke oder romisck-helvetiscke Ansiedlung war und der Dame die Stürme der Völkerwanderung überdauerte, indem ihn die einwandernden Alamannen übernahmen. Jedenfalls darf man aber dabei nur an einen Weiler oder ein Dörfcken des Damens Belna denken; denn die Behauptung, Biel habe sick aus der römiscken Zeit her als selhstständige LDunicipalstadt erhalten, ist von Dr. J. Erni in seiner Dissertation gründlich widerlegt worden. SB 2'", "_____no_output_____" ], [ "test = 'Ansieht von viel von Büöen. Flach einer 3eicbnung aus der Kieler Lkronik des Verresius, 162b. Das alle viel und seine Umgebung. Erläuternder Text lMLi von Dr. ß. Türler, Staatsarcbivar in Bern. Einleitung. ^)as alte Biel und seine Umgebung“ soll eine Beimatkunöe in Bildern für dos gange Seelanö sein. Es ist gewidmet seinen Bewohnern und seinen freunden, jedem, dessen Wiege in einer der alt- ehrwürdigen Ortschaften stand, jedem, dem das Land zur neuen Beimat geworden ist, oder der es um seiner Vorzüge willen lieh gewonnen hat. Heue Zeiten haben neue forderungen und Rufgaben gebracht, und das Rite stürzt in Ruinen. Die Stäöte haben beinahe durchweg die engen segeln gesprengt, die ihnen die Ringmauern umgelegt hatten. Über die ehemaligen Stadtgräben * hinaus dehnen sich die (Zassen aus. Rber auch in den Dörfern fordert der fortschritt gar oft den Ruin des Riten, das seine Verteidiger verloren hat. Es ist hohe Zeit, die Denkmäler aus den Zeiten der Voreltern noch im Bilde durch den Stift des Künstlers festzuhalten, die alten Bilder zu sammeln und den Enkeln zu überliefern. 
Der größte Raum in diesem Werke, das leicht auf die doppelte oder dreifache OröHe hätte gebracht werden können, kommt der Stadt Biel zu wegen ihrer grötzern Rus- dehnung und Bedeutung. Wenn dieses Verhältnis auch im Texte besteht, so geschieht es deswegen, weil Biel ein reichhaltiges Rrchiv besitzt und der Verfasser dasselbe besser als andere kennt. .A. /U : [■’> ( ! Der Name und das Nller der Orlsckafl viel. 4 » bisherigen Versuche, hie Etymologie des T Ortsnamens ßiel herzustellen, sind alle wertlos. Sie mutzten alle zu verkehrten ^ Resultaten führen, weil nicht die ältesten Bamensformen, sondern nur die heutige Form zur Grundlage der Untersuchung gemacht wurden. Sowohl die Erklärung des Verresius in seiner Bieler- Lhronik aus dem lateinischen Bipennae ----- Seil, als auch die neuestens von Dr. Zimmerli (Sprachgrenze etc.) wiederholte Ableitung von Bühl, als endlich die im geographischen Lexikon von Attinger durch Abbe Daucourt ausgestellte ßerleitung vorn gallischen buwil = Beil sind unhaltbar. Die älteste 1142 vorkommende Bamensform Belna ist identisch mit der mittelalterlichen Schreibung des Ortsnamens Beaune (im Coiret, Frankreich), und da als Etymon des letztem Damens Belena, Belenus, der Dame eines keltischen Gottes, festgestellt ist, liegt die Vermutung nahe, dieses auch für den ersten Ortsnamen anzunehmen. Die philologische Untersuchung des ßerrn Dr. Josef Stadelmann, Bibliothekar in Freiburg, bestätigte diese Vermutung des Verfassers genau. Die ältesten romanischen Damensformen für Biel sind: Belna 1142, Bielne 1184, Biene 1218, Beene 1225, Beenna 1233, Biena 1251, Bienna 1260. Sämtliche Glieder dieser Reibe, schreibt ßerr Stadelmann, führen unabweisbar auf die romanische Grundform Beelna zurück und diese seihst mutz zurückgehen auf die vulgärlateinische Form Belena. Damit steht man vor dem keltischen Götternamen Belenus, Belinus. Dieser Gott wurde in verschiedenen keltischen Ländern verehrt, so auch in Gallien. Er war der Bauptgott in Dorikum (Österreich bis Brain). In Aquileja bat man nickt weniger als 22 Inschriften zu seinen Ehren gefunden, wovon 6 seinen Damen als Beinamen des Apollo tragen, dessen Stelle er hei den selten wirklick vertrat. Zwei jener ln- sckriften sind für uns besonders interessant, weil sie uns melden, datz dem Belenus ein Brunnen geweiht war und sick daraus eine direkte Beziehung zu Biel ergiebt. In der Grotte der Brunnenquelle, die sick unmittelbar hinter der Stadt Biel befindet, fand man nämlick im Jahre 1846 nicht weniger als 300 — 400 Kupfermünzen römiscker Baiser, von Julius Eaesar bis Valentinian 1 (4 375). Dieser denkwürdige LDünzfund ist, wie A. John betont bat, ein neuer Beleg für die bekannte heidnische Sitte des Geldspendens an bedeutende Quellen; sie vergegenwärtigt uns ferner die Dauer der römischen Berrsckaft in ßelvetien bis zu deren Verfall und verrät uns endlich die ersten sickern Spuren römiscker Ansiedlung in der nächsten Umgebung, da diese Opferspenden als Gaben des Überflusses von Anwohnern ersckeinen. Wir geben noch einen Schritt weiter, indem wir aus dem Damen Belena erkennen, datz die Quelle dem Belenus geweiht war und die nahe romiscke Ansiedlung hinwiederum den Damen vorn Gatte der Quelle erhalten hat. Auf die hesprockene romaniscke Damensform, die sckon im 13. Jahrhundert zur heutigen Form Bienne geworden ist, gebt auch der deutsche Dame Biel zurück. Die ältesten alamanniscken Sckreihungen sind: de Beine 1160, de Bielno 1179, Bielle 1254, Bein 1259 (auf dem Stadtsiegel), Bieln 1260, Biel 1260, in Biello 1265. 
Die Alamannen haben wahrscheinlich den Damen übernommen, als im CDunöe der Belveto-Romanen das e der ersten Silbe sckon Doppellaut angenommen hatte. Aus Beelna entstand zunächst Beine, dann, durch Spaltung des e zu ie, Bielna. Der Endvokal a wurde abgeworfen, indem der Dame als Deutrum behandelt und dekliniert wurde, und endlick verlor sick auck das sckwerfällige Scklutz-n. Aus dieser Etymologie ergiebt sick die sckon berührte wicktige latsacke, datz Biel eine romiscke oder romisck-helvetiscke Ansiedlung war und der Dame die Stürme der Völkerwanderung überdauerte, indem ihn die einwandernden Alamannen übernahmen. Jedenfalls darf man aber dabei nur an einen Weiler oder ein Dörfcken des Damens Belna denken; denn die Behauptung, Biel habe sick aus der römiscken Zeit her als selhstständige LDunicipalstadt erhalten, ist von Dr. J. Erni in seiner Dissertation gründlich widerlegt worden. SB 2'", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "from nltk import sent_tokenize", "_____no_output_____" ], [ "sents = nltk.sent_tokenize(preprocess_nltk(test))\nfor s in sents:\n doc = nlp(s)\n print([[ent.text, ent.label_] for ent in doc.ents if ent.label_ == 'LOC'])", "[['Verresius', 'LOC'], ['Biel', 'LOC'], ['Stadtgräben', 'LOC'], ['Zassen', 'LOC'], ['Rber', 'LOC'], ['Stadt', 'LOC'], ['Biel', 'LOC'], ['Biel', 'LOC'], ['Orlsckafl', 'LOC']]\n[['Bühl', 'LOC'], ['Belna', 'LOC'], ['Beaune Coiret Frankreich', 'LOC'], ['Freiburg', 'LOC'], ['Biel', 'LOC'], ['Belna', 'LOC'], ['Bielne', 'LOC'], ['Beene', 'LOC'], ['Beenna', 'LOC'], ['Biena', 'LOC'], ['Bienna', 'LOC'], ['Gallien', 'LOC'], ['Bauptgott Dorikum Österreich', 'LOC'], ['Biel', 'LOC'], ['Brunnenquelle', 'LOC'], ['Stadt', 'LOC'], ['Biel', 'LOC']]\n[['Dame Biel', 'LOC'], ['Bielno', 'LOC'], ['Bielle', 'LOC'], ['Bieln', 'LOC'], ['Biel', 'LOC'], ['Biello', 'LOC'], ['Beelna', 'LOC'], ['Endvokal', 'LOC'], ['Biel', 'LOC'], ['Biel', 'LOC'], ['LDunicipalstadt', 'LOC']]\n" ], [ "test = 'Die älteste 1142 vorkommende Bamensform Belna ist identisch mit der mittelalterlichen Schreibung des Ortsnamens Beaune (im Coiret, Frankreich), und da als Etymon des letztem Damens Belena, Belenus, der Dame eines keltischen Gottes, festgestellt ist, liegt die Vermutung nahe, dieses auch für den ersten Ortsnamen anzunehmen.'", "_____no_output_____" ], [ "doc = nlp(preprocess_nltk(test))\n[[ent.text, ent.label_] for ent in doc.ents if ent.label_ == 'LOC']", "_____no_output_____" ], [ "corpus[101: 176]", "_____no_output_____" ], [ "# Save corpus dataframe file to local machine and to Google Drive\n\n# local machine\noutfile = \"./corpus_bernensia_ger_LOC_spacy_text.csv\"\nwith open(outfile, \"w\", encoding=\"utf-8\") as f:\n corpus.to_csv(f, index=False, columns=['e_rara_id', 'clean_text_length', 'spacy'])\n files.download(outfile)\nprint(\"Saved to file to local machine.\")\n\n# Google Drive\n%cd /content/drive/My\\ Drive/e_rara_fulltexts/bernensia\noutfile = \"./corpus_bernensia_ger_LOC_spacy_text.csv\"\nwith open(outfile, \"w\") as f:\n corpus.to_csv(f, index=False, columns=['e_rara_id', 'clean_text_length', 'spacy'])\n%cd /content/\nprint(\"Saved to file to Google Drive.\")", "_____no_output_____" ], [ "import glob\n%cd /content/drive/My\\ Drive/e_rara_iob/bernensia/\nfiles = []\nfor file in glob.glob('spacy/*'):\n files.append(file)\nprint(len(files))", "/content/drive/My Drive/e_rara_iob/bernensia\n176\n" ], [ "tokens_loc = []\ntokens_lemmata_loc = []\nent_loc = []\nent_lemmata_loc = []\nfor index in corpus.index[146:147]:\n text = corpus['clean_text'][index]\n 
doc = nlp(text)\n ent_loc.append([ent.text for ent in doc.ents if ent.label_ == 'LOC'])\n ent_lemmata_loc.append([ent.lemma_ for ent in doc.ents if ent.label_ == 'LOC'])\n tokens_loc.append([token.text for token in doc if token.ent_type_=='LOC'])\n tokens_lemmata_loc.append([token.lemma_ for token in doc if token.ent_type_=='LOC'])\n", "_____no_output_____" ], [ "print(len(tokens_loc[0]))\nprint(len(set(tokens_loc[0])))\nprint(len(ent_lemmata_loc[0]))\nprint(len(set(ent_lemmata_loc[0])))", "50\n40\n42\n33\n" ], [ "ent_lemmata_loc", "_____no_output_____" ], [ "from collections import Counter # Counter.keys() equals to list(set()), Counter.values() counts the elements' frequency\nCounter(ent_lemmata_loc[0])", "_____no_output_____" ], [ "counter = Counter(ent_lemmata_loc[0])", "_____no_output_____" ], [ "counter.most_common(3)", "_____no_output_____" ], [ "counter.items()", "_____no_output_____" ] ], [ [ "### Post processing: spacy", "_____no_output_____" ], [ "#### Clean brackets out: spacy", "_____no_output_____" ] ], [ [ "def remove_brackets_quotes(text):\n brackets = ['[',']', '\\'']\n text_clean = [c for c in text if c not in brackets]\n text_clean = ''.join(text_clean)\n return text_clean", "_____no_output_____" ], [ "# load LOC file\ninfile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_LOC_spacy_text.csv'\nwith open(infile, 'r') as f: \n state = pd.read_csv(f, encoding=\"UTF-8\")", "_____no_output_____" ], [ "state[100:140]", "_____no_output_____" ], [ "test = remove_brackets_quotes(state['spacy'][75])\ntest", "_____no_output_____" ], [ "# remove brackets and quotes form LOCs\n\nstate['spacy_concat'] = ''\nfor i in corpus.index:\n clean_text = remove_brackets_quotes(state['spacy'][i])\n state['spacy_concat'][i] = clean_text", "_____no_output_____" ], [ "state", "_____no_output_____" ], [ "# Google Drive\n%cd /content/drive/My\\ Drive/e_rara_fulltexts/bernensia\noutfile = \"./corpus_bernensia_ger_LOC_spacy_text-full.csv\"\nwith open(outfile, \"w\") as f:\n state.to_csv(f, index=False)\n%cd /content/\nprint(\"Saved to file to Google Drive.\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e718794ad4f4ea927094dbf7e211e8400de0ff3e
16,064
ipynb
Jupyter Notebook
Python_basics/Wide_to_long.ipynb
corybaird/Econometrics
3c2d5f33e0788093bd2dba6513726cdf563d6abe
[ "CC0-1.0" ]
null
null
null
Python_basics/Wide_to_long.ipynb
corybaird/Econometrics
3c2d5f33e0788093bd2dba6513726cdf563d6abe
[ "CC0-1.0" ]
null
null
null
Python_basics/Wide_to_long.ipynb
corybaird/Econometrics
3c2d5f33e0788093bd2dba6513726cdf563d6abe
[ "CC0-1.0" ]
2
2020-11-26T14:41:57.000Z
2022-01-04T07:44:10.000Z
32.192385
145
0.381785
[ [ [ "import pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "df70 = pd.read_excel('WDI 1970-2017 Annual Data.xlsx')", "_____no_output_____" ], [ "df70.head(1)", "_____no_output_____" ] ], [ [ "# Melt date columns to rows", "_____no_output_____" ] ], [ [ "df= df70.melt(id_vars=['Country Name','Indicator Name','CountryCode_ISO Numeric','Indicator Code'], var_name='Date', value_name='Value')\ndf.Date= pd.to_datetime(df.Date)\ndf.set_index(['Country Name', 'Date'], inplace=True)\ndf.head(3)", "_____no_output_____" ] ], [ [ "# Search for data in column by string", "_____no_output_____" ] ], [ [ "df[df['Indicator Name'].str.contains('consumption')].head(3)", "_____no_output_____" ] ], [ [ "# Search for specific indicator", "_____no_output_____" ] ], [ [ "df= df.sort_index(0)\ndf.loc[df['Indicator Code']=='NE.CON.PRVT.ZS'].head(5)", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nMultiIndex: 2177664 entries, ('Afghanistan', Timestamp('1970-01-01 00:00:00')) to ('Zimbabwe', Timestamp('2017-01-01 00:00:00'))\nData columns (total 4 columns):\n # Column Dtype \n--- ------ ----- \n 0 Indicator Name object \n 1 CountryCode_ISO Numeric int64 \n 2 Indicator Code object \n 3 Value float64\ndtypes: float64(1), int64(1), object(2)\nmemory usage: 72.7+ MB\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e7188a651c0948c9dd36ef02ccce7778e7af5744
41,691
ipynb
Jupyter Notebook
deep-learnining-specialization/1. neural nets and deep learning/resources/Python Basics With Numpy v3.ipynb
diegocavalca/education
a36b1cfedd1050d642a57b7273a93ad8faf8875b
[ "CC0-1.0" ]
1
2020-10-12T01:35:08.000Z
2020-10-12T01:35:08.000Z
deep-learnining-specialization/1. neural nets and deep learning/resources/Python Basics With Numpy v3.ipynb
diegocavalca/Studies
a36b1cfedd1050d642a57b7273a93ad8faf8875b
[ "CC0-1.0" ]
null
null
null
deep-learnining-specialization/1. neural nets and deep learning/resources/Python Basics With Numpy v3.ipynb
diegocavalca/Studies
a36b1cfedd1050d642a57b7273a93ad8faf8875b
[ "CC0-1.0" ]
1
2022-01-18T11:01:49.000Z
2022-01-18T11:01:49.000Z
34.771476
922
0.510398
[ [ [ "# Python Basics with Numpy (optional assignment)\n\nWelcome to your first assignment. This exercise gives you a brief introduction to Python. Even if you've used Python before, this will help familiarize you with functions we'll need. \n\n**Instructions:**\n- You will be using Python 3.\n- Avoid using for-loops and while-loops, unless you are explicitly told to do so.\n- Do not modify the (# GRADED FUNCTION [function name]) comment in some cells. Your work would not be graded if you change this. Each cell containing that comment should only contain one function.\n- After coding your function, run the cell right below it to check if your result is correct.\n\n**After this assignment you will:**\n- Be able to use iPython Notebooks\n- Be able to use numpy functions and numpy matrix/vector operations\n- Understand the concept of \"broadcasting\"\n- Be able to vectorize code\n\nLet's get started!", "_____no_output_____" ], [ "## About iPython Notebooks ##\n\niPython Notebooks are interactive coding environments embedded in a webpage. You will be using iPython notebooks in this class. You only need to write code between the ### START CODE HERE ### and ### END CODE HERE ### comments. After writing your code, you can run the cell by either pressing \"SHIFT\"+\"ENTER\" or by clicking on \"Run Cell\" (denoted by a play symbol) in the upper bar of the notebook. \n\nWe will often specify \"(≈ X lines of code)\" in the comments to tell you about how much code you need to write. It is just a rough estimate, so don't feel bad if your code is longer or shorter.\n\n**Exercise**: Set test to `\"Hello World\"` in the cell below to print \"Hello World\" and run the two cells below.", "_____no_output_____" ] ], [ [ "### START CODE HERE ### (≈ 1 line of code)\ntest = 'Hello World'\n### END CODE HERE ###", "_____no_output_____" ], [ "print (\"test: \" + test)", "test: Hello World\n" ] ], [ [ "**Expected output**:\ntest: Hello World", "_____no_output_____" ], [ "<font color='blue'>\n**What you need to remember**:\n- Run your cells using SHIFT+ENTER (or \"Run cell\")\n- Write code in the designated areas using Python 3 only\n- Do not modify the code outside of the designated areas", "_____no_output_____" ], [ "## 1 - Building basic functions with numpy ##\n\nNumpy is the main package for scientific computing in Python. It is maintained by a large community (www.numpy.org). In this exercise you will learn several key numpy functions such as np.exp, np.log, and np.reshape. You will need to know how to use these functions for future assignments.\n\n### 1.1 - sigmoid function, np.exp() ###\n\nBefore using np.exp(), you will use math.exp() to implement the sigmoid function. You will then see why np.exp() is preferable to math.exp().\n\n**Exercise**: Build a function that returns the sigmoid of a real number x. Use math.exp(x) for the exponential function.\n\n**Reminder**:\n$sigmoid(x) = \\frac{1}{1+e^{-x}}$ is sometimes also known as the logistic function. It is a non-linear function used not only in Machine Learning (Logistic Regression), but also in Deep Learning.\n\n<img src=\"images/Sigmoid.png\" style=\"width:500px;height:228px;\">\n\nTo refer to a function belonging to a specific package you could call it using package_name.function(). 
Run the code below to see an example with math.exp().", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: basic_sigmoid\n\nimport math\n\ndef basic_sigmoid(x):\n \"\"\"\n Compute sigmoid of x.\n\n Arguments:\n x -- A scalar\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n s = 1. / (1. + math.exp(-x))\n ### END CODE HERE ###\n \n return s", "_____no_output_____" ], [ "basic_sigmoid(3)", "_____no_output_____" ] ], [ [ "**Expected Output**: \n<table style = \"width:40%\">\n <tr>\n <td>** basic_sigmoid(3) **</td> \n <td>0.9525741268224334 </td> \n </tr>\n\n</table>", "_____no_output_____" ], [ "Actually, we rarely use the \"math\" library in deep learning because the inputs of the functions are real numbers. In deep learning we mostly use matrices and vectors. This is why numpy is more useful. ", "_____no_output_____" ] ], [ [ "### One reason why we use \"numpy\" instead of \"math\" in Deep Learning ###\nx = [1, 2, 3]\nbasic_sigmoid(x) # you will see this give an error when you run it, because x is a vector.", "_____no_output_____" ] ], [ [ "In fact, if $ x = (x_1, x_2, ..., x_n)$ is a row vector then $np.exp(x)$ will apply the exponential function to every element of x. The output will thus be: $np.exp(x) = (e^{x_1}, e^{x_2}, ..., e^{x_n})$", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# example of np.exp\nx = np.array([1, 2, 3])\nprint(np.exp(x)) # result is (exp(1), exp(2), exp(3))", "[ 2.71828183 7.3890561 20.08553692]\n" ] ], [ [ "Furthermore, if x is a vector, then a Python operation such as $s = x + 3$ or $s = \\frac{1}{x}$ will output s as a vector of the same size as x.", "_____no_output_____" ] ], [ [ "# example of vector operation\nx = np.array([1, 2, 3])\nprint (x + 3)", "[4 5 6]\n" ] ], [ [ "Any time you need more info on a numpy function, we encourage you to look at [the official documentation](https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.exp.html). \n\nYou can also create a new cell in the notebook and write `np.exp?` (for example) to get quick access to the documentation.\n\n**Exercise**: Implement the sigmoid function using numpy. \n\n**Instructions**: x could now be either a real number, a vector, or a matrix. The data structures we use in numpy to represent these shapes (vectors, matrices...) are called numpy arrays. You don't need to know more for now.\n$$ \\text{For } x \\in \\mathbb{R}^n \\text{, } sigmoid(x) = sigmoid\\begin{pmatrix}\n x_1 \\\\\n x_2 \\\\\n ... \\\\\n x_n \\\\\n\\end{pmatrix} = \\begin{pmatrix}\n \\frac{1}{1+e^{-x_1}} \\\\\n \\frac{1}{1+e^{-x_2}} \\\\\n ... \\\\\n \\frac{1}{1+e^{-x_n}} \\\\\n\\end{pmatrix}\\tag{1} $$", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: sigmoid\n\nimport numpy as np # this means you can access numpy functions by writing np.function() instead of numpy.function()\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n s = 1. / (1. + np.exp(-x))\n ### END CODE HERE ###\n \n return s", "_____no_output_____" ], [ "x = np.array([1, 2, 3])\nsigmoid(x)", "_____no_output_____" ] ], [ [ "**Expected Output**: \n<table>\n <tr> \n <td> **sigmoid([1,2,3])**</td> \n <td> array([ 0.73105858, 0.88079708, 0.95257413]) </td> \n </tr>\n</table> \n", "_____no_output_____" ], [ "### 1.2 - Sigmoid gradient\n\nAs you've seen in lecture, you will need to compute gradients to optimize loss functions using backpropagation. 
Let's code your first gradient function.\n\n**Exercise**: Implement the function sigmoid_grad() to compute the gradient of the sigmoid function with respect to its input x. The formula is: $$sigmoid\\_derivative(x) = \\sigma'(x) = \\sigma(x) (1 - \\sigma(x))\\tag{2}$$\nYou often code this function in two steps:\n1. Set s to be the sigmoid of x. You might find your sigmoid(x) function useful.\n2. Compute $\\sigma'(x) = s(1-s)$", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: sigmoid_derivative\n\ndef sigmoid_derivative(x):\n \"\"\"\n Compute the gradient (also called the slope or derivative) of the sigmoid function with respect to its input x.\n You can store the output of the sigmoid function into variables and then use it to calculate the gradient.\n \n Arguments:\n x -- A scalar or numpy array\n\n Return:\n ds -- Your computed gradient.\n \"\"\"\n \n ### START CODE HERE ### (≈ 2 lines of code)\n s = 1. / (1. + np.exp(-x))\n ds = s * (1 - s)\n ### END CODE HERE ###\n \n return ds", "_____no_output_____" ], [ "x = np.array([1, 2, 3])\nprint (\"sigmoid_derivative(x) = \" + str(sigmoid_derivative(x)))", "sigmoid_derivative(x) = [ 0.19661193 0.10499359 0.04517666]\n" ] ], [ [ "**Expected Output**: \n\n\n<table>\n <tr> \n <td> **sigmoid_derivative([1,2,3])**</td> \n <td> [ 0.19661193 0.10499359 0.04517666] </td> \n </tr>\n</table> \n\n", "_____no_output_____" ], [ "### 1.3 - Reshaping arrays ###\n\nTwo common numpy functions used in deep learning are [np.shape](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html) and [np.reshape()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html). \n- X.shape is used to get the shape (dimension) of a matrix/vector X. \n- X.reshape(...) is used to reshape X into some other dimension. \n\nFor example, in computer science, an image is represented by a 3D array of shape $(length, height, depth = 3)$. However, when you read an image as the input of an algorithm you convert it to a vector of shape $(length*height*3, 1)$. In other words, you \"unroll\", or reshape, the 3D array into a 1D vector.\n\n<img src=\"images/image2vector_kiank.png\" style=\"width:500px;height:300;\">\n\n**Exercise**: Implement `image2vector()` that takes an input of shape (length, height, 3) and returns a vector of shape (length\\*height\\*3, 1). For example, if you would like to reshape an array v of shape (a, b, c) into a vector of shape (a*b,c) you would do:\n``` python\nv = v.reshape((v.shape[0]*v.shape[1], v.shape[2])) # v.shape[0] = a ; v.shape[1] = b ; v.shape[2] = c\n```\n- Please don't hardcode the dimensions of image as a constant. Instead look up the quantities you need with `image.shape[0]`, etc. 
", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: image2vector\ndef image2vector(image):\n \"\"\"\n Argument:\n image -- a numpy array of shape (length, height, depth)\n \n Returns:\n v -- a vector of shape (length*height*depth, 1)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n v = image.reshape((image.shape[0]*image.shape[1]*image.shape[2], 1))\n ### END CODE HERE ###\n \n return v", "_____no_output_____" ], [ "# This is a 3 by 3 by 2 array, typically images will be (num_px_x, num_px_y,3) where 3 represents the RGB values\nimage = np.array([[[ 0.67826139, 0.29380381],\n [ 0.90714982, 0.52835647],\n [ 0.4215251 , 0.45017551]],\n\n [[ 0.92814219, 0.96677647],\n [ 0.85304703, 0.52351845],\n [ 0.19981397, 0.27417313]],\n\n [[ 0.60659855, 0.00533165],\n [ 0.10820313, 0.49978937],\n [ 0.34144279, 0.94630077]]])\n\nprint (\"image2vector(image) = \" + str(image2vector(image)))", "image2vector(image) = [[ 0.67826139]\n [ 0.29380381]\n [ 0.90714982]\n [ 0.52835647]\n [ 0.4215251 ]\n [ 0.45017551]\n [ 0.92814219]\n [ 0.96677647]\n [ 0.85304703]\n [ 0.52351845]\n [ 0.19981397]\n [ 0.27417313]\n [ 0.60659855]\n [ 0.00533165]\n [ 0.10820313]\n [ 0.49978937]\n [ 0.34144279]\n [ 0.94630077]]\n" ] ], [ [ "**Expected Output**: \n\n\n<table style=\"width:100%\">\n <tr> \n <td> **image2vector(image)** </td> \n <td> [[ 0.67826139]\n [ 0.29380381]\n [ 0.90714982]\n [ 0.52835647]\n [ 0.4215251 ]\n [ 0.45017551]\n [ 0.92814219]\n [ 0.96677647]\n [ 0.85304703]\n [ 0.52351845]\n [ 0.19981397]\n [ 0.27417313]\n [ 0.60659855]\n [ 0.00533165]\n [ 0.10820313]\n [ 0.49978937]\n [ 0.34144279]\n [ 0.94630077]]</td> \n </tr>\n \n \n</table>", "_____no_output_____" ], [ "### 1.4 - Normalizing rows\n\nAnother common technique we use in Machine Learning and Deep Learning is to normalize our data. It often leads to a better performance because gradient descent converges faster after normalization. Here, by normalization we mean changing x to $ \\frac{x}{\\| x\\|} $ (dividing each row vector of x by its norm).\n\nFor example, if $$x = \n\\begin{bmatrix}\n 0 & 3 & 4 \\\\\n 2 & 6 & 4 \\\\\n\\end{bmatrix}\\tag{3}$$ then $$\\| x\\| = np.linalg.norm(x, axis = 1, keepdims = True) = \\begin{bmatrix}\n 5 \\\\\n \\sqrt{56} \\\\\n\\end{bmatrix}\\tag{4} $$and $$ x\\_normalized = \\frac{x}{\\| x\\|} = \\begin{bmatrix}\n 0 & \\frac{3}{5} & \\frac{4}{5} \\\\\n \\frac{2}{\\sqrt{56}} & \\frac{6}{\\sqrt{56}} & \\frac{4}{\\sqrt{56}} \\\\\n\\end{bmatrix}\\tag{5}$$ Note that you can divide matrices of different sizes and it works fine: this is called broadcasting and you're going to learn about it in part 5.\n\n\n**Exercise**: Implement normalizeRows() to normalize the rows of a matrix. After applying this function to an input matrix x, each row of x should be a vector of unit length (meaning length 1).", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: normalizeRows\n\ndef normalizeRows(x):\n \"\"\"\n Implement a function that normalizes each row of the matrix x (to have unit length).\n \n Argument:\n x -- A numpy matrix of shape (n, m)\n \n Returns:\n x -- The normalized (by row) numpy matrix. You are allowed to modify x.\n \"\"\"\n \n ### START CODE HERE ### (≈ 2 lines of code)\n # Compute x_norm as the norm 2 of x. 
Use np.linalg.norm(..., ord = 2, axis = ..., keepdims = True)\n x_norm = np.linalg.norm(x, axis=1, keepdims=True)\n \n # Divide x by its norm.\n x = x / x_norm\n ### END CODE HERE ###\n\n return x", "_____no_output_____" ], [ "x = np.array([\n [0, 3, 4],\n [1, 6, 4]])\nprint(\"normalizeRows(x) = \" + str(normalizeRows(x)))", "normalizeRows(x) = [[ 0. 0.6 0.8 ]\n [ 0.13736056 0.82416338 0.54944226]]\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:60%\">\n\n <tr> \n <td> **normalizeRows(x)** </td> \n <td> [[ 0. 0.6 0.8 ]\n [ 0.13736056 0.82416338 0.54944226]]</td> \n </tr>\n \n \n</table>", "_____no_output_____" ], [ "**Note**:\nIn normalizeRows(), you can try to print the shapes of x_norm and x, and then rerun the assessment. You'll find out that they have different shapes. This is normal given that x_norm takes the norm of each row of x. So x_norm has the same number of rows but only 1 column. So how did it work when you divided x by x_norm? This is called broadcasting and we'll talk about it now! ", "_____no_output_____" ], [ "### 1.5 - Broadcasting and the softmax function ####\nA very important concept to understand in numpy is \"broadcasting\". It is very useful for performing mathematical operations between arrays of different shapes. For the full details on broadcasting, you can read the official [broadcasting documentation](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).", "_____no_output_____" ], [ "**Exercise**: Implement a softmax function using numpy. You can think of softmax as a normalizing function used when your algorithm needs to classify two or more classes. You will learn more about softmax in the second course of this specialization.\n\n**Instructions**:\n- $ \\text{for } x \\in \\mathbb{R}^{1\\times n} \\text{, } softmax(x) = softmax(\\begin{bmatrix}\n x_1 &&\n x_2 &&\n ... &&\n x_n \n\\end{bmatrix}) = \\begin{bmatrix}\n \\frac{e^{x_1}}{\\sum_{j}e^{x_j}} &&\n \\frac{e^{x_2}}{\\sum_{j}e^{x_j}} &&\n ... &&\n \\frac{e^{x_n}}{\\sum_{j}e^{x_j}} \n\\end{bmatrix} $ \n\n- $\\text{for a matrix } x \\in \\mathbb{R}^{m \\times n} \\text{, $x_{ij}$ maps to the element in the $i^{th}$ row and $j^{th}$ column of $x$, thus we have: }$ $$softmax(x) = softmax\\begin{bmatrix}\n x_{11} & x_{12} & x_{13} & \\dots & x_{1n} \\\\\n x_{21} & x_{22} & x_{23} & \\dots & x_{2n} \\\\\n \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{m1} & x_{m2} & x_{m3} & \\dots & x_{mn}\n\\end{bmatrix} = \\begin{bmatrix}\n \\frac{e^{x_{11}}}{\\sum_{j}e^{x_{1j}}} & \\frac{e^{x_{12}}}{\\sum_{j}e^{x_{1j}}} & \\frac{e^{x_{13}}}{\\sum_{j}e^{x_{1j}}} & \\dots & \\frac{e^{x_{1n}}}{\\sum_{j}e^{x_{1j}}} \\\\\n \\frac{e^{x_{21}}}{\\sum_{j}e^{x_{2j}}} & \\frac{e^{x_{22}}}{\\sum_{j}e^{x_{2j}}} & \\frac{e^{x_{23}}}{\\sum_{j}e^{x_{2j}}} & \\dots & \\frac{e^{x_{2n}}}{\\sum_{j}e^{x_{2j}}} \\\\\n \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\\n \\frac{e^{x_{m1}}}{\\sum_{j}e^{x_{mj}}} & \\frac{e^{x_{m2}}}{\\sum_{j}e^{x_{mj}}} & \\frac{e^{x_{m3}}}{\\sum_{j}e^{x_{mj}}} & \\dots & \\frac{e^{x_{mn}}}{\\sum_{j}e^{x_{mj}}}\n\\end{bmatrix} = \\begin{pmatrix}\n softmax\\text{(first row of x)} \\\\\n softmax\\text{(second row of x)} \\\\\n ... 
\\\\\n softmax\\text{(last row of x)} \\\\\n\\end{pmatrix} $$", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: softmax\n\ndef softmax(x):\n \"\"\"Calculates the softmax for each row of the input x.\n\n Your code should work for a row vector and also for matrices of shape (n, m).\n\n Argument:\n x -- A numpy matrix of shape (n,m)\n\n Returns:\n s -- A numpy matrix equal to the softmax of x, of shape (n,m)\n \"\"\"\n \n ### START CODE HERE ### (≈ 3 lines of code)\n # Apply exp() element-wise to x. Use np.exp(...).\n x_exp = np.exp(x)\n\n # Create a vector x_sum that sums each row of x_exp. Use np.sum(..., axis = 1, keepdims = True).\n x_sum = np.sum(x_exp, axis=1, keepdims=True)\n \n # Compute softmax(x) by dividing x_exp by x_sum. It should automatically use numpy broadcasting.\n s = x_exp / x_sum\n\n ### END CODE HERE ###\n \n return s", "_____no_output_____" ], [ "x = np.array([\n [9, 2, 5, 0, 0],\n [7, 5, 0, 0 ,0]])\nprint(\"softmax(x) = \" + str(softmax(x)))", "softmax(x) = [[ 9.80897665e-01 8.94462891e-04 1.79657674e-02 1.21052389e-04\n 1.21052389e-04]\n [ 8.78679856e-01 1.18916387e-01 8.01252314e-04 8.01252314e-04\n 8.01252314e-04]]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:60%\">\n\n <tr> \n <td> **softmax(x)** </td> \n <td> [[ 9.80897665e-01 8.94462891e-04 1.79657674e-02 1.21052389e-04\n 1.21052389e-04]\n [ 8.78679856e-01 1.18916387e-01 8.01252314e-04 8.01252314e-04\n 8.01252314e-04]]</td> \n </tr>\n</table>\n", "_____no_output_____" ], [ "**Note**:\n- If you print the shapes of x_exp, x_sum and s above and rerun the assessment cell, you will see that x_sum is of shape (2,1) while x_exp and s are of shape (2,5). **x_exp/x_sum** works due to python broadcasting.\n\nCongratulations! You now have a pretty good understanding of python numpy and have implemented a few useful functions that you will be using in deep learning.", "_____no_output_____" ], [ "<font color='blue'>\n**What you need to remember:**\n- np.exp(x) works for any np.array x and applies the exponential function to every coordinate\n- the sigmoid function and its gradient\n- image2vector is commonly used in deep learning\n- np.reshape is widely used. In the future, you'll see that keeping your matrix/vector dimensions straight will go toward eliminating a lot of bugs. \n- numpy has efficient built-in functions\n- broadcasting is extremely useful", "_____no_output_____" ], [ "## 2) Vectorization", "_____no_output_____" ], [ "\nIn deep learning, you deal with very large datasets. Hence, a non-computationally-optimal function can become a huge bottleneck in your algorithm and can result in a model that takes ages to run. To make sure that your code is computationally efficient, you will use vectorization. 
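(A brief hedged aside before the examples that follow: the snippet below spells out the broadcasting rule that made `x_exp / x_sum` work in the softmax cell above; the array values here are arbitrary.)

```python
# Broadcasting in one picture: dividing a (2, 5) array by a (2, 1) array.
import numpy as np

x_exp = np.ones((2, 5))                    # pretend these are exponentiated scores
x_sum = x_exp.sum(axis=1, keepdims=True)   # shape (2, 1) -- one sum per row

s = x_exp / x_sum                          # the size-1 axis is stretched to length 5
print(s.shape)                             # (2, 5)
print(s[0])                                # [0.2 0.2 0.2 0.2 0.2] -- each row sums to 1
```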
For example, try to tell the difference between the following implementations of the dot/outer/elementwise product.", "_____no_output_____" ] ], [ [ "import time\n\nx1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]\nx2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]\n\n### CLASSIC DOT PRODUCT OF VECTORS IMPLEMENTATION ###\ntic = time.process_time()\ndot = 0\nfor i in range(len(x1)):\n dot+= x1[i]*x2[i]\ntoc = time.process_time()\nprint (\"dot = \" + str(dot) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### CLASSIC OUTER PRODUCT IMPLEMENTATION ###\ntic = time.process_time()\nouter = np.zeros((len(x1),len(x2))) # we create a len(x1)*len(x2) matrix with only zeros\nfor i in range(len(x1)):\n for j in range(len(x2)):\n outer[i,j] = x1[i]*x2[j]\ntoc = time.process_time()\nprint (\"outer = \" + str(outer) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### CLASSIC ELEMENTWISE IMPLEMENTATION ###\ntic = time.process_time()\nmul = np.zeros(len(x1))\nfor i in range(len(x1)):\n mul[i] = x1[i]*x2[i]\ntoc = time.process_time()\nprint (\"elementwise multiplication = \" + str(mul) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### CLASSIC GENERAL DOT PRODUCT IMPLEMENTATION ###\nW = np.random.rand(3,len(x1)) # Random 3*len(x1) numpy array\ntic = time.process_time()\ngdot = np.zeros(W.shape[0])\nfor i in range(W.shape[0]):\n for j in range(len(x1)):\n gdot[i] += W[i,j]*x1[j]\ntoc = time.process_time()\nprint (\"gdot = \" + str(gdot) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")", "dot = 278\n ----- Computation time = 0.18839100000000997ms\nouter = [[ 81. 18. 18. 81. 0. 81. 18. 45. 0. 0. 81. 18. 45. 0.\n 0.]\n [ 18. 4. 4. 18. 0. 18. 4. 10. 0. 0. 18. 4. 10. 0.\n 0.]\n [ 45. 10. 10. 45. 0. 45. 10. 25. 0. 0. 45. 10. 25. 0.\n 0.]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.]\n [ 63. 14. 14. 63. 0. 63. 14. 35. 0. 0. 63. 14. 35. 0.\n 0.]\n [ 45. 10. 10. 45. 0. 45. 10. 25. 0. 0. 45. 10. 25. 0.\n 0.]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.]\n [ 81. 18. 18. 81. 0. 81. 18. 45. 0. 0. 81. 18. 45. 0.\n 0.]\n [ 18. 4. 4. 18. 0. 18. 4. 10. 0. 0. 18. 4. 10. 0.\n 0.]\n [ 45. 10. 10. 45. 0. 45. 10. 25. 0. 0. 45. 10. 25. 0.\n 0.]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.]]\n ----- Computation time = 0.33141300000005813ms\nelementwise multiplication = [ 81. 4. 10. 0. 0. 63. 10. 0. 0. 0. 81. 4. 25. 0. 
0.]\n ----- Computation time = 0.4855219999999605ms\ngdot = [ 29.45227617 21.66828484 24.83024852]\n ----- Computation time = 0.5867550000000499ms\n" ], [ "x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]\nx2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]\n\n### VECTORIZED DOT PRODUCT OF VECTORS ###\ntic = time.process_time()\ndot = np.dot(x1,x2)\ntoc = time.process_time()\nprint (\"dot = \" + str(dot) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### VECTORIZED OUTER PRODUCT ###\ntic = time.process_time()\nouter = np.outer(x1,x2)\ntoc = time.process_time()\nprint (\"outer = \" + str(outer) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### VECTORIZED ELEMENTWISE MULTIPLICATION ###\ntic = time.process_time()\nmul = np.multiply(x1,x2)\ntoc = time.process_time()\nprint (\"elementwise multiplication = \" + str(mul) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")\n\n### VECTORIZED GENERAL DOT PRODUCT ###\ntic = time.process_time()\ndot = np.dot(W,x1)\ntoc = time.process_time()\nprint (\"gdot = \" + str(dot) + \"\\n ----- Computation time = \" + str(1000*(toc - tic)) + \"ms\")", "dot = 278\n ----- Computation time = 0.16289599999996796ms\nouter = [[81 18 18 81 0 81 18 45 0 0 81 18 45 0 0]\n [18 4 4 18 0 18 4 10 0 0 18 4 10 0 0]\n [45 10 10 45 0 45 10 25 0 0 45 10 25 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [63 14 14 63 0 63 14 35 0 0 63 14 35 0 0]\n [45 10 10 45 0 45 10 25 0 0 45 10 25 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [81 18 18 81 0 81 18 45 0 0 81 18 45 0 0]\n [18 4 4 18 0 18 4 10 0 0 18 4 10 0 0]\n [45 10 10 45 0 45 10 25 0 0 45 10 25 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]]\n ----- Computation time = 0.141723000000038ms\nelementwise multiplication = [81 4 10 0 0 63 10 0 0 0 81 4 25 0 0]\n ----- Computation time = 0.11734799999985057ms\ngdot = [ 29.45227617 21.66828484 24.83024852]\n ----- Computation time = 0.18823800000022928ms\n" ] ], [ [ "As you may have noticed, the vectorized implementation is much cleaner and more efficient. For bigger vectors/matrices, the differences in running time become even bigger. \n\n**Note** that `np.dot()` performs a matrix-matrix or matrix-vector multiplication. This is different from `np.multiply()` and the `*` operator (which is equivalent to `.*` in Matlab/Octave), which performs an element-wise multiplication.", "_____no_output_____" ], [ "### 2.1 Implement the L1 and L2 loss functions\n\n**Exercise**: Implement the numpy vectorized version of the L1 loss. You may find the function abs(x) (absolute value of x) useful.\n\n**Reminder**:\n- The loss is used to evaluate the performance of your model. The bigger your loss is, the more different your predictions ($ \\hat{y} $) are from the true values ($y$). 
In deep learning, you use optimization algorithms like Gradient Descent to train your model and to minimize the cost.\n- L1 loss is defined as:\n$$\\begin{align*} & L_1(\\hat{y}, y) = \\sum_{i=0}^m|y^{(i)} - \\hat{y}^{(i)}| \\end{align*}\\tag{6}$$", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: L1\n\ndef L1(yhat, y):\n \"\"\"\n Arguments:\n yhat -- vector of size m (predicted labels)\n y -- vector of size m (true labels)\n \n Returns:\n loss -- the value of the L1 loss function defined above\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n loss = np.sum(np.absolute(yhat - y))\n ### END CODE HERE ###\n \n return loss", "_____no_output_____" ], [ "yhat = np.array([.9, 0.2, 0.1, .4, .9])\ny = np.array([1, 0, 0, 1, 1])\nprint(\"L1 = \" + str(L1(yhat,y)))", "L1 = 1.1\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:20%\">\n\n <tr> \n <td> **L1** </td> \n <td> 1.1 </td> \n </tr>\n</table>\n", "_____no_output_____" ], [ "**Exercise**: Implement the numpy vectorized version of the L2 loss. There are several ways of implementing the L2 loss but you may find the function np.dot() useful. As a reminder, if $x = [x_1, x_2, ..., x_n]$, then `np.dot(x,x)` = $\\sum_{j=0}^n x_j^{2}$. \n\n- L2 loss is defined as $$\\begin{align*} & L_2(\\hat{y},y) = \\sum_{i=0}^m(y^{(i)} - \\hat{y}^{(i)})^2 \\end{align*}\\tag{7}$$", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: L2\n\ndef L2(yhat, y):\n \"\"\"\n Arguments:\n yhat -- vector of size m (predicted labels)\n y -- vector of size m (true labels)\n \n Returns:\n loss -- the value of the L2 loss function defined above\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n loss = np.sum((yhat - y) ** 2)\n ### END CODE HERE ###\n \n return loss", "_____no_output_____" ], [ "yhat = np.array([.9, 0.2, 0.1, .4, .9])\ny = np.array([1, 0, 0, 1, 1])\nprint(\"L2 = \" + str(L2(yhat,y)))", "L2 = 0.43\n" ] ], [ [ "**Expected Output**: \n<table style=\"width:20%\">\n <tr> \n <td> **L2** </td> \n <td> 0.43 </td> \n </tr>\n</table>", "_____no_output_____" ], [ "Congratulations on completing this assignment. We hope that this little warm-up exercise helps you in the future assignments, which will be more exciting and interesting!", "_____no_output_____" ], [ "<font color='blue'>\n**What to remember:**\n- Vectorization is very important in deep learning. It provides computational efficiency and clarity.\n- You have reviewed the L1 and L2 loss.\n- You are familiar with many numpy functions such as np.sum, np.dot, np.multiply, np.maximum, etc...", "_____no_output_____" ] ] ]
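(A hedged footnote to the loss exercises above: the graded solutions use `np.sum`, but the `np.dot` hint works just as well for L2. The snippet below reuses the same small test vectors, for comparison only.)

```python
# Equivalent formulations of the L1/L2 losses, using np.dot for the sum of squares.
import numpy as np

yhat = np.array([.9, 0.2, 0.1, .4, .9])
y = np.array([1, 0, 0, 1, 1])

diff = yhat - y
print(np.sum(np.abs(diff)))   # L1 = 1.1, same as the graded solution
print(np.dot(diff, diff))     # L2 = 0.43; the residual dotted with itself is the sum of squares
```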
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
e7189b426b48ac6d9af4c08e2d7df6c25128ccc5
132,993
ipynb
Jupyter Notebook
RCB VS KXIP/Player Analysis RCB VS KXIP.ipynb
tacklesta/WPL
b062aa043c62b429c1eb071f43e68b76114abbf1
[ "MIT" ]
null
null
null
RCB VS KXIP/Player Analysis RCB VS KXIP.ipynb
tacklesta/WPL
b062aa043c62b429c1eb071f43e68b76114abbf1
[ "MIT" ]
null
null
null
RCB VS KXIP/Player Analysis RCB VS KXIP.ipynb
tacklesta/WPL
b062aa043c62b429c1eb071f43e68b76114abbf1
[ "MIT" ]
null
null
null
50.072666
23,996
0.62674
[ [ [ "# Player Analysis", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Data Cleaning And Exploring", "_____no_output_____" ] ], [ [ "df1 = pd.read_csv(\"matches.csv\")\ndf2 = pd.read_csv(\"deliveries.csv\")\n\ndf1.rename(columns={\"id\" : 'match_id'}, inplace=True)\n\nmatches = pd.merge(df1, df2, on='match_id', how='outer')", "_____no_output_____" ], [ "matches.columns", "_____no_output_____" ], [ "matches = matches[['match_id', 'season','team1', 'team2', 'toss_winner','toss_decision','winner',\n 'inning', 'batting_team', 'bowling_team', 'over', 'ball',\n 'batsman','non_striker', 'bowler','wide_runs','noball_runs',\n 'batsman_runs', 'extra_runs', 'total_runs', 'player_dismissed',\n 'dismissal_kind']]\n\nmatches.shape", "_____no_output_____" ], [ "matches.head()", "_____no_output_____" ] ], [ [ "### Considering Only KXIP VS RCB Matches", "_____no_output_____" ] ], [ [ "PB = matches[np.logical_or(np.logical_and(matches['team1']=='Royal Challengers Bangalore',matches['team2']=='Kings XI Punjab'),\n np.logical_and(matches['team2']=='Royal Challengers Bangalore',matches['team1']=='Kings XI Punjab'))]", "_____no_output_____" ], [ "PB.head()", "_____no_output_____" ] ], [ [ "Taking into consideration only the last 7 years of matches, because data that is too old might not help much when predicting cricket match results:", "_____no_output_____" ] ], [ [ "cond1 = PB[\"season\"] == 2013\ncond2 = PB[\"season\"] == 2014\ncond3 = PB[\"season\"] == 2015\ncond4 = PB[\"season\"] == 2016\ncond5 = PB[\"season\"] == 2017\ncond6 = PB[\"season\"] == 2018\ncond7 = PB[\"season\"] == 2019\nfinal = PB[cond1|cond2|cond3|cond4|cond5|cond6|cond7]\n\nfinal.head()", "_____no_output_____" ], [ "final.season.unique()", "_____no_output_____" ], [ "final.batsman.unique()", "_____no_output_____" ] ], [ [ "## Wicket Fall Analysis of KXIP", "_____no_output_____" ] ], [ [ "Each_team_wicket_fall = pd.DataFrame(final.groupby(\"bowling_team\").player_dismissed.count())\n\nEach_team_wicket_fall.columns=[\"Total wicket fall\"]\n\nprint(\"Wickets fallen for each team (overall):\")\nEach_team_wicket_fall", "Wickets fallen for each team (overall):\n" ], [ "wicket = final[final[\"bowling_team\"] == \"Royal Challengers Bangalore\"]\nKXIP_wickets = pd.DataFrame(wicket.groupby(['season',\"match_id\",\"bowling_team\"]).agg({'player_dismissed' : 'count'}))\nprint(\"KXIP wickets fallen against RCB in all matches:\")\nKXIP_wickets.columns = [\"Total Wicket Fall of KXIP\"]\nKXIP_wickets", "KXIP wickets fallen against RCB in all matches:\n" ], [ "sns.boxplot(KXIP_wickets[\"Total Wicket Fall of KXIP\"])", "_____no_output_____" ], [ "count = final.match_id.unique()\ncount = len(count)\nwicket = final[final[\"bowling_team\"] == \"Royal Challengers Bangalore\"]  # RCB bowling means KXIP wickets fall\nwkt = wicket[\"player_dismissed\"].count()\nprint(\"Total KXIP wickets fallen against RCB = {} in {} matches\".format(wkt, count))", "Total KXIP wickets fallen against RCB = 89 in 14 matches\n" ], [ "per_match_wicket = wkt/count\nper_match_wicket = round(per_match_wicket)\nprint(\"On average, wickets fallen per match for KXIP:\",per_match_wicket)", "On average, wickets fallen per match for KXIP: 6.0\n" ] ], [ [ "### Confidence Interval Calculation", "_____no_output_____" ] ], [ [ "KXIP_wickets.describe().T", "_____no_output_____" ], [ "mean = KXIP_wickets[\"Total Wicket Fall of KXIP\"].mean()\nsd = 
KXIP_wickets[\"Total Wicket Fall of KXIP\"].std()\nn = len(KXIP_wickets)\n\nn", "_____no_output_____" ], [ "tstar = 2.064  # t* multiplier for a 95% CI (note: with n = 14, the exact df = 13 critical value is about 2.16)\nse = sd/np.sqrt(n)\nse", "_____no_output_____" ], [ "lcb = mean - tstar * se\nucb = mean + tstar * se\nlcb = round(lcb)\nucb = round(ucb)\n\nprint(\"So, our 95% confidence interval for the total KXIP wickets that could fall today is: {}\".format((lcb, ucb)))", "So, our 95% confidence interval for the total KXIP wickets that could fall today is: (5.0, 8.0)\n" ] ], [ [ "## Partnership Analysis", "_____no_output_____" ], [ "Taking into consideration only those rows where the batsman and non-striker are AB de Villiers and V Kohli, to get the data related to their partnership:", "_____no_output_____" ] ], [ [ "AV = final[np.logical_or(np.logical_and(final['batsman']=='V Kohli',final['non_striker']=='AB de Villiers'),\n np.logical_and(final['non_striker']=='V Kohli',final['batsman']=='AB de Villiers'))]", "_____no_output_____" ], [ "AV.head()", "_____no_output_____" ], [ "AV.season.unique()", "_____no_output_____" ], [ "AV.match_id.unique()", "_____no_output_____" ], [ "partnership = pd.DataFrame(AV.groupby([\"season\",\"match_id\"])[\"total_runs\"].sum())\n\npartnership.columns = [\"Partnership b/w ABD and VK\"]\n\nprint(\"Season-wise partnership of ABD and VK:\")\n\npartnership", "Season-wise partnership of ABD and VK:\n" ], [ "partnership.describe().T", "_____no_output_____" ], [ "print(\"Average partnership runs between AB and VK in RCB VS KXIP matches:\",round(partnership[\"Partnership b/w ABD and VK\"].mean()))", "Average partnership runs between AB and VK in RCB VS KXIP matches: 22\n" ] ], [ [ "### Inning-wise Partnership Analysis\n", "_____no_output_____" ], [ "#### Overall partnership against all teams (inning-wise) in the last 2 years:", "_____no_output_____" ] ], [ [ "VKAB = matches[np.logical_or(np.logical_and(matches['batsman']=='V Kohli',matches['non_striker']=='AB de Villiers'),\n np.logical_and(matches['non_striker']=='V Kohli',matches['batsman']=='AB de Villiers'))]", "_____no_output_____" ], [ "cond6 = VKAB[\"season\"] == 2018\ncond7 = VKAB[\"season\"] == 2019\nFill = VKAB[cond6|cond7]", "_____no_output_____" ], [ "inning_wise_partnership_overall = pd.DataFrame(Fill.groupby([\"season\",\"match_id\",\"inning\"])[\"total_runs\"].sum())\n\ninning_wise_partnership_overall", "_____no_output_____" ] ], [ [ "#### Partnership against KXIP bowlers:", "_____no_output_____" ] ], [ [ "inning_wise_partnership = pd.DataFrame(AV.groupby([\"season\",\"match_id\",\"inning\"])[\"total_runs\"].sum())\n\ninning_wise_partnership", "_____no_output_____" ] ], [ [ "### Partnership Breakdown by KXIP Bowlers", "_____no_output_____" ] ], [ [ "partnership_dismiss = AV[(AV[\"player_dismissed\"]==\"V Kohli\") | (AV[\"player_dismissed\"]==\"AB de Villiers\")]", "_____no_output_____" ], [ "dismissal_record = pd.DataFrame(partnership_dismiss.groupby([\"season\",\"match_id\",\"bowler\",\"batsman\"])[\"player_dismissed\"].count())\n\ndismissal_record.columns = [\"Player dismissal count\"] \n\nprint(\"Partnership breakdown by:\")\ndismissal_record", "Partnership breakdown by:\n" ] ], [ [ "From here we can see that, in the majority of their last 5 partnerships, the partnership was broken by the pacers.", "_____no_output_____" ] ], [ [ "sns.distplot(partnership[\"Partnership b/w ABD and VK\"])", "_____no_output_____" ] ], [ [ "From the plot above of their last 5 partnerships, we can see that their partnership either broke down early (at less than 20 runs) or otherwise went on for a 
long haul. ", "_____no_output_____" ], [ "So, from all the above analysis of their average partnership runs, dismissals, dismissal timing, inning-wise partnerships, etc., we can predict that if both these batsmen stay long at the crease (20+ runs initially), they are likely to build a big partnership together.", "_____no_output_____" ], [ "## Sixes Count Analysis", "_____no_output_____" ] ], [ [ "six = final[final[\"batsman_runs\"]==6]", "_____no_output_____" ], [ "per_match_sixes = pd.DataFrame(six.groupby(\"match_id\")[\"batsman_runs\"].count())\nper_match_sixes.columns = [\"six count\"]\n\nper_match_sixes", "_____no_output_____" ], [ "sns.boxplot(per_match_sixes[\"six count\"])", "_____no_output_____" ], [ "print(\"Average no. of sixes scored in RCB VS KXIP matches:\",round(per_match_sixes[\"six count\"].mean()))", "Average no. of sixes scored in RCB VS KXIP matches: 13\n" ] ], [ [ "### Confidence Interval Calculation", "_____no_output_____" ] ], [ [ "per_match_sixes.describe().T", "_____no_output_____" ], [ "mean = per_match_sixes[\"six count\"].mean()\nsd = per_match_sixes[\"six count\"].std()\nn = len(per_match_sixes)\n\nn", "_____no_output_____" ], [ "tstar = 2.064  # t* multiplier for a 95% CI (again, the exact df = 13 value is about 2.16)\nse = sd/np.sqrt(n)\nse", "_____no_output_____" ], [ "lcb = mean - tstar * se\nucb = mean + tstar * se\nlcb = round(lcb)\nucb = round(ucb)\n\nprint(\"So, our 95% confidence interval for the total sixes that could be hit in today's match is: {}\".format((lcb, ucb)))", "So, our 95% confidence interval for the total sixes that could be hit in today's match is: (9.0, 16.0)\n" ] ], [ [ "### No Ball Analysis\n", "_____no_output_____" ] ], [ [ "nbcount = final[final[\"noball_runs\"] > 0]", "_____no_output_____" ], [ "nbcount.noball_runs.value_counts()", "_____no_output_____" ], [ "runs_from_noballs = nbcount.noball_runs.sum()\n\nprint(\"Runs from no-ball deliveries (overall):\",runs_from_noballs)", "Runs from no-ball deliveries (overall): 17\n" ], [ "no_balls_per_match = pd.DataFrame(nbcount.groupby(\"match_id\")[\"noball_runs\"].count())\n\nno_balls_per_match.columns = [\"noball count\"]\n\n\nprint(\"No balls bowled in RCB VS KXIP matches:\")\nno_balls_per_match", "No balls bowled in RCB VS KXIP matches:\n" ], [ "sns.boxplot(no_balls_per_match[\"noball count\"])", "_____no_output_____" ], [ "count = final.match_id.unique()\ncount = len(count)\n\ntotal_no_balls = nbcount.noball_runs.count()\n\nAvg_no_ball = total_no_balls/count\n\nprint(\"On average, no balls bowled per RCB VS KXIP match (overall):\",round(Avg_no_ball))", "On average, no balls bowled per RCB VS KXIP match (overall): 1.0\n" ], [ "plt.figure(figsize = (18,9))\nsns.countplot(nbcount['noball_runs'],palette='Set2',hue=nbcount['bowling_team'])\nplt.title('No balls bowled by KXIP and RCB',fontsize=15)\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.xlabel('No-ball runs',fontsize=15)\nplt.ylabel('Count',fontsize=15)\nplt.legend(loc=1,fontsize=15)\n\nplt.show()", "_____no_output_____" ] ], [ [ "So, from the analysis of no balls, we see that in 9 out of 14 matches one or more no balls were bowled, and that KXIP bowlers have bowled more no balls compared to RCB bowlers.\n\nAlso, on average, we found that 1 no ball is bowled per match.\n\nSo, based on all this information, we can estimate that 0-1 no balls are expected to be bowled in this match as well.", "_____no_output_____" ] ] ]
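(A hedged aside on the confidence intervals above: the multiplier `tstar = 2.064` is hardcoded, but `scipy.stats` can supply the exact t critical value for the actual sample size. In the sketch below, the summary statistics are placeholders standing in for `per_match_sixes` or `KXIP_wickets`.)

```python
# Cross-checking a hand-rolled 95% t-interval against scipy.stats.
# mean/sd/n are placeholder values; in the notebook they would come from the data.
import numpy as np
from scipy import stats

mean, sd, n = 13.0, 4.0, 14
se = sd / np.sqrt(n)

tstar = stats.t.ppf(0.975, df=n - 1)   # ~2.160 for df = 13, vs the hardcoded 2.064
print(round(mean - tstar * se), round(mean + tstar * se))

# Equivalent one-liner using scipy's interval helper:
print(stats.t.interval(0.95, n - 1, loc=mean, scale=se))
```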
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e7189cce7b8d045fe7e9daa1cf8a5083685382a6
22,872
ipynb
Jupyter Notebook
CIFAR100/main.ipynb
Irek21/DeltaEncoder
463e46bf7cbe99b243d01f1585d48031e9cd20be
[ "BSD-3-Clause" ]
null
null
null
CIFAR100/main.ipynb
Irek21/DeltaEncoder
463e46bf7cbe99b243d01f1585d48031e9cd20be
[ "BSD-3-Clause" ]
null
null
null
CIFAR100/main.ipynb
Irek21/DeltaEncoder
463e46bf7cbe99b243d01f1585d48031e9cd20be
[ "BSD-3-Clause" ]
null
null
null
29.360719
134
0.512111
[ [ [ "import torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.models as models\n\nimport numpy as np\n\nfrom PIL import Image\n\nimport pickle as pkl\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ] ], [ [ "# Data parser", "_____no_output_____" ] ], [ [ "class BatchLoader():\n def __init__(self, features, labels):\n self.features = features\n self.reference_features = self.random_pairs(features, labels)\n self.labels = labels\n \n def random_pairs(self, X, labels):\n Y = X.copy()\n for l in range(labels.shape[1]):\n inds = np.where(labels[:, l])[0]\n inds_pairs = np.random.permutation(inds)\n Y[inds, :] = X[inds_pairs, :]\n return Y\n \n def batch_load(self, start, end):\n if start == 0:\n idx = np.r_[:self.features.shape[0]]\n np.random.shuffle(idx)\n self.features = self.features[idx]\n self.reference_features = self.reference_features[idx]\n self.labels = self.labels[idx]\n \n if end > self.features.shape[0]:\n end = self.features.shape[0]\n \n return self.features[start:end], self.reference_features[start:end], self.labels[start:end]", "_____no_output_____" ], [ "# features_train = np.load('Data/DEdata/features_train.npy').astype('float32')\n# labels_train = np.load('Data/DEdata/labels_train.npy').astype('float32')\nloader = BatchLoader(features_train, labels_train)", "_____no_output_____" ], [ "features_train = np.zeros((80, 600, 2048), dtype=np.float32)\nfor i in range(80):\n with open('Data/PickledClasses/' + str(i), 'rb') as f:\n data = pkl.load(f)\n features_train[i] = data", "_____no_output_____" ], [ "np.save('Data/DEdata/features_train.npy', features_train.reshape(-1, 2048))", "_____no_output_____" ] ], [ [ "# Meta-learning models", "_____no_output_____" ] ], [ [ "class DeltaEncoder(nn.Module):\n def __init__(self, input_size=2048, hidden_size=8192, neck_size=16):\n encoder = nn.Sequential(\n nn.Linear(input_size * 2, hidden_size),\n nn.LeakyReLU(0.2),\n nn.Dropout(0.5),\n \n nn.Linear(hidden_size, neck_size),\n )\n \n decoder = nn.Sequential(\n nn.Linear(input_size + neck_size, hidden_size),\n nn.LeakyReLU(0.2),\n nn.Dropout(0.5),\n \n nn.Linear(hidden_size, input_size),\n )\n dropout = nn.Dropout(0.5)\n \n super(DeltaEncoder, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.dropout = dropout\n\n def forward(self, X1, X2):\n out = self.dropout(X1)\n out = torch.cat((out, X2), dim=1)\n out = self.encoder(out)\n \n out = torch.cat((X2, out), dim=1)\n out = self.decoder(out)\n return out", "_____no_output_____" ], [ "G = DeltaEncoder(2048, 512, 8).to(device)", "_____no_output_____" ] ], [ [ "# Meta-learning phase", "_____no_output_____" ] ], [ [ "MAE = nn.L1Loss(reduction='none')\nMSE = nn.MSELoss(reduction='none')\ndef weighted_MAE(predict, target):\n batch_size = predict.shape[0]\n feature_size = predict.shape[1]\n\n substract_norm = MSE(predict, target)\n L2_norms = torch.sum(substract_norm, dim=1) + 10e-7\n weights = substract_norm / L2_norms.reshape((batch_size, 1)).expand((batch_size, feature_size))\n\n substract = MAE(predict, target)\n losses = torch.sum(substract * weights, dim=1)\n loss = torch.mean(losses)\n return loss", "_____no_output_____" ], [ "# optimizer = torch.optim.Adam(G.parameters(), lr=10e-5)\noptimizer = torch.optim.Adam(G.parameters(), lr=10e-5)", "_____no_output_____" ], [ "batch_size = 512\ntrain_size = 48000\n\nfor epoch in range(2):\n for i in range(train_size // 
batch_size):\n features, reference_features, labels = loader.batch_load(i * batch_size, (i + 1) * batch_size)\n features = torch.tensor(features, device=device, dtype=torch.float32, requires_grad=False)\n reference_features = torch.tensor(reference_features, device=device, dtype=torch.float32, requires_grad=False)\n predict = G(features, reference_features)\n \n loss = weighted_MAE(predict, features)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (i % 10 == 0):\n print('Epoch {} Loss {}'.format(epoch, loss.detach()))", "Epoch 0 Loss 3.8619093894958496\nEpoch 0 Loss 3.8883285522460938\nEpoch 0 Loss 3.8175785541534424\nEpoch 0 Loss 3.8877413272857666\nEpoch 0 Loss 3.918534278869629\nEpoch 0 Loss 3.9437286853790283\nEpoch 0 Loss 3.9527225494384766\nEpoch 0 Loss 3.923792600631714\nEpoch 0 Loss 3.873991012573242\nEpoch 0 Loss 3.881967544555664\nEpoch 1 Loss 3.905362606048584\nEpoch 1 Loss 3.903092861175537\nEpoch 1 Loss 3.920227289199829\nEpoch 1 Loss 3.8377187252044678\nEpoch 1 Loss 3.878110885620117\nEpoch 1 Loss 3.9147236347198486\nEpoch 1 Loss 3.88032603263855\nEpoch 1 Loss 3.8578286170959473\nEpoch 1 Loss 3.87811541557312\nEpoch 1 Loss 3.854017734527588\n" ] ], [ [ "# Generation & storing new samples", "_____no_output_____" ] ], [ [ "class DeltaEncoderGenerator(nn.Module):\n def __init__(self, input_size=2048, hidden_size=8192, neck_size=16):\n encoder = nn.Sequential(\n nn.Linear(input_size * 2, hidden_size),\n nn.LeakyReLU(0.2),\n nn.Dropout(0.5),\n \n nn.Linear(hidden_size, neck_size),\n )\n \n decoder = nn.Sequential(\n nn.Linear(input_size + neck_size, hidden_size),\n nn.LeakyReLU(0.2),\n nn.Dropout(0.5),\n \n nn.Linear(hidden_size, input_size),\n )\n dropout = nn.Dropout(0.5)\n \n super(DeltaEncoderGenerator, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.dropout = dropout\n\n def forward(self, X1, X2, shot):\n out = self.dropout(X1)\n out = torch.cat((out, X2), dim=1)\n out = self.encoder(out)\n \n out = torch.cat((shot, out), dim=1)\n out = self.decoder(out)\n return out", "_____no_output_____" ], [ "G_trained = DeltaEncoderGenerator(2048, 512, 8).to(device)\nG_trained.load_state_dict(G.state_dict())", "_____no_output_____" ], [ "num_shots = 1\nepisode = torch.zeros(1, 5, num_shots, 2048, device=device, requires_grad=False)\n\nfor i in range(5):\n with open('Data/PickledClasses/' + str(95 + i), 'rb') as f:\n data = pkl.load(f)\n \n shot_numbers = np.random.randint(0, 600, size=num_shots)\n episode[0][i][:num_shots] = data[shot_numbers]\n \nepisode.shape", "_____no_output_____" ], [ "# store samples to cpu!\n\nbatch_size = 128\ngen_size = 1024\ntrain_size = 48000\nclass_data = torch.zeros(gen_size, 2048, device=device, dtype=torch.float32, requires_grad=False)\n\nfor class_num in range(5):\n indices = np.random.randint(low=0, high=train_size // batch_size, size=gen_size // batch_size)\n j = 0\n for i in indices:\n features, reference_features, labels = loader.batch_load(i * batch_size, (i + 1) * batch_size)\n features = torch.tensor(features, device=device, dtype=torch.float32, requires_grad=False)\n reference_features = torch.tensor(reference_features, device=device, dtype=torch.float32, requires_grad=False)\n \n shot = episode[0][class_num].expand(batch_size, 2048)\n class_data[j * batch_size:(j + 1) * batch_size] = G_trained(features, reference_features, shot).detach()\n j += 1\n \n with open('Data/DEFeatures/' + str(class_num), 'wb') as f:\n pkl.dump(class_data, f)\n print('Class {} processed.'.format(class_num))", "Class 0 
processed.\nClass 1 processed.\nClass 2 processed.\nClass 3 processed.\nClass 4 processed.\n" ], [ "batch_size = 128\nclass_size = 1024\ntrain_size = 48000\npack_features = np.zeros((5, 2, 1024, 2048), dtype=np.float32)\n\ntotal_indices = np.random.permutation(train_size // batch_size)\nfor class_num in range(5):\n indices = total_indices[class_num * (class_size // batch_size):(class_num + 1) * (class_size // batch_size)]\n j = 0\n for i in indices:\n features, reference_features, labels = loader.batch_load(i * batch_size, (i + 1) * batch_size)\n pack_features[class_num][0][j * batch_size:(j + 1) * batch_size] = features\n pack_features[class_num][1][j * batch_size:(j + 1) * batch_size] = reference_features\n j += 1\n \nwith open('Data/SynthMaterial/0', 'wb') as f:\n pkl.dump({'features': pack_features}, f)\nprint('Package processed.')", "Package processed.\n" ], [ "pack_features.shape", "_____no_output_____" ] ], [ [ "# Training target classyfier", "_____no_output_____" ] ], [ [ "class BatchLoader():\n def __init__(self, class_size, num_classes, first_class, batch_size, batches_in_buff, path):\n self.class_size = class_size\n self.num_classes = num_classes\n self.first_class = first_class\n self.batch_size = batch_size\n self.batches_in_buff = batches_in_buff\n self.path = path\n \n self.indices = np.random.permutation(num_classes * class_size)\n self.buff_size = batches_in_buff * batch_size\n self.buff = [{'label': 0, 'features': torch.zeros(2048, device=device)} for i in range(self.buff_size)]\n self.buff_num = 0\n \n def buff_gen(self, buff_num):\n buff_indices = self.indices[buff_num * self.buff_size:(buff_num + 1) * self.buff_size]\n\n for i in range(self.num_classes):\n with open(self.path + str(self.first_class + i), 'rb') as f:\n class_data = pkl.load(f)\n\n class_indices = np.where(((buff_indices < (i + 1) * self.class_size) & (buff_indices >= i * self.class_size)))[0]\n for j in class_indices:\n self.buff[j] = {\n 'label': i,\n 'features': class_data[buff_indices[j] % self.class_size]\n }\n \n def batch_load(self, i):\n buff_i = i % self.batches_in_buff\n if (buff_i == 0):\n self.buff_gen(self.buff_num)\n self.buff_num += 1\n \n return self.buff[buff_i * self.batch_size:(buff_i + 1) * self.batch_size]", "_____no_output_____" ], [ "class Classyfier(nn.Module):\n def __init__(self):\n fc_layers = nn.Sequential(\n nn.Linear(2048, 5),\n # nn.Linear(512, 256),\n # nn.Linear(256, 5),\n nn.Softmax(dim=1)\n )\n super(Classyfier, self).__init__()\n self.fc = fc_layers\n \n def forward(self, x):\n out = self.fc(x)\n return out", "_____no_output_____" ], [ "classyfier = Classyfier().to(device)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(classyfier.parameters(), lr=0.0001)", "_____no_output_____" ], [ "class_size = 1024 # + 21997\nnum_classes = 5\nfirst_class = 0\ntrain_size = class_size * num_classes\nbatch_size = 128\nbatches_in_buff = 128\nbuff_size = batch_size * batches_in_buff\n\nfor epoch in range(3):\n loader = BatchLoader(class_size, num_classes, first_class, batch_size, batches_in_buff, 'Data/DEFeatures/')\n for i in range(train_size // batch_size):\n batch_tuple = loader.batch_load(i)\n images = torch.zeros(batch_size, 2048, device=device, requires_grad=False)\n labels = torch.zeros(batch_size, device=device, requires_grad=False, dtype=int)\n for k in range(batch_size):\n images[k] = batch_tuple[k]['features']\n labels[k] = batch_tuple[k]['label']\n \n predict = classyfier(images)\n loss = criterion(predict, labels)\n optimizer.zero_grad()\n loss.backward()\n 
optimizer.step()\n \n loss_value = loss.detach()\n \n # if (epoch % 10 == 0):\n print('Epoch {} Loss = {}'.format(epoch, loss_value))", "Epoch 0 Loss = 1.2448004484176636\nEpoch 1 Loss = 1.2351857423782349\nEpoch 2 Loss = 1.1250107288360596\n" ], [ "class_size = 600\nnum_classes = 5\nfirst_class = 95\ntrain_size = class_size * 5\nbatch_size = 100\nbatches_in_buff = 10\nbuff_size = batch_size * batches_in_buff\nloader = BatchLoader(class_size, num_classes, first_class, batch_size, batches_in_buff, 'Data/PickledClasses/')\n\ncorrect = 0\ntotal = 0\nfor i in range(train_size // batch_size):\n batch_tuple = loader.batch_load(i)\n images = torch.zeros(batch_size, 2048, device=device, requires_grad=False)\n labels = torch.zeros(batch_size, device=device, requires_grad=False, dtype=int)\n for k in range(batch_size):\n images[k] = batch_tuple[k]['features']\n labels[k] = batch_tuple[k]['label'] # don't forget about this\n \n predict = classyfier(images)\n _, predicted = torch.max(predict.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\naccuracy = 100 * correct / total\nprint('Accuracy on FSL task = {} %'.format(accuracy))", "Accuracy on FSL task = 74 %\n" ] ], [ [ "# Saving and loading models", "_____no_output_____" ] ], [ [ "torch.save(G.to('cpu').state_dict(), 'Models/G')\nG.to(device)", "_____no_output_____" ], [ "torch.save(classyfier.to('cpu').state_dict(), 'Models/classyfier')\nclassyfier.to(device)", "_____no_output_____" ], [ "classyfier.load_state_dict(torch.load(\"Models/classyfier\"))", "_____no_output_____" ], [ "G.load_state_dict(torch.load(\"Models/G\"))", "_____no_output_____" ], [ "episode = torch.load('episode.pt')\nepisode.shape", "_____no_output_____" ] ] ]
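(A hedged sketch of how the saved generator might be reused after reloading; it assumes the `DeltaEncoderGenerator` class and the `Models/G` checkpoint from the cells above, and uses random tensors as stand-ins for real 2048-d ResNet features.)

```python
# Reload the trained generator and synthesize features for one new "shot".
import torch

G_loaded = DeltaEncoderGenerator(2048, 512, 8).to(device)
G_loaded.load_state_dict(torch.load('Models/G', map_location=device))
G_loaded.train()  # dropout left active, as in the generation cells above, for output diversity

with torch.no_grad():
    X1 = torch.randn(32, 2048, device=device)                    # stand-in source features
    X2 = torch.randn(32, 2048, device=device)                    # stand-in reference pairs
    shot = torch.randn(1, 2048, device=device).expand(32, 2048)  # the single example of a new class
    synthetic = G_loaded(X1, X2, shot)  # deltas extracted from (X1, X2), applied to the shot

print(synthetic.shape)  # torch.Size([32, 2048])
```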
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e718a4d86d573d4ff2462f58fc66780e3fa1b125
867,626
ipynb
Jupyter Notebook
Session10/Day2/MultiDimensionalMCMCSolutions.ipynb
rmorgan10/LSSTC-DSFP-Sessions
1d0b3c28fe7f6f93e00e332e74873e6d1ec29d0b
[ "MIT" ]
null
null
null
Session10/Day2/MultiDimensionalMCMCSolutions.ipynb
rmorgan10/LSSTC-DSFP-Sessions
1d0b3c28fe7f6f93e00e332e74873e6d1ec29d0b
[ "MIT" ]
null
null
null
Session10/Day2/MultiDimensionalMCMCSolutions.ipynb
rmorgan10/LSSTC-DSFP-Sessions
1d0b3c28fe7f6f93e00e332e74873e6d1ec29d0b
[ "MIT" ]
null
null
null
517.676611
491,678
0.93344
[ [ [ "If you are running this notebook on [google collab](https://colab.research.google.com/notebooks/welcome.ipynb#recent=true), uncomment and execute the cell below. Otherwise you can jump down to the other import statements.", "_____no_output_____" ] ], [ [ "#!pip install emcee==3.0rc2\n#!pip install corner", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize, newton\nimport emcee\nimport corner\n\nimport matplotlib.pyplot as plt\n\nnp.random.seed(42)", "_____no_output_____" ] ], [ [ "# Multi-Dimensional Integration with MCMC\n\n-----\n\nBy Megan Bedell (Flatiron Institute)\n\n10 September 2019", "_____no_output_____" ], [ "### Problem 1: Fitting a Sinusoid to Data\n\nIn this example, we will download a time series of radial velocities for the star HD209458. This star hosts a Hot Jupiter exoplanet. In fact, [this planet](https://en.wikipedia.org/wiki/HD_209458_b) was the first to be seen in transit and was discovered 20 years ago yesterday!\n\nBecause the eccentricity is low for this planet, we can fit its orbit in the radial velocities with a relatively simple model: a sinusoid.", "_____no_output_____" ], [ "Below is a snippet of code that will download the time-series data from [NASA Exoplanet Archive](https://exoplanetarchive.ipac.caltech.edu/):", "_____no_output_____" ] ], [ [ "datafile = 'https://exoplanetarchive.ipac.caltech.edu/data/ExoData/0108/0108859/data/UID_0108859_RVC_001.tbl'\ndata = pd.read_fwf(datafile, header=0, names=['t', 'rv', 'rv_err'], skiprows=22)\ndata['t'] -= data['t'][0]", "_____no_output_____" ] ], [ [ "#### Problem 1a\nPlot the data. Let's take a look at what we're working with!", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nax.errorbar(data['t'], data['rv'], data['rv_err'], fmt='o', ms=4)\nax.set_xlabel('Time (days)')\nax.set_ylabel(r'RV (m s$^{-1}$)');", "_____no_output_____" ] ], [ [ "#### Problem 1b\n\nWrite the sinusoid function that we want to fit and get ready to run MCMC with helper functions.", "_____no_output_____" ], [ "First let's write a \"get_model_predictions\" function - this will resemble yesterday's same-named function, but instead of returning a line it should return a sinusoid. I suggest using the following free parameters, although there are a few alternative options that you may use instead:\n\n\n\n```\ntheta = [period, # period of the sinusoid\n amplitude, # semi-amplitude of the sinusoid\n t0, # reference x at which sine phase = 0\n rv0] # constant offset in y\n```\n\n", "_____no_output_____" ] ], [ [ "def get_model_predictions(theta, t):\n '''\n Calculate RV predictions for parameters theta and timestamps t.\n '''\n period, amplitude, t0, rv0 = theta\n model_preds = amplitude * np.sin(2. * np.pi / period * (t - t0)) + rv0\n return model_preds", "_____no_output_____" ] ], [ [ "Write a lnprior function with flat priors on all parameters - again, this will be similar to yesterday's function, but with different values.\n\n*Hint: some of the bounds on these parameters will be physically motivated (i.e. orbital period cannot be negative). 
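(One hedged way Problem 2e could be tackled, using the `corner` package imported at the top of the notebook. It assumes `pos` holds the Keplerian chain from Problem 2d; the 1000-step burn-in cut is an arbitrary illustrative choice.)

```python
# Corner plot of the Keplerian posterior samples; the burn-in length is a guess.
labels = ['P (days)', r'K (m s$^{-1}$)', 'ecc', r'$\omega$', r'$T_p$', r'RV$_0$']
fig = corner.corner(pos[1000:], labels=labels, show_titles=True)

# Look for tilted or banana-shaped panels: for eccentric orbits, omega and Tp
# (and often ecc with K) tend to show the strongest correlations.
```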
For others, you'll need to guess something reasonable but generous - i.e., a Hot Jupiter planet probably does not have an orbital period above a year or so.*", "_____no_output_____" ] ], [ [ "def lnprior(theta):\n period, amplitude, t0, rv0 = theta\n if 0 < period <= 1e4 and 0 <= amplitude <= 1e3: # physical priors\n lnp = np.log(1e-4) + np.log(1e-3)\n else:\n return -np.inf\n if np.abs(t0) <= 1e3 and np.abs(rv0) <= 1e3: # generous flat priors\n lnp += 2 * np.log(1/2e3)\n else:\n return -np.inf \n return lnp", "_____no_output_____" ] ], [ [ "The following functions can be reused as-is from the previous day's Metropolis-Hastings exercise, so just copy-and-paste or import them:\n\nlnlikelihood, lnposterior, hastings_ratio, propose_jump, mh_mcmc\n\n", "_____no_output_____" ] ], [ [ "def lnlikelihood(theta, y, x, y_unc):\n model_preds = get_model_predictions(theta, x)\n \n lnl = -np.sum((y-model_preds)**2/(2*y_unc**2))\n \n return lnl\n \n\ndef lnposterior(theta, y, x, y_unc):\n lnp = lnprior(theta)\n if not np.isfinite(lnp):\n return -np.inf\n lnl = lnlikelihood(theta, y, x, y_unc)\n lnpost = lnl + lnp\n \n return lnpost\n\ndef hastings_ratio(theta_1, theta_0, y, x, y_unc):\n lnpost1 = lnposterior(theta_1, y, x, y_unc)\n lnpost0 = lnposterior(theta_0, y, x, y_unc)\n \n h_ratio = np.exp(lnpost1 - lnpost0)\n \n return h_ratio\n \n\ndef propose_jump(theta, cov):\n if np.shape(theta) == np.shape(cov):\n cov = np.diag(np.array(cov)**2)\n \n proposed_position = np.random.multivariate_normal(theta, cov)\n \n return proposed_position\n \ndef mh_mcmc(theta_0, cov, nsteps, y, x, y_unc):\n \n positions = np.zeros((nsteps+1, len(theta_0)))\n lnpost_at_pos = -np.inf*np.ones(nsteps+1)\n acceptance_ratio = np.zeros_like(lnpost_at_pos)\n accepted = 0\n \n positions[0] = theta_0\n lnpost_at_pos[0] = lnposterior(theta_0, y, x, y_unc)\n \n for step_num in np.arange(1, nsteps+1):\n proposal = propose_jump(positions[step_num-1], cov)\n H = hastings_ratio(proposal, positions[step_num-1], y, x, y_unc)\n R = np.random.uniform()\n \n if H > R:\n accepted += 1\n positions[step_num] = proposal\n lnpost_at_pos[step_num] = lnposterior(proposal, y, x, y_unc)\n acceptance_ratio[step_num] = float(accepted)/step_num\n else:\n positions[step_num] = positions[step_num-1]\n lnpost_at_pos[step_num] = lnpost_at_pos[step_num-1]\n acceptance_ratio[step_num] = float(accepted)/step_num\n \n return (positions, lnpost_at_pos, acceptance_ratio)\n ", "_____no_output_____" ] ], [ [ "#### Problem 1c\n\nRun the MCMC.", "_____no_output_____" ], [ "Let's start with initialization values.\n\nTo save some time, I will assert that if we made a Lomb-Scargle periodogram of the RVs, there would be a peak near period = 3.53 days, so start with that guess and let's figure out what the best values might be for the other parameters.\n\n(If you finish early and are up for a bonus problem, you can double-check my assertion using [astropy timeseries](https://docs.astropy.org/en/stable/timeseries/lombscargle.html)!)", "_____no_output_____" ] ], [ [ "theta_0 = [3.53, 80, 0, 0] # [period, amplitude, t0, rv0] starting guesses", "_____no_output_____" ] ], [ [ "Now run the MCMC for 5000 steps. I'll give you (the diagonal of a) covariance matrix to start with. 
As you saw yesterday afternoon, this `cov` parameter sets the step sizes that the M-H algorithm will use when it proposes new values.", "_____no_output_____" ] ], [ [ "cov = [0.01, 1, 0.05, 0.01]\npos, lnpost, acc = mh_mcmc(theta_0, cov, 5000, \n data['rv'], data['t'], data['rv_err'])", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:22: RuntimeWarning: overflow encountered in exp\n" ] ], [ [ "Do a pairs plot for the first two parameters. Does the behavior of this chain seem efficient?", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nax.plot(pos[:,0], pos[:,1], 'o-', alpha=0.3)\nax.plot(theta_0[0], theta_0[1], '*', ms=30, \n mfc='Crimson', mec='0.8', mew=2, \n alpha=0.7)\nax.set_xlabel('Period', fontsize=14)\nax.set_ylabel(r'K (m s$^{-1}$)', fontsize=14)\nfig.tight_layout()", "_____no_output_____" ] ], [ [ "This chain looks super inefficient for a couple of reasons: one, it's wandering from from the starting point, which implies a poor initialization and would require us to drop samples from the beginning (burn-in); and two, the acceptance fraction is low and it spends a long time at each point.", "_____no_output_____" ], [ "#### Problem 1d\n\nThere were a couple of issues with the previous MCMC run. Let's start with this one: we started the chains running at a place that was not very close to the best-fit solution.\n\nFind a better set of initialization values by optimizing *before* we run the MCMC.", "_____no_output_____" ], [ "We'll use scipy.optimize.minimize to get best-fit parameters. Remember that the lnlikelihood function needs to be *maximized* not *minimized*, so we'll need a new function that works the same way, but negative.", "_____no_output_____" ] ], [ [ "def nll(*par):\n '''\n The negative ln(likelihood).\n '''\n return -1. * lnlikelihood(*par)\n\n \nres = minimize(nll, theta_0, \n args=(data['rv'], data['t'], data['rv_err']),\n method='Powell')\nprint('Optimizer finished with message \"{0}\" and \\n\\\n best-fit parameters {1}'.format(res['message'], res['x']))", "Optimizer finished with message \"Optimization terminated successfully.\" and \n best-fit parameters [ 3.52472405 84.28970101 0.21361303 0.14178435]\n" ] ], [ [ "Plot the data points and your best-fit model. Does the fit look reasonable? (You may need to zoom into a small time range to tell.)", "_____no_output_____" ] ], [ [ "plt.errorbar(data['t'], data['rv'], data['rv_err'],\n fmt='o', ms=4)\nxs = np.linspace(-0.1, 6, 1000)\nplt.plot(xs, get_model_predictions(res['x'], xs), c='DarkOrange')\nplt.xlim([-0.1,6])\nplt.xlabel('Time (days)')\nplt.ylabel(r'RV (m s$^{-1}$)');", "_____no_output_____" ] ], [ [ "Another way to see if we're on the right track is to plot the data phased to the orbital period that we found. Do that and optionally overplot the phased model as well.", "_____no_output_____" ] ], [ [ "period, amplitude, t0, rv0 = res['x']\n\nfig, ax = plt.subplots()\nphased_t = (data['t'] - t0) % period\nax.errorbar(phased_t / period, data['rv'], data['rv_err'],\n fmt='o', ms=4)\nphase_xs = np.linspace(0, period, 100)\nax.plot(phase_xs / period, get_model_predictions(res['x'], phase_xs + t0), \n c='DarkOrange')\nax.set_xlabel('Phase')\nax.set_ylabel(r'RV (m s$^{-1}$)');", "_____no_output_____" ] ], [ [ "Now re-run the MCMC using these parameters as the initial values and make another pairs plot. Again, I'm going to give you some step size parameters to start with. 
Because we're now initializing the chain close to the likelihood maximum, we don't want it to move too far away, so I've lowered the values of `cov`.", "_____no_output_____" ] ], [ [ "theta_bestfit = res['x']\ncov = [0.001, 0.1, 0.01, 0.1]\n\npos, lnpost, acc = mh_mcmc(theta_bestfit, cov, 5000, \n data['rv'], data['t'], data['rv_err'])", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(pos[:,0], pos[:,1], 'o-', alpha=0.3)\nax.plot(theta_bestfit[0], theta_bestfit[1], '*', ms=30, \n mfc='Crimson', mec='0.8', mew=2, \n alpha=0.7)\nax.set_xlabel('Period', fontsize=14)\nax.set_ylabel(r'K (m s$^{-1}$)', fontsize=14)\nfig.tight_layout()", "_____no_output_____" ] ], [ [ "The chain is now staying relatively stationary, which is good! However, it's still spending a long time at each point.", "_____no_output_____" ], [ "#### Problem 1e\n\nNow let's tackle another issues: chain efficiency. Calculate the auto-correlation length of your chain.", "_____no_output_____" ], [ "First, let's just plot the sequence of orbital period values in the chain in a trace plot. From eyeballing this sequence, about how many steps do you think are needed to reach a sample that is independent from the previous one(s)?", "_____no_output_____" ] ], [ [ "plt.plot(pos[:,0]);", "_____no_output_____" ] ], [ [ "Writing an autocorrelation function for this purpose actually gets a bit tricky, so we'll use the built-in functionality of emcee.\n\nFor the documentation on these functions, check [the emcee user guide](https://emcee.readthedocs.io/en/latest/user/autocorr/).\n\nFor a more in-depth look at how this is calculated and why it's tricky, check out [this tutorial](https://emcee.readthedocs.io/en/latest/tutorials/autocorr/).", "_____no_output_____" ] ], [ [ "acf = emcee.autocorr.function_1d(pos[:,0])\nplt.plot(acf)\nplt.xlabel('Lag')\nplt.ylabel('Normalized ACF');", "_____no_output_____" ], [ "act = emcee.autocorr.integrated_time(pos[:,0], quiet=True)\nprint('The integrated autocorrelation time is estimated as: {0}'.format(act))", "WARNING:root:The chain is shorter than 50 times the integrated autocorrelation time for 1 parameter(s). Use this estimate with caution and run a longer chain!\nN/50 = 100;\ntau: [334.66856313]\n" ] ], [ [ "This is worrying - it means we have achieved very few actual independent draws from the posterior in our chain.", "_____no_output_____" ], [ "#### Problem 1f\n\nChange the step size of the MCMC. What does this do to the auto-correlation length? Does this seem better or worse, and why?", "_____no_output_____" ] ], [ [ "cov = [0.0001, 0.1, 0.01, 0.1]\npos, lnpost, acc = mh_mcmc(theta_bestfit, cov, 5000, \n data['rv'], data['t'], data['rv_err'])", "_____no_output_____" ], [ "plt.plot(pos[:,0]);", "_____no_output_____" ], [ "acf = emcee.autocorr.function_1d(pos[:,0])\nplt.plot(acf)\nplt.xlabel('Lag')\nplt.ylabel('Normalized ACF');", "_____no_output_____" ], [ "act = emcee.autocorr.integrated_time(pos[:,0])\nprint('The integrated autocorrelation time is estimated as: {0}'.format(act))", "The integrated autocorrelation time is estimated as: [17.10591179]\n" ] ], [ [ "Much better!!", "_____no_output_____" ], [ "#### Problem 1g\n\nUsing the step sizes and starting conditions that you deem best, run your MCMC for *at least* 500x the auto-correlation length to get a large number of independent samples. Plot the posterior distribution of radial velocity semi-amplitude K. 
This parameter is arguably the most important output of an RV fit, because it is a measurement of the mass of the planet.", "_____no_output_____" ] ], [ [ "pos, lnpost, acc = mh_mcmc(theta_bestfit, cov, 20000, \n data['rv'], data['t'], data['rv_err'])", "_____no_output_____" ], [ "plt.hist(pos[:,1])\nplt.xlabel(r'K (m s$^{-1}$)');", "_____no_output_____" ] ], [ [ "From these results, what can we say about the true value of K? What is the probability that K > 84 m/s? 85 m/s? 90 m/s? Are these numbers a reliable estimator of the true probability, in your opinion?", "_____no_output_____" ] ], [ [ "N_tot = len(pos[:,1])\n\nprint('The probability that K > 84 m/s is: {0:.2f}'.format(np.sum(pos[:,1] > 84.)/N_tot))\nprint('The probability that K > 85 m/s is: {0:.2f}'.format(np.sum(pos[:,1] > 85.)/N_tot))\nprint('The probability that K > 90 m/s is: {0:.2f}'.format(np.sum(pos[:,1] > 90.)/N_tot))", "The probability that K > 84 m/s is: 0.67\nThe probability that K > 85 m/s is: 0.12\nThe probability that K > 90 m/s is: 0.00\n" ] ], [ [ "Note: we have not actually sampled parameter space around K > 90 m/s, so take this estimate with a grain of salt -- we can certainly conclude that the probability of K > 90 is low, but we'd need to actually calculate posterior values around K = 90 before we'd have a reliable estimate of the PDF there.", "_____no_output_____" ], [ "#### Challenge Problem 1h\n\nTry some different values of `cov[0]` (the step size for the orbital period). Make a plot of the acceptance fraction as a function of step size. Does this make sense?", "_____no_output_____" ], [ "#### Challenge Problem 1i\n\nFor different values of `cov[0]`, plot the correlation length. Does this make sense?", "_____no_output_____" ], [ "### Problem 2: Fitting a Keplerian to Data\n\nIn the previous example, the orbit we were fitting had negligible eccentricity, so we were able to fit it with a sinusoid. In this example, we'll look at the high-eccentricity planet HD 80606b and fit a full Keplerian model to its RV data. This requires introducing some new free parameters to the model, which as we will see are not always straightforward to sample!", "_____no_output_____" ] ], [ [ "datafile = 'https://exoplanetarchive.ipac.caltech.edu/data/ExoData/0045/0045982/data/UID_0045982_RVC_006.tbl'\ndata = pd.read_fwf(datafile, header=0, names=['t', 'rv', 'rv_err'], skiprows=21)\ndata['t'] -= data['t'][0]", "_____no_output_____" ] ], [ [ "#### Problem 2a\n\nAgain, let's start by plotting the data. Make plots of the time series and the time series phased to a period of 111.4 days.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nax.errorbar(data['t'], data['rv'], data['rv_err'], fmt='o', ms=4)\nax.set_xlabel('Time (days)')\nax.set_ylabel(r'RV (m s$^{-1}$)');", "_____no_output_____" ], [ "plt.errorbar(data['t'] % 111.4, data['rv'], data['rv_err'], fmt='o', ms=4)\nplt.xlabel('Phase (days)')\nplt.ylabel(r'RV (m s$^{-1}$)');", "_____no_output_____" ] ], [ [ "This planet's orbit should look pretty different from a sine wave!", "_____no_output_____" ], [ "#### Problem 2b\n\nRemake the `get_model_predictions` and `lnprior` functions to fit a Keplerian.\n\nSince this is a bit in the weeds of astronomy for the purposes of this workshop, I've gone ahead and written a solver for Kepler's equation and a `get_model_predictions` function that will deliver RVs for you. 
Read over the docstring and use the information given there to write a `lnprior` function for `theta`.", "_____no_output_____" ] ], [ [ "def calc_ea(ma, ecc):\n # Kepler solver - calculates eccentric anomaly\n tolerance = 1e-3\n ea = np.copy(ma)\n while True:\n diff = ea - ecc * np.sin(ea) - ma\n ea -= diff / (1. - ecc * np.cos(ea))\n if abs(diff).all() <= tolerance:\n break\n return ea \n \ndef get_model_predictions(theta, t):\n '''\n Calculate Keplerian orbital RVs\n \n Input\n -----\n theta : list\n A list of values for the following parameters:\n Orbital period,\n RV semi-amplitude,\n eccentricity (between 0-1),\n omega (argument of periastron; an angle in radians\n denoting the orbital phase where the planet\n passes closest to the host star)\n Tp (time of periastron; reference timestamp for the above)\n RV0 (constant RV offset)\n \n t : list or array\n Timestamps at which to calculate the RV\n \n Returns\n -------\n rvs : list or array\n Predicted RVs at the input times.\n '''\n P, K, ecc, omega, tp, rv0 = theta\n \n ma = 2. * np.pi / P * (t - tp) # mean anomaly\n ea = calc_ea(ma, ecc) # eccentric anomaly\n\n f = 2.0 * np.arctan2(np.sqrt(1+ecc)*np.sin(ea/2.0), \n np.sqrt(1-ecc)*np.cos(ea/2.0)) # true anomaly\n rvs = - K * (np.cos(omega + f) + ecc*np.cos(omega))\n return rvs + rv0\n \ndef lnprior(theta):\n period, amplitude, ecc, omega, tp, rv0 = theta\n if 0 < period <= 1e5 and 0 <= amplitude <= 1e4: # physical priors\n lnp = np.log(1e-5) + np.log(1e-4)\n else:\n return -np.inf\n if np.abs(tp) <= 1e4 and np.abs(rv0) <= 1e4: # generous flat priors\n lnp += 2 * np.log(1/2e4)\n else:\n return -np.inf \n if 0 <= ecc < 1 and 0 < omega < 2*np.pi: # more physical priors\n lnp += np.log(1) + np.log(1/(2*np.pi))\n else: \n return -np.inf\n return lnp", "_____no_output_____" ] ], [ [ "#### Problem 2c\n\nPlay around with the starting parameters until you're convinced that you have a reasonable fit.", "_____no_output_____" ] ], [ [ "theta_0 = [111.4, 480, 0.95, 2.0, 89, -200] # P, K, ecc, omega, tp, rv0\n\nplt.errorbar(data['t'], data['rv'], data['rv_err'],\n fmt='o', ms=4)\nxs = np.linspace(900, 1050, 1000)\nplt.plot(xs, get_model_predictions(theta_0, xs), c='DarkOrange')\nplt.xlim([900,1050])\nplt.xlabel('Time (days)')\nplt.ylabel(r'RV (m s$^{-1}$)');", "_____no_output_____" ] ], [ [ "#### Problem 2d\n\nRun the MCMC for 1000 steps and plot a trace of the eccentricity parameter. How efficiently is it running?\n\nOptional challenge: if you wrote a Gibbs sampler yesterday, use that instead of Metropolis-Hastings here!", "_____no_output_____" ] ], [ [ "cov = [0.1, 100, 0.01, 0.1, 0.1, 100]\n\npos, lnpost, acc = mh_mcmc(theta_0, cov, 10000, \n data['rv'], data['t'], data['rv_err'])", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:22: RuntimeWarning: overflow encountered in exp\n" ], [ "plt.plot(pos[:,2])\nplt.ylabel('Eccentricity')\nplt.xlabel('Step');", "_____no_output_____" ] ], [ [ "#### Problem 2e\n\nMake a corner plot of the results. Which parameters seem most correlated? 
Which are most and least well-constrained by the data?", "_____no_output_____" ] ], [ [ "corner.corner(pos, labels=['Period (days)', r\"K (m s$^{-1}$)\", \n r\"$e$\", r\"$\\omega$\", \n r\"T$_p$\", r\"RV$_0$ (m s$^{-1}$)\"]);", "WARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\nWARNING:root:Too few points to create valid contours\n" ] ], [ [ "It's hard to tell since we have very few independent samples, but $e$ and $\\omega$ are definitely both highly correlated with many parameters and with each other!", "_____no_output_____" ], [ "#### Problem 2f\n\n[Ford (2005)](https://ui.adsabs.harvard.edu/abs/2005AJ....129.1706F/abstract) suggests mitigating this issue by reparameterizing the orbital parameters $e$ and $\\omega$ as $e cos\\omega$ and $e sin\\omega$. Modify the `get_model_predictions` and `lnprior` functions accordingly and rerun the MCMC. Does performance improve?\n\n*Note: the efficiency of a basic MCMC in this situation is never going to be excellent. We'll talk more about challenging cases like this and how to deal with them in later lectures!*", "_____no_output_____" ] ], [ [ "def get_model_predictions(theta, t):\n P, K, esinw, ecosw, tp, rv0 = theta\n \n omega = np.arctan2(esinw, ecosw)\n ecc = esinw / np.sin(omega)\n \n ma = 2. * np.pi / P * (t - tp) # mean anomaly\n ea = calc_ea(ma, ecc) # eccentric anomaly\n\n f = 2.0 * np.arctan2(np.sqrt(1+ecc)*np.sin(ea/2.0), \n np.sqrt(1-ecc)*np.cos(ea/2.0)) # true anomaly\n rvs = - K * (np.cos(omega + f) + ecc*np.cos(omega))\n return rvs + rv0\n \ndef lnprior(theta):\n period, amplitude, esinw, ecosw, tp, rv0 = theta\n if 0 < period <= 1e5 and 0 <= amplitude <= 1e4: # physical priors\n lnp = np.log(1e-5) + np.log(1e-4)\n else:\n return -np.inf\n if np.abs(tp) <= 1e4 and np.abs(rv0) <= 1e4: # generous flat priors\n lnp += 2 * np.log(1/2e4)\n else:\n return -np.inf \n if -1 <= esinw < 1 and -1 < ecosw < 1: # more physical priors\n lnp += 2 * np.log(1/2)\n else: \n return -np.inf\n return lnp", "_____no_output_____" ], [ "theta_0 = [111.4, 480, 0.95 * np.cos(2), 0.95 * np.sin(2), 89, -200]\ncov = [0.1, 100, 0.1, 0.1, 0.1, 100]\n\npos, lnpost, acc = mh_mcmc(theta_0, cov, 5000, \n data['rv'], data['t'], data['rv_err'])", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:11: RuntimeWarning: invalid value encountered in sqrt\n # This is added back by InteractiveShellApp.init_path()\n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:22: RuntimeWarning: overflow encountered in exp\n" ], [ "plt.plot(pos[:,2])\nplt.ylabel('ecosw')\nplt.xlabel('Step');", "_____no_output_____" ], [ "corner.corner(pos, labels=['Period (days)', r\"K (m s$^{-1}$)\", \n r\"$e\\sin(\\omega)$\", r\"$e\\cos(\\omega)$\", \n r\"T$_p$\", r\"RV$_0$ (m s$^{-1}$)\"]);", "_____no_output_____" ] ] ]
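For Challenge Problems 1h and 1i above, here is a minimal, self-contained sketch of the step-size/acceptance-fraction trade-off. It uses a toy one-dimensional Gaussian target rather than the notebook's `mh_mcmc` and RV likelihood, so the function name and numbers are illustrative assumptions, not the tutorial's code:

```python
import numpy as np

def toy_mh(step, n_steps=20000, seed=42):
    """Metropolis-Hastings on a standard normal target; returns the acceptance fraction."""
    rng = np.random.default_rng(seed)
    log_p = lambda x: -0.5 * x * x            # log density up to an additive constant
    x, n_acc = 0.0, 0
    for _ in range(n_steps):
        prop = x + rng.normal(scale=step)     # symmetric Gaussian proposal
        if np.log(rng.uniform()) < log_p(prop) - log_p(x):
            x, n_acc = prop, n_acc + 1        # accept and move
    return n_acc / n_steps

for step in [0.01, 0.1, 1.0, 10.0, 100.0]:
    print(f"step = {step:>6}: acceptance fraction = {toy_mh(step):.2f}")
```

The generic pattern, acceptance near 1 for tiny steps and near 0 for huge ones, is what those two challenge problems probe; the efficient middle ground is where the autocorrelation length is shortest.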
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
e718aa876996f6d9d7b5ab20a6ef979af753c339
453,557
ipynb
Jupyter Notebook
Code/IBM Watson_dataset/.ipynb_checkpoints/ibm-watson-marketing-data-analysis-prediction-checkpoint.ipynb
singh-sudo098/Customer-Lifetime-Value-prediction-using-Machine-Learning
09de04c79f541c67634a89178935e6e77d2e910d
[ "MIT" ]
null
null
null
Code/IBM Watson_dataset/.ipynb_checkpoints/ibm-watson-marketing-data-analysis-prediction-checkpoint.ipynb
singh-sudo098/Customer-Lifetime-Value-prediction-using-Machine-Learning
09de04c79f541c67634a89178935e6e77d2e910d
[ "MIT" ]
null
null
null
Code/IBM Watson_dataset/.ipynb_checkpoints/ibm-watson-marketing-data-analysis-prediction-checkpoint.ipynb
singh-sudo098/Customer-Lifetime-Value-prediction-using-Machine-Learning
09de04c79f541c67634a89178935e6e77d2e910d
[ "MIT" ]
1
2022-02-25T04:22:07.000Z
2022-02-25T04:22:07.000Z
179.41337
84,032
0.854739
[ [ [ "# Index\n\n1. Data Loading\n2. Exploratory Data Analysis (EDA)\n3. Regression Analysis with Continuous Variables Only\n4. Regression Analysis with Categorical Variables\n5. Regression Analysis with bith Continous and Categorical Variables\n6. Regression Analysis with excluding Non-significant variables\n7. Classification\n> 7.1 Support Vector Classification\n\n> 7.2 Random Forest Classifier\n\n8. Feature Importance\n", "_____no_output_____" ] ], [ [ "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns # Provides a high level interface for drawing attractive and informative statistical graphics\n%matplotlib inline\nsns.set()\nfrom subprocess import check_output\n\nimport warnings # Ignore warning related to pandas_profiling\nwarnings.filterwarnings('ignore') \n\ndef annot_plot(ax,w,h): # function to add data to plot\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n for p in ax.patches:\n ax.annotate(f\"{p.get_height() * 100 / df.shape[0]:.2f}%\", (p.get_x() + p.get_width() / 2., p.get_height()),\n ha='center', va='center', fontsize=11, color='black', rotation=0, xytext=(0, 10),\n textcoords='offset points') \ndef annot_plot_num(ax,w,h): # function to add data to plot\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n for p in ax.patches:\n ax.annotate('{0:.1f}'.format(p.get_height()), (p.get_x()+w, p.get_height()+h))\n\nimport os\nprint(os.listdir(\"../input\"))\n", "['WA_Fn-UseC_-Marketing-Customer-Value-Analysis.csv']\n" ] ], [ [ "# 1. Data Loading", "_____no_output_____" ] ], [ [ "df = pd.read_csv('../input/WA_Fn-UseC_-Marketing-Customer-Value-Analysis.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.Response = df.Response.apply(lambda X : 0 if X == 'No' else 1)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "# 2 2. 
Exploratory Data Analysis (EDA):", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 9134 entries, 0 to 9133\nData columns (total 24 columns):\nCustomer                         9134 non-null object\nState                            9134 non-null object\nCustomer Lifetime Value          9134 non-null float64\nResponse                         9134 non-null int64\nCoverage                         9134 non-null object\nEducation                        9134 non-null object\nEffective To Date                9134 non-null object\nEmploymentStatus                 9134 non-null object\nGender                           9134 non-null object\nIncome                           9134 non-null int64\nLocation Code                    9134 non-null object\nMarital Status                   9134 non-null object\nMonthly Premium Auto             9134 non-null int64\nMonths Since Last Claim          9134 non-null int64\nMonths Since Policy Inception    9134 non-null int64\nNumber of Open Complaints        9134 non-null int64\nNumber of Policies               9134 non-null int64\nPolicy Type                      9134 non-null object\nPolicy                           9134 non-null object\nRenew Offer Type                 9134 non-null object\nSales Channel                    9134 non-null object\nTotal Claim Amount               9134 non-null float64\nVehicle Class                    9134 non-null object\nVehicle Size                     9134 non-null object\ndtypes: float64(2), int64(7), object(15)\nmemory usage: 1.7+ MB\n" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ] ], [ [ "# 2.1 Response Rate:", "_____no_output_____" ] ], [ [ "ax = sns.countplot('Response',data = df)\nplt.ylabel('Total number of Response')\nannot_plot(ax, 0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "Notice that about 14% of customers have responded to marketing calls, and the remaining 86% of the customers have not responded.", "_____no_output_____" ] ], [ [ "ax = sns.countplot('Response',hue = 'Gender' ,data = df)\nplt.ylabel('Total number of Response')\nannot_plot(ax, 0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "Notice that the ratio of male to female customers **responding to a marketing call** is almost the same.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12,6))\nax = sns.countplot('Response', hue = df['Marital Status'], data = df)\nannot_plot(ax,0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "Notice that, of the 14% of customers who responded to marketing calls, about 8% fall in the married category. ", "_____no_output_____" ], [ "# 2.1.1 Response rate by renew offer", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(8,4))\nax = sns.countplot('Response',hue = 'Renew Offer Type' ,data = df)\nplt.ylabel('Total number of Response')\nannot_plot(ax, 0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "For offer1 and offer2, customers have responded to marketing calls, but for offer3 and offer4 almost nobody responded.", "_____no_output_____" ], [ "# 2.1.2 Response rate by Education", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(8,4))\nax = sns.countplot('Response',hue = 'Education' ,data = df)\nplt.ylabel('Total number of Response')\nannot_plot(ax, 0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "Notice that very few customers with a Doctor or Master degree responded to marketing calls; perhaps they are not interested or are too busy. 
Or we can say that younger people are the most likely to respond to marketing calls.", "_____no_output_____" ], [ "# 2.1.3 Response rate by Sales Channel", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(8,4))\nax = sns.countplot('Response',hue = 'Sales Channel' ,data = df)\nplt.ylabel('Total number of Response')\nannot_plot(ax, 0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "# 2.1.4 Response rate by Total Claim Amount", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12,6))\nsns.boxplot(y = 'Total Claim Amount' , x = 'Response', data = df)\nplt.ylabel('Total number of Response')\nplt.show()", "_____no_output_____" ] ], [ [ "Box plots are a great way to visualize the distribution of continuous variables. They show the min, max, first quartile, median and third quartile, all in one view. The central rectangle spans from the first quartile to the third quartile, and the green line shows the median. The lower and upper ends show the minimum and the maximum of each distribution.\n\n The dots above the upper boundary line show the suspected outliers, which are decided based on the **Interquartile Range (IQR)**. The points that fall **1.5*IQR** above the third quartile or **1.5*IQR** below the first quartile are suspected outliers and are drawn as dots.", "_____no_output_____" ], [ "# 2.1.5 Response rate by Income Distributions", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12,6))\nsns.boxplot(y = 'Income' , x = 'Response', data = df)\nplt.show()", "_____no_output_____" ] ], [ [ "# 2.1.6 Response rate by EmploymentStatus", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,6))\nax = sns.countplot('Response',hue = 'EmploymentStatus' ,data = df)\nplt.ylabel('Total number of Response')\nannot_plot(ax, 0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "# 2.1.7 Response rate by Vehicle Class:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,6))\nax = sns.countplot('Response',hue = 'Vehicle Class' ,data = df)\nplt.ylabel('Total number of Response')\nannot_plot(ax, 0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "# 2.1.8 Response rate by Policy:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(15,6))\nax = sns.countplot('Response',hue = 'Policy' ,data = df)\nplt.ylabel('Total number of Response')\nannot_plot(ax, 0.08,1)\nplt.show()", "_____no_output_____" ] ], [ [ "# 3. Regression Analysis with Continuous Variables Only", "_____no_output_____" ] ], [ [ "import statsmodels.api as sm", "_____no_output_____" ], [ "continous_var_df = df.select_dtypes(include=['int64','float'])\ncontinous_var_df.nunique()", "_____no_output_____" ], [ "continous_var_df.columns", "_____no_output_____" ], [ "continous_var_reg = sm.Logit(continous_var_df['Response'], continous_var_df.drop('Response', axis = 1))\ncontinous_var_reg.fit().summary()", "Optimization terminated successfully.\n         Current function value: 0.421189\n         Iterations 6\n" ] ], [ [ "By looking at the **p-values** of **'Income', 'Monthly Premium Auto', 'Months Since Last Claim', 'Months Since Policy Inception', 'Number of Open Complaints' and 'Number of Policies'**, these input variables seem to have significant relationships with the output (target) variable 'Response'. By looking at the coefficients, they are all **negatively correlated** with the Response variable.", "_____no_output_____" ], [ "**What Are P Values?**\n\nP values evaluate how well the sample data support the devil's advocate argument that the null hypothesis is true. They measure how compatible your data are with the null hypothesis. 
How likely is the effect observed in your sample data if the null hypothesis is true?\n\n> **High P values: your data are likely under a true null.**\n\n> **Low P values: your data are unlikely under a true null.**\n\nA low P value suggests that your sample provides enough evidence to reject the null hypothesis for the entire population.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,6))\nsns.heatmap(continous_var_df.corr(), annot = True)\nplt.show()", "_____no_output_____" ] ], [ [ "# 4. Regression Analysis with Categorical Variables", "_____no_output_____" ] ], [ [ "categorical_df = df.select_dtypes(include='object')\ncat_df = categorical_df.drop(['Customer','Effective To Date'], axis = 1)", "_____no_output_____" ], [ "cat_df.nunique()", "_____no_output_____" ] ], [ [ "# Conversion of Categorical Data", "_____no_output_____" ] ], [ [ "cat_df.columns", "_____no_output_____" ], [ "cols = ['State', 'Coverage', 'Education', 'EmploymentStatus', 'Gender',\n       'Location Code', 'Marital Status', 'Policy Type', 'Policy',\n       'Renew Offer Type', 'Sales Channel', 'Vehicle Class', 'Vehicle Size']", "_____no_output_____" ], [ "from sklearn.preprocessing import LabelEncoder\nlb = LabelEncoder()\nfor col in cat_df[cols]:\n    cat_df[col] = lb.fit_transform(cat_df[col])", "_____no_output_____" ], [ "cat_df.head()", "_____no_output_____" ], [ "categorical_train = sm.Logit(continous_var_df.Response, cat_df)\ncategorical_train.fit().summary()", "Optimization terminated successfully.\n         Current function value: 0.387557\n         Iterations 7\n" ] ], [ [ "By looking at the **p-values**, the **'Marital Status', 'Renew Offer Type', 'Sales Channel', 'Vehicle Size'** and **'Policy'** variables are significant at the 0.05 significance level, and all of them have a negative relationship with the output variable, **Response**.", "_____no_output_____" ] ], [ [ "# 5. Regression Analysis with both Continuous and Categorical Variables", "_____no_output_____" ] ], [ [ "continous_var_df.reset_index(drop = True, inplace=True)\ncat_df.reset_index(drop = True, inplace=True)", "_____no_output_____" ], [ "all_data_df = pd.concat([continous_var_df,cat_df], axis = 1)", "_____no_output_____" ], [ "all_data_df.head()", "_____no_output_____" ], [ "total_train = sm.Logit(all_data_df.Response, all_data_df.drop(['Response'], axis = 1))\ntotal_train.fit().summary()", "Optimization terminated successfully.\n         Current function value: 0.384709\n         Iterations 7\n" ] ], [ [ "By looking at the **p-values** of **'Customer Lifetime Value', 'Income', 'Monthly Premium Auto', 'Months Since Last Claim', 'Months Since Policy Inception', 'Number of Policies', 'Total Claim Amount', 'Marital Status', 'Renew Offer Type', 'Sales Channel' and 'Vehicle Size'**, these input variables seem to have significant relationships with the output (target) variable 'Response'. By looking at the coefficients, they are all **negatively correlated** with the Response variable.", "_____no_output_____" ], [ "**Conclusion:** The higher the Customer Lifetime Value is, the less likely the customer is to respond to marketing calls.", "_____no_output_____" ], [ "# 6. 
Regression Analysis Excluding Non-significant Variables", "_____no_output_____" ] ], [ [ "all_data_df.columns", "_____no_output_____" ], [ "significant_cols = ['Customer Lifetime Value','Income','Monthly Premium Auto','Months Since Last Claim',\n                   'Months Since Policy Inception','Number of Policies','Total Claim Amount','Marital Status',\n                   'Renew Offer Type','Sales Channel','Vehicle Size']\ntrainData = sm.Logit(all_data_df.Response, all_data_df[significant_cols])\ntrainData.fit().summary()", "Optimization terminated successfully.\n         Current function value: 0.385476\n         Iterations 7\n" ] ], [ [ "## 7. Classification", "_____no_output_____" ] ], [ [ "y = all_data_df.Response\nX = all_data_df.drop('Response', axis = 1)", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split, cross_validate\nX_train, X_test, y_train, y_test = train_test_split(X,y, stratify=y, test_size = 0.3, random_state = 42)", "_____no_output_____" ], [ "ax = sns.countplot(y_test)\nannot_plot_num(ax,0.08,1)", "_____no_output_____" ] ], [ [ "## 7.1 Support Vector Classification", "_____no_output_____" ] ], [ [ "from sklearn.metrics import confusion_matrix, accuracy_score, classification_report\nfrom sklearn.svm import SVC\n\nsvc = SVC()\nsvc.fit(X_train, y_train)\nsvc_pred = svc.predict(X_test)\n\nprint(confusion_matrix(svc_pred,y_test))\nprint('accuracy_score:',accuracy_score(svc_pred, y_test))\nprint(classification_report(svc_pred, y_test))\n\ncross_val_score_svc = cross_validate(svc, X_train, y_train,cv = 5)\nprint('Cross validation train_score',cross_val_score_svc['train_score'].mean())\nprint('Cross validation test_score',cross_val_score_svc['test_score'].mean())\n", "[[2332   43]\n [   0  366]]\naccuracy_score: 0.9843122947829259\n              precision    recall  f1-score   support\n\n           0       1.00      0.98      0.99      2375\n           1       0.89      1.00      0.94       366\n\n   micro avg       0.98      0.98      0.98      2741\n   macro avg       0.95      0.99      0.97      2741\nweighted avg       0.99      0.98      0.98      2741\n\nCross validation train_score 1.0\nCross validation test_score 0.9776321678484378\n" ] ], [ [ "## 7.2 Random Forest Classifier", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier()\nrfc.fit(X_train, y_train)\nrfc_pred = rfc.predict(X_test)\n\nprint(confusion_matrix(rfc_pred,y_test))\nprint('Accuracy score:',accuracy_score(rfc_pred, y_test))\nprint(classification_report(rfc_pred, y_test))\n\ncross_val_score_rfc = cross_validate(rfc, X_train, y_train,cv = 5)\n\nprint('Cross validation train_score',cross_val_score_rfc['train_score'].mean())\nprint('Cross validation test_score',cross_val_score_rfc['test_score'].mean())\n", "[[2326   27]\n [   6  382]]\nAccuracy score: 0.9879605983217804\n              precision    recall  f1-score   support\n\n           0       1.00      0.99      0.99      2353\n           1       0.93      0.98      0.96       388\n\n   micro avg       0.99      0.99      0.99      2741\n   macro avg       0.97      0.99      0.98      2741\nweighted avg       0.99      0.99      0.99      2741\n\nCross validation train_score 0.9991396931815437\nCross validation test_score 0.9809167180458012\n" ] ], [ [ "## 8. Feature Importance", "_____no_output_____" ] ], [ [ "feature_imp = rfc.feature_importances_.round(3)\nser_rank = pd.Series(feature_imp, index=X.columns).sort_values(ascending = False)\n\nplt.figure(figsize=(12,7))\nsns.barplot(x= ser_rank.values, y = ser_rank.index, palette='deep')\nplt.xlabel('relative importance')\nplt.show()", "_____no_output_____" ] ] ]
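A caveat on the encoding used in section 4 above: LabelEncoder maps categories to arbitrary integers, so a linear model such as Logit treats, say, State as an ordered quantity. A hedged sketch of the usual one-hot alternative, on a toy frame of my own rather than the notebook's df:

```python
import pandas as pd

toy = pd.DataFrame({'State': ['CA', 'WA', 'CA', 'OR'],
                    'Coverage': ['Basic', 'Premium', 'Basic', 'Extended']})

# One indicator column per category; drop_first avoids the dummy-variable trap
# (perfect collinearity) when the result is fed to a linear or logistic model.
dummies = pd.get_dummies(toy, columns=['State', 'Coverage'], drop_first=True)
print(dummies)
```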
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e718b42da668d0abf712fab113999be840d6cdf9
6,471
ipynb
Jupyter Notebook
Volume 1 - Supervised Deep Learning/Part 2 - CNN/Section 8 - Building a CNN/.ipynb_checkpoints/CNN-checkpoint.ipynb
Niranjankumar-c/DeepLearning_A-Z_NeuralNetworks_Python
8d45367c4cf2d93027600359defc9257bb68a772
[ "MIT" ]
2
2019-11-09T13:46:40.000Z
2021-11-30T20:53:01.000Z
Volume 1 - Supervised Deep Learning/Part 2 - CNN/Section 8 - Building a CNN/.ipynb_checkpoints/CNN-checkpoint.ipynb
Niranjankumar-c/DeepLearning_A-Z_NeuralNetworks_Python
8d45367c4cf2d93027600359defc9257bb68a772
[ "MIT" ]
null
null
null
Volume 1 - Supervised Deep Learning/Part 2 - CNN/Section 8 - Building a CNN/.ipynb_checkpoints/CNN-checkpoint.ipynb
Niranjankumar-c/DeepLearning_A-Z_NeuralNetworks_Python
8d45367c4cf2d93027600359defc9257bb68a772
[ "MIT" ]
4
2019-11-13T13:33:10.000Z
2021-01-19T05:21:15.000Z
19.20178
98
0.498996
[ [ [ "## Convolution Neural Networks", "_____no_output_____" ] ], [ [ "#keras - prepare a structure for the datasets\n\n#building the cnn", "_____no_output_____" ], [ "#importing the libraries\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPool2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense", "_____no_output_____" ], [ "#intitalizing the cnn\n\nclassifier = Sequential()", "_____no_output_____" ], [ "#convolution layer\n\nclassifier.add(Conv2D(filters=32,kernel_size= (3,3), input_shape = (64,64,3),\n data_format=\"channels_last\", activation = \"relu\"))", "_____no_output_____" ], [ "#max pooling\n\nclassifier.add(MaxPool2D(pool_size=(2,2)))", "_____no_output_____" ], [ "classifier.add(Flatten())", "_____no_output_____" ], [ "classifier.add(Dense(units= 128, activation=\"relu\"))\nclassifier.add(Dense(units = 1, activation=\"sigmoid\"))", "_____no_output_____" ], [ "classifier.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])", "_____no_output_____" ], [ "#Fitting the cnn to the images\n\nfrom keras.preprocessing.image import ImageDataGenerator", "_____no_output_____" ], [ "train_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)", "_____no_output_____" ], [ "test_datagen = ImageDataGenerator(rescale=1./255)", "_____no_output_____" ], [ "training_set = train_datagen.flow_from_directory(\n 'dataset/training_set',\n target_size=(64, 64),\n batch_size=32,\n class_mode='binary')", "Found 8000 images belonging to 2 classes.\n" ], [ "test_set = test_datagen.flow_from_directory(\n 'dataset/test_set',\n target_size=(64, 64),\n batch_size=32,\n class_mode='binary')", "Found 2000 images belonging to 2 classes.\n" ], [ "#fitting the data\n\nclassifier.fit_generator(\n training_set,\n steps_per_epoch=8000,\n epochs=20,\n validation_data=test_set,\n validation_steps=2000)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e718b621ca57391282a6ec7da948a3438bfb008c
463,427
ipynb
Jupyter Notebook
1.1.MachineLearning/Chapter2/Task2.ipynb
mihaighidoveanu/machine-learning-examples
e5a7ab71e52ae2809115eb7d7c943b46ebf394f3
[ "MIT" ]
null
null
null
1.1.MachineLearning/Chapter2/Task2.ipynb
mihaighidoveanu/machine-learning-examples
e5a7ab71e52ae2809115eb7d7c943b46ebf394f3
[ "MIT" ]
null
null
null
1.1.MachineLearning/Chapter2/Task2.ipynb
mihaighidoveanu/machine-learning-examples
e5a7ab71e52ae2809115eb7d7c943b46ebf394f3
[ "MIT" ]
1
2021-05-02T13:12:21.000Z
2021-05-02T13:12:21.000Z
125.96548
130,295
0.807115
[ [ [ "# Text classification and clustering \n\nOur data is represented by 20 samples of text from 20 authors. We will use this data for two tasks:\n1. Classification\n - will use a SVM to build a model which predicts which author wrote a text\n2. Clustering\n - by using KMeans, DBSCAN and Hierarchical Clustering, we will explore our data and try to find structure in it\n\nWe will also use PCA for dimensionality reduction during both tasks.", "_____no_output_____" ] ], [ [ "# Some IPython magic\n# enable multiple output\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n# Put these at the top of every notebook, here nbagg is used for interactive plots\n%reload_ext autoreload\n%autoreload 2\n%matplotlib nbagg\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# set floating points write format\nfloat_formatter = lambda x: \"%.3f\" % x\nnp.set_printoptions(formatter={'float_kind':float_formatter})", "_____no_output_____" ] ], [ [ "Below is a glimpse of data. First cleaning we make is to drop unnecesary columns. In our case, this column is 'Unnamed: 0' \nIn what follow, we will call each text sample from an author a **document**\n\n", "_____no_output_____" ] ], [ [ "# read data\ndata = pd.read_csv('Ghidoveanu A. Mihai.csv')\ndata.head()\ndata = data.drop(columns = 'Unnamed: 0')", "_____no_output_____" ] ], [ [ "We have data represented by text, but our machine learning models work with numerical data. One way to represent text in a numerical way is the **Bag Of Words** representation. \nThis means that we transform a document in an array of words whose values are each word's count of appearances in a document \ne.g. *\"My boy is my boy\" -> (2, 2, 1)* \n \nBut this form of count data gives preference to the most common words in documents, which are usually not at all relevant to the author style : *\"the\", \"and\", \"you\", \"I\"* \nA safe bet is to choose to ignore words which appear in 99% documents. There also exists a metric that takes how often words appear in other documents into account. \n \n**TF-IDF** (Term Frequency - Inverse Document Frequency) is a metric that favors terms which appear often in a document, and not often in many documents. That is, we will find words which are more specific to the writer.\nThus, our final solution is to use tf-idf for transforming the counts. \n", "_____no_output_____" ], [ " # Task 1. Text Classification\n \nAs said above, we will use an SVM to classify texts to their authors. \nWe have 400 samples of documents, each document having a feature for each word in the vocabulary. \nThis leads to a large amount of features, and given that not all features have a value because not every document contains all the possible words, our data is a **sparse matrix**. \n \nLabels for our data will be the author names, encoded into numbers. We have 20 authors and each author has 20 documents, so we will end up with an array of 400 labels, one for each document in our data. \n\nWhen we split data into train and test, it is important to take equal amounts of samples from each author because we want to be able to predict new documents to the same authors that we have learnt from. That's where **stratification** comes into play. It is a method of sampling that does just that.\n\nAlso, we *tfidf vectorize* our dataset **after splitting** into train and test datasets. 
That is because in a real life scenario, we might find words in the test dataset that are not available in the train dataset. And the vectorizer creates a feature for each of the words encountered. Thus, we will meet words that won't be represented by some features.", "_____no_output_____" ] ], [ [ "%%time\nfrom sklearn.feature_extraction import text\nfrom sklearn.model_selection import train_test_split\n\ndef make_tfidf(X):\n tfidf = text.TfidfVectorizer(max_df=0.9)\n tfidf.fit(X)\n return tfidf.transform(X), tfidf\n\n# we need an array of texts to feed the vectorizers\n# so we flatten the dataframe, with stories from the same author one after the other\ncorpus = data.values.ravel(order = 'F')\n\nRSTATE = 42\n\n# we split in train and text examples before counter vectorization\nlabels, authors = pd.factorize(data.columns.values)\ny = np.array([label for label in labels for _ in range(20)])\nx_train, x_test, y_train, y_test = train_test_split(corpus, y, stratify=y,test_size=0.3, random_state = RSTATE)\n\n## Preprocessing text and turning it into a bag of words tf-idf model\nx_train, tfidf = make_tfidf(x_train)\nx_test = tfidf.transform(x_test)", "_____no_output_____" ], [ "# %%time\nfrom sklearn import svm\nfrom sklearn.model_selection import GridSearchCV\n\n# C gives way to correct classification of training examples (larger value of C) \n# gives way to maximization of the decision function’s margin.(lower value of C)\n# gamma -- the inverse of the radius of influence of samples selected by the model as support vectors\nsvc = svm.SVC(cache_size = 1000, C = 10, kernel='linear')\nclf = GridSearchCV(estimator = svc, cv = 3, refit = True, param_grid=[{}], iid=False)\nsvc.fit(x_train, y_train);\nprint('After PCA score is ', svc.score(x_test, y_test))", "_____no_output_____" ] ], [ [ "## Tuning the hyperparameters\n\nThe SVM has many hyperparameters that we have to try to build a better model. Firstly, it has 4 types of **kernel** functions:\n1. linear\n2. rbf (gaussian)\n3. poly (polynomial)\n4. sigmoid\n\nThen, because the sklearn uses the Soft-margin SVM, we can tune the **C** parameter, which does the regularization of the model. \nA large value of C makes the decision margin narrower if it can correctly classify more training examples this way. This leads to a higher bias and a lower variance.\nOn the other hand, a small value of C makes the decision margin larger, with the cost of misclassifying some training examples. This leads to a lower bias and a higher variance.\n\nFor *rbf* and *sigmoid* kernels only, we also have the **gamma** parameter.\n\nFor *poly* kernel only, we have the **degree** parameter. It controls the degree of the polynomial function used to train the svm.\n\nIn below code, we perform a **Grid-Search** for each kernel on various set of values for hyperparameters. After we find the best hyperparameters for each kernel, we compare all the kernels at the end and choose the best parameter set. \nWe compare our results with each parameter by performing a **K-Fold** with 3 folds on our training samples. We hold out a **test set** to be sure we don't overfit the parameters on the validation one.\n \nBesides the *linear* kernel, we have to tune two parameters at a time : (*C* and *gamma*) or (*C* and *degree*). Because the grid search performs a cartesian product between the parameter value sets, it can take a rather long time if we try many parameter values at once. \nTo avoid this, we will take the following approach:\n1. Try three values in a logarithmic scale of 100 (e.g. 
For *C* and *gamma* we try values in (0.01, 1, 100).\n2. We find best *C* and *gamma* (e.g. *C* = 100, *gamma* = 1)\n3. Try three values in a logarithmic scale of 10, centered around the best values found above\n - e.g *C* in (10, 100, 1000)\n - e.g. *gamma* in (0.1, 1, 10)\n4. We find best *C* and *gamma* (e.g. *C* = 1000, *gamma* = 0.1). Check scores on the test set and compare them with the previous best to be wary of overfit\n5. Be creative ! Maybe try more values on a linear scale after we found our best on a logarithmic. \n\n**WARNING !!!** : Next cells usually took up to 4 minutes per run on my machine. \n\n**Important Note !!!** : Even though we always compute and print the score on the test population, we use it only to check if we did overfit on the validation set. We tune the parameters to the validation set.", "_____no_output_____" ] ], [ [ "# Utilities to cross validate a classifier with a grid search of params and print the results\n# @clf needs to be GridSearchCV\ndef cross_validate(clf, params, x_train = x_train, x_test = x_test, y_train = y_train, y_test = y_test):\n clf.param_grid = params\n clf.fit(x_train, y_train)\n scores = {'score' : clf.best_score_,\n 'params' : clf.best_params_, \n 'tscore' : clf.best_estimator_.score(x_test, y_test)}\n return scores, clf\n\ndef print_results(scores):\n kernel = scores['params']['kernel']\n print('For {:10} : validation {:.2f} test {:.2f} - {}'.format(scores['params']['kernel'], scores['score'], \n scores['tscore'], scores['params']))", "_____no_output_____" ], [ "X", "_____no_output_____" ] ], [ [ "### Linear Kernel Tuning", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn.manifold import TSNE\nX_embedded = TSNE(n_components=2).fit_transform(X)\nX_embedded.shape", "_____no_output_____" ], [ "params = [{'kernel' : ['linear', 'rbf', 'poly', 'sigmoid'],\n 'C' : [0.01, 0.1, 1, 10, 100, 1000, 10000],\n 'gamma' : [0.01, 0.1, 1, 10, 100, 1000],\n 'degree' : [0.01, 0.1, 1, 10 ,100, 1000, 10000, 10000]}]\n\nscores = {} # to store scores and parameters for each of the kernels\n\nscores, clf = cross_validate(clf, params)\nprint_results(scores)\n", "_____no_output_____" ], [ "%%time\n# tuning svc hyper parameters\n# Firstly we try the linear kernel with many C's\nlinear_params = [{'kernel' : ['linear'],\n 'C' : [0.01, 0.1, 1, 10, 100, 1000]}]\n\nscores = {} # to store scores and parameters for each of the kernels\n\nlinear_scores, clf = cross_validate(clf, linear_params)\nprint_results(linear_scores)\n\n# we found our best : \nscores['linear'] = linear_scores", "For linear : validation 0.96 test 0.97 - {'C': 10, 'kernel': 'linear'}\nWall time: 3min 10s\n" ] ], [ [ "### RBF Kernel Tuning", "_____no_output_____" ] ], [ [ "%%time\n# for rbf, we try at first lesser values for parameters\n# because rbf kernel has more parameters to try and thus it would take more computational time\n# then try more values based on the best found\nrbf_params = [{'kernel' : ['rbf'],\n 'C' : [0.01, 1, 100],\n 'gamma' : [0.01, 1, 100, 'auto']}]\n\nrbf_scores, clf = cross_validate(clf, rbf_params)\nprint_results(rbf_scores)", "For rbf : validation 0.95 test 0.97 - {'C': 100, 'gamma': 0.01, 'kernel': 'rbf'}\nWall time: 5min 21s\n" ], [ "%%time\n\n# after this set of params we see that best (C, gamma) = (100,1)\n# so we try more closer values to the optimal C and gamma found above\nrbf_params[0]['C'] = [10, 100, 1000]\nrbf_params[0]['gamma'] = [0.1, 1, 10]\nrbf_scores_2, clf = cross_validate(clf, rbf_params)\nprint_results(rbf_scores_2)\n", "For rbf : 
validation 0.95 test 0.97 - {'C': 10, 'gamma': 0.1, 'kernel': 'rbf'}\nWall time: 4min 14s\n" ] ], [ [ "Now we see that we obtain the same values for this set of parameters.\n\nWe know that *C* and *gamma* are in a trade-off of bias and variance, so increasing *C* and lowering gamma gets the same results from a moment on. \nThus, we will keep this best **(10, 0.1)** because a smaller *C* leads to smaller training times and the parameters being more ponderate makes the model less prone to overfitting.", "_____no_output_____" ] ], [ [ "# We found our best \n\nscores['rbf'] = rbf_scores", "_____no_output_____" ] ], [ [ "### Poly Kernel Tuning", "_____no_output_____" ] ], [ [ "%%time\n\n# now we try to tune the polynomial kernel with the same strategy as for rbf, because we have many parameters to choose from\npoly_params = [{'kernel' : ['poly'],\n 'C' : [0.01, 1, 100],\n 'degree' : [2, 10, 100]}]\npoly_scores, clf = cross_validate(clf, poly_params)\nprint_results(poly_scores)", "D:\\Programs\\Anaconda\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n" ] ], [ [ "After this trial we see that best parameters are (*C*, *degree*) = (0.01, 10) \nWe now try another set of values around (0.01, 10).", "_____no_output_____" ] ], [ [ "%%time\npoly_params[0]['C'] = [0.01]\npoly_params[0]['degree'] = [10, 20, 30]\npoly_scores_2, clf = cross_validate(clf, poly_params)\nprint_results(poly_scores_2)", "D:\\Programs\\Anaconda\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n" ] ], [ [ "Parameters kept the same best value.", "_____no_output_____" ] ], [ [ "# %%time\n# poly_params[0]['C'] = [ 1000, 10000]\n# poly_params[0]['degree'] = [2, 3]\n# poly_scores_3, clf = cross_validate(clf, poly_params)\n# print_results(poly_scores_3)", "_____no_output_____" ], [ "# We found our best\n\nscores['poly'] = poly_scores", "_____no_output_____" ] ], [ [ "### Sigmoid Kernel Tuning", "_____no_output_____" ] ], [ [ "%%time\n# Now try the sigmoid kernel\nsigmoid_params = [{'kernel' : ['sigmoid'],\n 'C' : [0.01, 1, 100],\n 'gamma' : [0.01, 1, 100, 'auto']}]\nsigmoid_scores, clf = cross_validate(clf, sigmoid_params)\nprint_results(sigmoid_scores)", "For sigmoid : validation 0.96 test 0.97 - {'C': 100, 'gamma': 1, 'kernel': 'sigmoid'}\nWall time: 5min 19s\n" ], [ "%%time\n# For now the best params are (C, gamma) = (100, 1)\n\nsigmoid_params[0]['C'] = [10, 100, 1000]\nsigmoid_params[0]['gamma'] = [0.1, 1, 10]\nsigmoid_scores_2, clf = cross_validate(clf, sigmoid_params)\nprint_results(sigmoid_scores_2)", "For sigmoid : validation 0.92 test 0.97 - {'C': 10, 'gamma': 1, 'kernel': 'sigmoid'}\nWall time: 514 ms\n" ] ], [ [ "We see no change in results after changing the hyperparameter range, so we keep the best value *(C, gamma)* = *(100,1)* ", "_____no_output_____" ] ], [ [ "# We found our best\n\nscores['sigmoid'] = sigmoid_scores", "_____no_output_____" ] ], [ [ "Now we compare our results for all the kernels up till now. 
", "_____no_output_____" ] ], [ [ "# Now we compare the results for all the kernels tried\nfor k, v in scores.items():\n print('{:10} : validation {:.2f} ---- test {:.2f} : {}'.format(k, v['score'], v['tscore'], v['params']))\n# scores", "linear : validation 0.91 ---- test 0.96 : {'C': 10, 'kernel': 'linear'}\nsigmoid : validation 0.92 ---- test 0.97 : {'C': 100, 'gamma': 1, 'kernel': 'sigmoid'}\npoly : validation 0.82 ---- test 0.86 : {'C': 100, 'degree': 2, 'kernel': 'poly'}\nrbf : validation 0.92 ---- test 0.96 : {'C': 100, 'gamma': 0.01, 'kernel': 'rbf'}\n" ] ], [ [ "### Decision on tuning \nWe make our final choice below.", "_____no_output_____" ] ], [ [ "svc.kernel = 'rbf'\nsvc.C = 10\nsvc.gamma = 0.1\nsvc.fit(x_train, y_train);\nprint('Our score on the test set is ', svc.score(x_test, y_test))", "_____no_output_____" ] ], [ [ "## Task 1.2 Dimensionality Reduction\nWe use **PCA** to reduce the dimensionality of our dataset and see how our model performs on the reduced dataset. \nSklearn package uses **SVD** (Singular Value Decomposition) to perform *PCA*. Using *SVD* on count / tf-idf data, as we have now, is called **LSA** (Latent Semantic Analysis). \nSklearn *SVD* implementation for sparse inputs is *TruncatedSVD*. But given that we don't have many samples, it will not come at a great cost to transform our sparse matrix in a dense one. This comes with the benefit of using *PCA*'s feature of reducing data to as many dimensions as needed to explain a given variance ratio. \nTo keep most of our data information, we will keep **99%** variance for the dataset. ", "_____no_output_____" ] ], [ [ "%%time\nfrom sklearn.decomposition import PCA\ndef reduce_dimensions(X, dimensions = 0.99):\n pca = PCA(dimensions, random_state = RSTATE).fit(X)\n return pca.transform(X), pca\n\nrx_train, pca = reduce_dimensions(x_train.todense())\nrx_test = pca.transform(x_test.todense())\nsvc.fit(rx_train, y_train);\nsvc.score(rx_test,y_test)", "Wall time: 11.4 s\n" ] ], [ [ "We notice that the score on test set goes down a little after pca, but not much. Given how many features had our data in the first place, reducing its dimensionality in this way will lead to much faster training times.\n", "_____no_output_____" ], [ "# Task 2. Clustering documents\nTo explore data and find structure in it, we will try to clusterize the data. \nWe will reduce data dimensions using PCA. We will reduce to as many dimensions needed to explain **99%** of the variance.\n\nWe use three clustering algorithms :\n1. *Kmeans*\n2. *DBSCAN*\n3. *Hierarhical Clustering* (represented by *AgglomerativeClustering* in sklearn)\n\nWe score these algorithms using following metrics:\n1. *Silhouette score* : measures how similar is a sample to its cluster and how different it is to other clusters\n - The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. \n - Negative values generally indicate that a sample has been assigned to the wrong cluster, as a different cluster is more similar.\n2. *Homogeneity score* \n - A clustering result satisfies homogeneity if all of its clusters contain only data points which are members of a single class.\n3. *Completeness score*\n - A clustering result satisfies completeness if all the data points that are members of a given class are elements of the same cluster.", "_____no_output_____" ], [ "## Task 2.1 Exploratory plots\n\nOur count/tf-idf representation gives a sparse matrix with many features, and in this way, many dimensions. 
So it is impossible to visualize in a plot. \nBut by using **PCA** we can reduce our data to 2 or 3 features and plot it in a 2d, respectively 3d, setting. \n\nResults can be seen below.", "_____no_output_____" ] ], [ [ "## Utilities for plotting data \n\nfrom matplotlib.lines import Line2D\nfrom matplotlib import rcParams\nfrom mpl_toolkits.mplot3d import Axes3D\n\nrcParams.update({'font.size': 6})\n\n# 20 colors with high contrast from the Helpful Internet\n# https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/\nunique_colors = ['#e6194B', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#42d4f4', '#f032e6', '#bfef45', '#fabebe',\n '#469990', '#e6beff', '#9A6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#a9a9a9']\ndef marker_with(color, label, marker = '.'):\n return Line2D([0],[0], marker = marker, color = color, label = label)\n\ndef plot_data(rX, subplot_poz, dimensions = 2, unique_colors = unique_colors):\n # give one of the unique 20 colors to each document label \n colors = [color for color in unique_colors for _ in range(20)]\n # create a 3d or 2d plot based on parameters\n fig = plt.figure()\n if dimensions == 3:\n ax = fig.add_subplot(subplot_poz, projection='3d');\n ax.scatter(rX[:,0], rX[:,1], rX[:,2], marker = '.', c = colors);\n ax.set_title('Texts by author - 3D')\n else:\n ax = fig.add_subplot(subplot_poz)\n ax.scatter(rX[:,0], rX[:,1] , marker = '.', c = colors);\n ax.set_title('Texts by author - 2D')\n # add legend based on the choosen colors and the author names\n legend_handles = [marker_with(color, author, 'o') for author, color in zip(authors,unique_colors)]\n ax.legend(legend_handles, authors);\n fig.tight_layout()\n", "_____no_output_____" ], [ "%%time\n# make vectorization of entire corpus without splitting\nX, _ = make_tfidf(corpus)\nX = X.todense()", "Wall time: 1min 26s\n" ], [ "%%time\n# use pca to reduce dimensionality to make dataset visualizable\nrX, _ = reduce_dimensions(X, 3)\nplot_data(rX, 111, dimensions=3)\nrX, _ = reduce_dimensions(X, 2)\nplot_data(rX, 111, dimensions=2)\n", "_____no_output_____" ] ], [ [ "## Now Clustering", "_____no_output_____" ] ], [ [ "# Now clustering\nfrom sklearn.cluster import DBSCAN, KMeans, AgglomerativeClustering\nrX, _ = reduce_dimensions(X)\ndbscan = DBSCAN(eps = 1, min_samples = 5)\nclusters = []\nclusters.append(dbscan.fit_predict(rX))\n", "_____no_output_____" ], [ "no_clusters = clusters[0].max()", "_____no_output_____" ], [ "%%time\n\nkmeans = KMeans(n_clusters=no_clusters, init = 'random')\nclusters.append(kmeans.fit_predict(rX))\n", "Wall time: 203 ms\n" ], [ "%%time\nhierarchical = AgglomerativeClustering(n_clusters = no_clusters)\nclusters.append(hierarchical.fit_predict(rX))\n", "Wall time: 38 ms\n" ], [ "from sklearn.metrics import silhouette_score, homogeneity_score, completeness_score\n\nprint('Silhouette : ', [silhouette_score(X, cluster) for cluster in clusters])\nprint('Homogeinity : ', [homogeneity_score(y, cluster) for cluster in clusters])\nprint('Completeness : ', [completeness_score(y, cluster) for cluster in clusters])", "Silhouette : [0.040605232641092941, 0.044350683841450696, 0.045141741283111811]\nHomogeinity : [0.24635918520097347, 0.42407413445792763, 0.30637343086899516]\nCompleteness : [0.78200844990673657, 0.87107343196176046, 0.91715648532774607]\n" ] ], [ [ "We tried to estimate the number of clusters using DBSCAN and then used this number of clusters for the other algorithms. 
\nWe notice that **K-Means** works the best.", "_____no_output_____" ], [ "\n## Task 2.2 Visualising Clusters", "_____no_output_____" ] ], [ [ "rX, _ = reduce_dimensions(X, 2)", "_____no_output_____" ], [ "def plot_clusters(rX, clusters, dimensions = 2, titles = []):\n    black = '#000000'\n    if dimensions == 3:\n        fig, axs = plt.subplots(nrows = 1, ncols = len(clusters), subplot_kw={'projection': '3d'})\n    else:\n        fig, axs = plt.subplots(nrows = 1, ncols = len(clusters))\n    fig.suptitle('Document Clustering')\n    fig.tight_layout()\n    for index, clustering in enumerate(clusters): \n        labels = clustering\n        cluster_labels = np.unique(labels).tolist()\n        cluster_colors = unique_colors[:len(cluster_labels)]\n        colors = [cluster_colors[label] for label in labels]\n        ax = axs[index]\n        if dimensions == 3:\n            ax.scatter(rX[:,0], rX[:,1], rX[:,2], s = 20, c = colors, marker = '.')\n            ax.set_title('{} - 3D'.format(titles[index]))\n        else:\n            ax.scatter(rX[:,0], rX[:,1], s = 20, c = colors, marker = '.')\n            ax.set_title('{} - 2D'.format(titles[index]))\n        legend_handles = [ marker_with(color, label) if label != -1 else marker_with(black, 'outlier') for label, color in zip(cluster_labels,cluster_colors)]\n        ax.legend(legend_handles, cluster_labels)", "_____no_output_____" ], [ "plot_clusters(rX, clusters, dimensions = 2, titles = ['dbscan', 'kmeans', 'hierarchical'])", "_____no_output_____" ] ] ]
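To make the bag-of-words to tf-idf step described at the top of this notebook concrete, here is a hedged toy sketch on three tiny documents of my own, not the author corpus (on scikit-learn older than 1.0, use get_feature_names() instead of get_feature_names_out()):

```python
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["my boy is my boy",
        "the boy reads",
        "the girl reads and reads"]

tfidf = TfidfVectorizer()
weights = tfidf.fit_transform(docs)   # sparse matrix, one row per document

# Terms frequent within one document but rare across documents score highest
print(tfidf.get_feature_names_out())
print(weights.toarray().round(2))
```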
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
e718c6e712b82f11bd6dc8ad6bf953852b211b37
20,720
ipynb
Jupyter Notebook
course_materials/project_03_data_warehouses/L1 E2 - 2 - Roll up and Drill Down.ipynb
ranstotz/data-eng-nanodegree
0344c4d5d42ee3ec58befaaffe8749aa0bd9c143
[ "MIT" ]
null
null
null
course_materials/project_03_data_warehouses/L1 E2 - 2 - Roll up and Drill Down.ipynb
ranstotz/data-eng-nanodegree
0344c4d5d42ee3ec58befaaffe8749aa0bd9c143
[ "MIT" ]
null
null
null
course_materials/project_03_data_warehouses/L1 E2 - 2 - Roll up and Drill Down.ipynb
ranstotz/data-eng-nanodegree
0344c4d5d42ee3ec58befaaffe8749aa0bd9c143
[ "MIT" ]
null
null
null
28.658368
263
0.372346
[ [ [ "# Exercise 02 - OLAP Cubes - Roll Up and Drill Down", "_____no_output_____" ], [ "All the databases table in this demo are based on public database samples and transformations\n- `Sakila` is a sample database created by `MySql` [Link](https://dev.mysql.com/doc/sakila/en/sakila-structure.html)\n- The postgresql version of it is called `Pagila` [Link](https://github.com/devrimgunduz/pagila)\n- The facts and dimension tables design is based on O'Reilly's public dimensional modelling tutorial schema [Link](http://archive.oreilly.com/oreillyschool/courses/dba3/index.html)\n\nStart by connecting to the database by running the cells below. If you are coming back to this exercise, then uncomment and run the first cell to recreate the database. If you recently completed the slicing and dicing exercise, then skip to the second cell.", "_____no_output_____" ] ], [ [ "!PGPASSWORD=student createdb -h 127.0.0.1 -U student pagila_star\n!PGPASSWORD=student psql -q -h 127.0.0.1 -U student -d pagila_star -f Data/pagila-star.sql", " set_config \n------------\n \n(1 row)\n\n setval \n--------\n 200\n(1 row)\n\n setval \n--------\n 605\n(1 row)\n\n setval \n--------\n 16\n(1 row)\n\n setval \n--------\n 600\n(1 row)\n\n setval \n--------\n 109\n(1 row)\n\n setval \n--------\n 599\n(1 row)\n\n setval \n--------\n 1\n(1 row)\n\n setval \n--------\n 1\n(1 row)\n\n setval \n--------\n 1\n(1 row)\n\n setval \n--------\n 1\n(1 row)\n\n setval \n--------\n 16049\n(1 row)\n\n setval \n--------\n 1000\n(1 row)\n\n setval \n--------\n 4581\n(1 row)\n\n setval \n--------\n 6\n(1 row)\n\n setval \n--------\n 32098\n(1 row)\n\n setval \n--------\n 16049\n(1 row)\n\n setval \n--------\n 2\n(1 row)\n\n setval \n--------\n 2\n(1 row)\n\n" ] ], [ [ "### Connect to the local database where Pagila is loaded", "_____no_output_____" ] ], [ [ "import sql\n%reload_ext sql\n\nDB_ENDPOINT = \"127.0.0.1\"\nDB = 'pagila_star'\nDB_USER = 'student'\nDB_PASSWORD = 'student'\nDB_PORT = '5432'\n\n# postgresql://username:password@host:port/database\nconn_string = \"postgresql://{}:{}@{}:{}/{}\" \\\n .format(DB_USER, DB_PASSWORD, DB_ENDPOINT, DB_PORT, DB)\n\nprint(conn_string)\n%sql $conn_string", "postgresql://student:[email protected]:5432/pagila_star\n" ] ], [ [ "### Star Schema", "_____no_output_____" ], [ "<img src=\"pagila-star.png\" width=\"50%\"/>", "_____no_output_____" ], [ "## Roll-up\n- Stepping up the level of aggregation to a large grouping\n- e.g.`city` is summed as `country`\n\nTODO: Write a query that calculates revenue (sales_amount) by day, rating, and country. Sort the data by revenue in descending order, and limit the data to the top 20 results. 
The first few rows of your output should match the table below.", "_____no_output_____" ] ], [ [ "%%time\n%%sql\nSELECT dd.day, dm.rating, dc.country, sum(f.sales_amount) as revenue\nFROM factSales f\nJOIN dimDate dd ON (f.date_key=dd.date_key)\nJOIN dimMovie dm ON (f.movie_key=dm.movie_key)\nJOIN dimCustomer dc ON (f.customer_key=dc.customer_key)\nGROUP BY (dd.day, dm.rating, dc.country)\nORDER BY revenue DESC\nLIMIT 20", " * postgresql://student:***@127.0.0.1:5432/pagila_star\n20 rows affected.\nCPU times: user 4.92 ms, sys: 0 ns, total: 4.92 ms\nWall time: 31 ms\n" ] ], [ [ "<div class=\"p-Widget jp-RenderedHTMLCommon jp-RenderedHTML jp-mod-trusted jp-OutputArea-output jp-OutputArea-executeResult\" data-mime-type=\"text/html\"><table>\n <tbody><tr>\n <th>day</th>\n <th>rating</th>\n <th>country</th>\n <th>revenue</th>\n </tr>\n <tr>\n <td>30</td>\n <td>G</td>\n <td>China</td>\n <td>169.67</td>\n </tr>\n <tr>\n <td>30</td>\n <td>PG</td>\n <td>India</td>\n <td>156.67</td>\n </tr>\n <tr>\n <td>30</td>\n <td>NC-17</td>\n <td>India</td>\n <td>153.64</td>\n </tr>\n <tr>\n <td>30</td>\n <td>PG-13</td>\n <td>China</td>\n <td>146.67</td>\n </tr>\n <tr>\n <td>30</td>\n <td>R</td>\n <td>China</td>\n <td>145.66</td>\n </tr>\n</tbody></table></div>", "_____no_output_____" ], [ "## Drill-down\n- Breaking up one of the dimensions to a lower level.\n- e.g.`city` is broken up into `districts`\n\nTODO: Write a query that calculates revenue (sales_amount) by day, rating, and district. Sort the data by revenue in descending order, and limit the data to the top 20 results. The first few rows of your output should match the table below.", "_____no_output_____" ] ], [ [ "%%time\n%%sql\nSELECT dd.day, dm.rating, dc.district, sum(f.sales_amount) as revenue\nFROM factSales f\nJOIN dimDate dd ON (f.date_key=dd.date_key)\nJOIN dimMovie dm ON (f.movie_key=dm.movie_key)\nJOIN dimCustomer dc ON (f.customer_key=dc.customer_key)\nGROUP BY (dd.day, dm.rating, dc.district)\nORDER BY revenue DESC\nLIMIT 20", " * postgresql://student:***@127.0.0.1:5432/pagila_star\n20 rows affected.\nCPU times: user 5.68 ms, sys: 0 ns, total: 5.68 ms\nWall time: 36.6 ms\n" ] ], [ [ "<div class=\"p-Widget jp-RenderedHTMLCommon jp-RenderedHTML jp-mod-trusted jp-OutputArea-output jp-OutputArea-executeResult\" data-mime-type=\"text/html\"><table>\n <tbody><tr>\n <th>day</th>\n <th>rating</th>\n <th>district</th>\n <th>revenue</th>\n </tr>\n <tr>\n <td>30</td>\n <td>PG-13</td>\n <td>Southern Tagalog</td>\n <td>53.88</td>\n </tr>\n <tr>\n <td>30</td>\n <td>G</td>\n <td>Inner Mongolia</td>\n <td>38.93</td>\n </tr>\n <tr>\n <td>30</td>\n <td>G</td>\n <td>Shandong</td>\n <td>36.93</td>\n </tr>\n <tr>\n <td>30</td>\n <td>NC-17</td>\n <td>West Bengali</td>\n <td>36.92</td>\n </tr>\n <tr>\n <td>17</td>\n <td>PG-13</td>\n <td>Shandong</td>\n <td>34.95</td>\n </tr>\n</tbody></table></div>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e718ca9b217ced6acba04a304fb187a4021a32a4
4,194
ipynb
Jupyter Notebook
Back Tracking/Review/22. Generate Parentheses.ipynb
YuHe0108/Leetcode
90d904dde125dd35ee256a7f383961786f1ada5d
[ "Apache-2.0" ]
1
2020-08-05T11:47:47.000Z
2020-08-05T11:47:47.000Z
Back Tracking/Review/22. Generate Parentheses.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
Back Tracking/Review/22. Generate Parentheses.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
22.427807
70
0.419647
[ [ [ "1、当前左右括号都有大于 0 个可以使用的时候,才产生分支;\n2、产生左分支的时候,只看当前是否还有左括号可以使用;\n3、产生右分支的时候,还受到左分支的限制,右边剩余可以使用的括号数量一定得在严格大于左边剩余的数量的时候,才可以产生分支;\n4、在左边和右边剩余的括号数都等于 0 的时候结算。", "_____no_output_____" ] ], [ [ "from typing import List\n\nclass Solution:\n def generateParenthesis(self, n: int) -> List[str]:\n\n def dfs(cur_str, left, right):\n \"\"\"\n :param cur_str: 从根结点到叶子结点的路径字符串\n :param left: 左括号还可以使用的个数\n :param right: 右括号还可以使用的个数\n :return:\n \"\"\"\n if left == 0 and right == 0:\n res.append(cur_str)\n return\n if right < left:\n return\n if left:\n dfs(cur_str + '(', left - 1, right)\n if right:\n dfs(cur_str + ')', left, right - 1)\n \n res = []\n cur_str = ''\n dfs(cur_str, n, n)\n return res", "_____no_output_____" ], [ "from typing import List\n\nclass Solution:\n def generateParenthesis(self, n: int) -> List[str]:\n \n def backtracking(left, right, path):\n nonlocal res\n \n if left == right == 0:\n res.append(''.join(path[:]))\n return\n \n if left:\n path.append('(')\n backtracking(left-1, right, path)\n path.pop()\n \n if left < right:\n path.append(')')\n backtracking(left, right-1, path)\n path.pop()\n \n res = []\n backtracking(n, n, [])\n return res", "_____no_output_____" ], [ "solution = Solution()\nsolution.generateParenthesis(n = 3)", "_____no_output_____" ] ] ]
[ "raw", "code" ]
[ [ "raw" ], [ "code", "code", "code" ] ]
e718d1fa4d07e7317e8c0c5eb471c1e3752d5c6b
418,948
ipynb
Jupyter Notebook
notebooks/training-and-plotting.ipynb
Neuralwood-Net/face-recognizer-9000
b7804355927540bf07ce70cfe44dac6988a9b8cc
[ "MIT" ]
null
null
null
notebooks/training-and-plotting.ipynb
Neuralwood-Net/face-recognizer-9000
b7804355927540bf07ce70cfe44dac6988a9b8cc
[ "MIT" ]
null
null
null
notebooks/training-and-plotting.ipynb
Neuralwood-Net/face-recognizer-9000
b7804355927540bf07ce70cfe44dac6988a9b8cc
[ "MIT" ]
null
null
null
261.352464
108,202
0.892431
[ [ [ "<a href=\"https://colab.research.google.com/github/Neuralwood-Net/woodnet/blob/main/notebooks/training-and-plotting.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Train networks on 224px color images\n### Notebook for training WoodNet and SqueezeNet on both the images cropped around center and the images cropped to faces\n\n", "_____no_output_____" ], [ "### Make sure the hardware is in order", "_____no_output_____" ] ], [ [ "gpu_info = !nvidia-smi\ngpu_info = '\\n'.join(gpu_info)\nif gpu_info.find('failed') >= 0:\n print('Select the Runtime > \"Change runtime type\" menu to enable a GPU accelerator, ')\n print('and then re-execute this cell.')\nelse:\n print(gpu_info)", "Fri Nov 20 07:25:59 2020 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 455.38 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 54C P8 10W / 70W | 0MiB / 15079MiB | 0% Default |\n| | | ERR! |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ] ], [ [ "### Imports", "_____no_output_____" ] ], [ [ "import time\nimport os\nimport copy\nimport sys\nimport tarfile\nimport zipfile\n\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\n\nimport torchvision\nfrom torchvision import datasets, models, transforms\n\nfrom google.cloud import storage\n\n# Placeholder to make it run until the real WoodNet is defined\nclass WoodNet:\n pass\n\n# Define an enumeration type for the different datasets\n# (easily extendable to more sets in the future)\nclass Dataset:\n CENTER = 0\n FACE = 1\n\n# Define enumration type for the different networks\nclass Network:\n WOODNET = 0\n SQUEEZENET = 1\n BADNET = 2\n\n# Set what device to run the training on (preferrably GPU)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice", "_____no_output_____" ] ], [ [ "### Create configurations for the different datasets", "_____no_output_____" ] ], [ [ "# Define some configuration for the different datasets so the below code will work\nconfiguration = {\n Dataset.CENTER: {\n \"blobname\": \"faces/balanced_sampled_224px_color_156240_images_70_15_15_split.zip\",\n \"datadir\": \"sampled_dataset_balanced_244\",\n },\n Dataset.FACE: {\n \"blobname\": \"faces/balanced_sampled_cropped_224px_color_70_15_15_split.tar.gz\",\n \"datadir\": \"sampled_dataset_balanced_cropped_224\",\n },\n}", "_____no_output_____" ] ], [ [ "### Decide what dataset to use for training of the models", "_____no_output_____" ] ], [ [ "# Available types are Dataset.RAW and Dataset.CROPPED\n# 
Choose one of them and continue\n# dataset = Dataset.CENTER\ndataset = Dataset.FACE", "_____no_output_____" ] ], [ [ "### Fetch and extract the data from the storage bucket", "_____no_output_____" ] ], [ [ "# Define paths for use later\n# (Kept separate from the heavy operations below in case they need to be rerun)\nBASE_PATH = \"/content\"\nSTORAGE_ROOT = \"gs://tdt4173-datasets\"\n\nBLOB_NAME = configuration[dataset][\"blobname\"]\nzipfilename = os.path.join(BASE_PATH, BLOB_NAME)\nextract_to_dir = os.path.join(BASE_PATH, *BLOB_NAME.split(os.path.sep)[:-1])", "_____no_output_____" ], [ "# Make the required directories\nos.makedirs(os.path.join(BASE_PATH, \"faces\"), exist_ok=True)\nos.makedirs(os.path.join(BASE_PATH, \"checkpoints\"), exist_ok=True)\nos.makedirs(os.path.join(BASE_PATH, \"logs\"), exist_ok=True)\nos.makedirs(os.path.join(BASE_PATH, \"examples\"), exist_ok=True)\n\n# Fetch the data\nwith open(zipfilename, \"wb\") as f:\n storage.Client.create_anonymous_client().download_blob_to_file(os.path.join(STORAGE_ROOT, BLOB_NAME), f)", "_____no_output_____" ], [ "# Extract the data\nif dataset == Dataset.FACE:\n with tarfile.open(zipfilename, \"r:gz\") as f:\n f.extractall(extract_to_dir)\nelif dataset == Dataset.CENTER:\n with zipfile.ZipFile(zipfilename, 'r') as zip_ref:\n zip_ref.extractall(extract_to_dir)\nelse:\n raise Exception(\"Invalid dataset chosen\")", "_____no_output_____" ] ], [ [ "### Load the data into wrapper classes and apply transformations", "_____no_output_____" ] ], [ [ "# Custom transform to reorder the color channels to the correct order\nclass BGR2RGB:\n def __call__(self, im):\n b, g, r = im.split()\n return Image.merge(\"RGB\", (r, g, b))", "_____no_output_____" ], [ "# Set how many images to process at once\n# Can be changed to accomodate lower memory devices\nBATCH_SIZE = 64\n\ntrans = [\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n]\n\n# Color channel transform only applies to cropped dataset\nif dataset == Dataset.FACE:\n trans = [BGR2RGB()] + trans\n\ndata_transforms = transforms.Compose(trans)\n\ndata_dir = os.path.join(extract_to_dir, configuration[dataset][\"datadir\"])\n\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms)\n for x in ['train', 'val', 'test']}\n\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=BATCH_SIZE,\n shuffle=True, num_workers=4)\n for x in ['train', 'val', 'test']}\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}\nclass_names = image_datasets['train'].classes\nprint(class_names)\nprint(image_datasets['val'].classes)\nprint(dataset_sizes)", "['Kjartan', 'Lars', 'Morgan', 'Other']\n['Kjartan', 'Lars', 'Morgan', 'Other']\n{'train': 109396, 'val': 23445, 'test': 23442}\n" ] ], [ [ "### Create a helper function to aid in image plotting and show a random sample of the input data", "_____no_output_____" ] ], [ [ "def imshow(inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(0.001)\n\n# Get a batch of training data\ninputs, classes = next(iter(dataloaders['val']))\n\ninputs, classes = inputs[:8], classes[:8]\n\nprint(inputs.shape)\n\n# Make a grid from batch\nout = torchvision.utils.make_grid(inputs)\n\nimshow(out, title=[class_names[x] for x in 
classes])", "torch.Size([8, 3, 224, 224])\n" ] ], [ [ "### Create a function for training and validation\nThe following function trains the supplied model with the loss criterion and optimizer supplied, for the specified number of epochs. During training it logs the loss and accuracy for both training and validation. Whenever a better model is found on the validation set, the function saves the model parameters to a file for use for inference later.", "_____no_output_____" ] ], [ [ "def train_model(model, criterion, optimizer, num_epochs=25):\n since = time.time()\n\n modelname = f\"{type(model).__name__}-{since}\"\n print(f\"Training model: `{type(model).__name__}`\")\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n num_img = {\n \"train\": 0,\n \"val\": 0,\n }\n \n datapoints_per_epoch = 100\n\n imgs_per_datapoint = {\n \"train\": int(float(dataset_sizes[\"train\"] / datapoints_per_epoch)),\n \"val\": int(float(dataset_sizes[\"val\"] / datapoints_per_epoch)),\n }\n\n print(\"Images per phase:\", imgs_per_datapoint[\"train\"], imgs_per_datapoint[\"val\"])\n\n for epoch in range(num_epochs):\n print(f\"Epoch {epoch}/{num_epochs - 1}\")\n print(\"-\" * 10)\n \n with open(os.path.join(BASE_PATH, f\"logs/{modelname}.csv\"), \"a\") as f:\n\n # For each epoch we want to both train and evaluate in that order\n for phase in [\"train\", \"val\"]:\n if phase == \"train\":\n # Makes the network ready for training, i.e. the parameters can be tuned\n # and possible Dropouts are activated\n model.train()\n else:\n # Makes the network ready for inference, i.e. it is not tunable and will\n # turn off regularization that might interfere with training\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0\n\n plot_loss = 0\n plot_corrects = 0\n\n plot_points = 0\n\n # Iterate over training or validation data\n for inputs, labels in tqdm(dataloaders[phase], desc=f\"Epoch: {epoch} ({phase})\", file=sys.stdout):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # Reset the gradients before calculating new ones\n optimizer.zero_grad()\n\n \n # Ask PyTorch to generate computation graph only if in training mode\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n \n # Only perform update steps if we're training\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n\n # Save values for statistics and logging\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n \n plot_loss += loss.item() * inputs.size(0)\n plot_corrects += torch.sum(preds == labels.data)\n \n num_img[phase] += BATCH_SIZE\n \n if (num_img[phase] % imgs_per_datapoint[phase]) < (BATCH_SIZE + 1):\n f.write(f\"{time.time()},{epoch},{phase},\\\n {num_img[phase]},{plot_loss / float(imgs_per_datapoint[phase])},\\\n {plot_corrects / float(imgs_per_datapoint[phase])}\\n\")\n \n plot_loss = 0\n plot_corrects = 0\n plot_points += 1\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print(f\"{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}\")\n print(f\"Points plotted: {plot_points}\")\n\n # deep copy the model\n if phase == \"val\" and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n # This saves the data that can be loaded later for inference\n torch.save(\n {\n \"loss\": epoch_loss,\n \"acc\": epoch_acc,\n \"epoch\": epoch,\n \"parameters\": 
best_model_wts,\n },\n os.path.join(BASE_PATH, f\"checkpoints/{modelname}.data\"),\n )\n print()\n\n time_elapsed = time.time() - since\n print(f\"Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s\")\n print(f\"Best val Acc: {best_acc:4f}\")\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model", "_____no_output_____" ] ], [ [ "### Prepare the home-made CNN – WoodNet\nBelow are two networks. The first is made by the authors, and is made to be trained from scratch on the training data. The other is fully trained on ImageNet (1000 classes) and fine-tuned on the training data.", "_____no_output_____" ] ], [ [ "class WoodNet(nn.Module):\n # After 5 pooling layers the 224x224 images are reduced to 7x7\n # while 3 channels have become 64.\n size_after_conv = 7 * 7 * 64\n def __init__(self):\n super(WoodNet, self).__init__()\n self.features = nn.Sequential( \n nn.Conv2d(3, 32, kernel_size=3, padding=1),\n nn.MaxPool2d(2),\n nn.ReLU(),\n\n nn.Conv2d(32, 64, kernel_size=3, padding=1),\n nn.MaxPool2d(2),\n nn.ReLU(),\n \n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.MaxPool2d(2),\n nn.ReLU(),\n \n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.MaxPool2d(2),\n nn.ReLU(),\n \n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.MaxPool2d(2),\n nn.ReLU(),\n )\n self.classify = nn.Sequential(\n nn.Linear(self.size_after_conv, 2048),\n nn.ReLU(),\n nn.Linear(2048, 1024),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(1024, len(class_names)),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(-1, self.size_after_conv)\n x = self.classify(x)\n\n return x\n\n# Create the net\nwoodnet = WoodNet()\nwoodnet", "_____no_output_____" ] ], [ [ "### Prepare the pretrained CNN – SqueezeNet\nBelow is the code for loading in the pretrained SqueezeNet. After it is loaded, the last classification layer is replaced with a one with the correct amount of output classes.", "_____no_output_____" ] ], [ [ "# Load pretrained SqueezeNet\nsqueezenet = models.squeezenet1_1(pretrained=True, progress=True)\n# Replace the last layer with one with the correct number of channels\nnum_ftr = squeezenet.classifier[1].in_channels\nsqueezenet.classifier[1] = nn.Conv2d(num_ftr, len(class_names), 1, 1)\nsqueezenet", "_____no_output_____" ] ], [ [ "### Prepare a bad net – BadNet \nBelow is a simple model which will be used for performance comparison with WoodNet and SqueezeNet. \n\n\n", "_____no_output_____" ] ], [ [ "class BadNet(nn.Module):\n def __init__(self):\n super(BadNet, self).__init__()\n \n self.classify = nn.Sequential(\n nn.Linear(224*224*3, 128),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(128, len(class_names)),\n )\n\n def forward(self, x):\n x = x.view(-1, 224*224*3)\n x = self.classify(x)\n\n return x\n\nbadnet = BadNet()\nbadnet", "_____no_output_____" ], [ "# Put all the different networks in a dictionary for convenient use later\nnetworks = {\n Network.WOODNET: woodnet,\n Network.SQUEEZENET: squeezenet,\n Network.BADNET: badnet,\n}", "_____no_output_____" ] ], [ [ "### Train the network\nBelow is code that instantiates the loss function and optimization method and starts the training.\nTo train every parameter in SqueezeNet, set `train_full_network = True`, and to `False` if only the last layer is to be trained. Set the variable network to the network that you want to train. 
Choices are `woodnet` and `squeezenet`.", "_____no_output_____" ] ], [ [ "# network = networks[Network.SQUEEZENET].to(device)\n# network = networks[Network.BADNET].to(device)\nnetwork = networks[Network.WOODNET].to(device)\n\ntrain_full_network = False\n\nif train_full_network or isinstance(network, (WoodNet, BadNet)):\n print(\"Training full network\")\n parameters = network.parameters()\nelse:\n print(\"Training only last layer of SqueezeNet\")\n parameters = network.classifier[1].parameters()\n\noptimizer = torch.optim.SGD(parameters, lr=0.001, momentum=0.9)\nloss_function = nn.CrossEntropyLoss()\n\ntrain_model(network, loss_function, optimizer, num_epochs=25)", "_____no_output_____" ] ], [ [ "### Visualize the model performance for some images", "_____no_output_____" ] ], [ [ "def visualize_model(model, num_images=6):\n was_training = model.training\n model.eval()\n images_so_far = 0\n fig = plt.figure()\n\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(dataloaders['test']):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n for j in range(inputs.size()[0]):\n images_so_far += 1\n ax = plt.subplot(num_images//2, 2, images_so_far)\n ax.axis('off')\n ax.set_title('predicted: {}'.format(class_names[preds[j]]))\n imshow(inputs.cpu().data[j])\n\n if images_so_far == num_images:\n model.train(mode=was_training)\n return\n model.train(mode=was_training)", "_____no_output_____" ], [ "visualize_model(network)", "_____no_output_____" ] ], [ [ "### Download checkpoints from previous training for evaluation\nMeant to use if running the training of the models is not something you'd feel inclined to do.", "_____no_output_____" ] ], [ [ "\nblobnames = [\n \"SqueezeNet-1605361529.9021263_cropped.data\",\n \"SqueezeNet-1605290736.1277423.data\",\n \"WoodNet-1605365270.1111202_cropped.data\",\n \"WoodNet-1605294933.5362356.data\",\n]\n\nfor blob in blobnames:\n with open(os.path.join(BASE_PATH, \"checkpoints\", blob), \"wb\") as f:\n storage.Client.create_anonymous_client().download_blob_to_file(f\"{STORAGE_ROOT}/checkpoints/{blob}\", f)\n\n# Put the files in a dictionary for convenient use\nsaves_data = {\n Network.WOODNET: {\n Dataset.FACE: \"WoodNet-1605365270.1111202_cropped.data\",\n Dataset.CENTER: \"WoodNet-1605294933.5362356.data\",\n },\n Network.SQUEEZENET: {\n Dataset.FACE: \"SqueezeNet-1605361529.9021263_cropped.data\",\n Dataset.CENTER: \"SqueezeNet-1605290736.1277423.data\",\n },\n}", "_____no_output_____" ] ], [ [ "### Test the trained network with example images scraped from facebook etc. 
Can be used by uploading images and changing the paths", "_____no_output_____" ] ], [ [ "# This requires the training to have been run in the current session\n# One could alternatively load in weights if training has not been run:\nload_weights = True\nif load_weights:\n network = networks[Network.SQUEEZENET]\n save_data = saves_data[Network.SQUEEZENET][Dataset.FACE]\n network.load_state_dict(torch.load(os.path.join(BASE_PATH, \"checkpoints\", save_data))[\"parameters\"])\n network = network.to(device).eval()\n\ninputs = [\n # These paths are meant as examples\n # Here one can change and add paths to one's own images and test with\n # cv2.imread(\"/content/lars_1.png\", cv2.IMREAD_COLOR),\n # cv2.imread(\"/content/morgan_1.png\", cv2.IMREAD_COLOR),\n cv2.imread(os.path.join(BASE_PATH, \"faces\", configuration[dataset][\"datadir\"], \"test/Kjartan/kjartan_video_0_100_augmentation_4.jpg\"), cv2.IMREAD_COLOR),\n]\n\n\ndef get_prediction_image(net, img, true_lab=None, plot=False):\n assert not plot or (plot and true_lab)\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n\n inp = cv2.resize(img, (224, 224)) / 255.0\n inp = inp / std - mean\n inp = inp.transpose((2, 0, 1))\n\n imgt = torch.Tensor(inp).unsqueeze(0).to(device)\n\n out = net(imgt)\n\n probabilities = F.softmax(out, dim=1)\n\n prob, class_idx = torch.max(probabilities, dim=1)\n pred = class_names[class_idx]\n\n if plot:\n plt.imshow(img)\n plt.text(5, 17, f\"Actual : {true_lab}\", color=\"white\", fontsize=14)\n plt.text(5, 34, f\"Predicted: {pred}\", color=\"white\", fontsize=14)\n\n return pred, round(prob.item() * 100, 2), probabilities\n\nget_prediction_image(network, inputs[0], \"Kjartan\", plot=True)", "_____no_output_____" ] ], [ [ "### Plot training and validation loss and accuracy", "_____no_output_____" ] ], [ [ "# This downloads and uses training logs from the training\n# done by the authors, but feel free to use logs from training\n# runs done by yourself as well\n# The plots in the paper was generated from the below code run in a .py file\nlogfiles = [\n \"WoodNet-1605365270.1111202_cropped.csv\",\n \"SqueezeNet-1605361529.9021263_cropped.csv\",\n]\n\nfor blob in logfiles:\n with open(os.path.join(BASE_PATH, \"logs\", blob), \"wb\") as f:\n storage.Client.create_anonymous_client().download_blob_to_file(f\"{STORAGE_ROOT}/logs/{blob}\", f)\n\n\nf, axes = plt.subplots(2, 2)\nf.suptitle(\"WoodNet and SqueezeNet training on images cropped to faces\")\n\nfor i, (logfile, ax) in enumerate(zip(logfiles, axes)):\n\n netname = logfile.split(\"-\")[0]\n\n df = pd.read_csv(\n os.path.join(BASE_PATH, \"logs\", logfile),\n names=[\"time\", \"epoch\", \"phase\", \"img\", \"loss\", \"acc\"],\n )\n\n df[\"acc\"] = df[\"acc\"] * 100\n\n trn = df[df[\"phase\"] == \"train\"]\n val = df[df[\"phase\"] == \"val\"]\n val[\"img\"] = trn[\"img\"]\n\n epochs = 3\n num_row = min(trn.shape[0], val.shape[0], 103 * epochs)\n\n max_img = trn[\"img\"].max()\n\n trn = trn.iloc[:num_row, :]\n val = val.iloc[:num_row, :]\n\n trn['acc_sma'] = trn.loc[:,\"acc\"].rolling(window=60).mean()\n val['acc_sma'] = val.loc[:,\"acc\"].rolling(window=60).mean()\n\n trn['loss_sma'] = trn.loc[:,\"loss\"].rolling(window=60).mean()\n val['loss_sma'] = val.loc[:,\"loss\"].rolling(window=60).mean()\n\n ax[0].plot(trn[\"img\"], list(trn[\"acc_sma\"]), label=\"Training accuracy\")\n ax[0].plot(trn[\"img\"], list(val[\"acc_sma\"]), label=\"Validation accuracy\")\n\n for x in range(109000, 109000*epochs + 1, 109000):\n ax[0].axvline(x=x, 
linewidth=1, color=\"black\")\n\n ax[0].set_xlabel(\"Number of images\")\n ax[0].set_ylabel(\"Accuracy (%)\")\n ax[0].set_ylim((80, 100))\n ax[0].set_title(f\"{netname} Accuracy\")\n ax[0].legend()\n\n ax[1].plot(trn[\"img\"], list(trn[\"loss_sma\"]), label=\"Training loss\")\n ax[1].plot(trn[\"img\"], list(val[\"loss_sma\"]), label=\"Validation loss\")\n\n for x in range(109000, 109000*epochs + 1, 109000):\n ax[1].axvline(x=x, linewidth=1, color=\"black\")\n\n ax[1].set_xlabel(\"Number of images\")\n ax[1].set_ylabel(\"Loss measured by cross-entropy\")\n ax[1].set_title(f\"{netname} Loss\")\n ax[1].legend()\n\nplt.rcParams['figure.figsize'] = [15, 5]\n\nplt.show()", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:30: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:30: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n" ] ], [ [ "### Plot example of prediction with and without cropping to face", "_____no_output_____" ] ], [ [ "images = [\n \"morgan_1_uncropped.png\",\n \"morgan_1_cropped.png\",\n]\n\nfor blob in images:\n with open(os.path.join(BASE_PATH, \"examples\", blob), \"wb\") as f:\n storage.Client.create_anonymous_client().download_blob_to_file(f\"{STORAGE_ROOT}/examples/{blob}\", f)\n\ninputs = [cv2.imread(os.path.join(BASE_PATH, \"examples\", img), cv2.IMREAD_COLOR) for img in images]\ninputs = list(map(lambda x: cv2.cvtColor(cv2.resize(x, (224, 224)), cv2.COLOR_BGR2RGB), inputs))\n\nnetwork = networks[Network.SQUEEZENET]\nsave_data = saves_data[Network.SQUEEZENET][Dataset.CENTER]\nnetwork.load_state_dict(torch.load(os.path.join(BASE_PATH, \"checkpoints\", save_data))[\"parameters\"])\nnetwork = network.to(device).eval()\n\nplt.imshow(np.concatenate((inputs[0], inputs[1]), axis=1))\n(pred, prob, _), actual = get_prediction_image(network, inputs[0]), \"Morgan\"\nplt.text(5, 17, f\"Actual : {actual}\", color=\"white\", fontsize=14)\nplt.text(5, 34, f\"Predicted: {pred}\", color=\"white\", fontsize=14)\nplt.text(5, 52, f\"[Certainty ({prob}%)]\", color=\"white\", fontsize=12)\n\n\n(pred, prob, _), actual = get_prediction_image(network, inputs[1]), \"Morgan\"\nplt.text(249, 17, f\"Actual : {actual}\", color=\"white\", fontsize=14)\nplt.text(249, 34, f\"Predicted: {pred}\", color=\"white\", fontsize=14)\nplt.text(249, 52, f\"[Certainty ({prob}%)]\", color=\"white\", fontsize=12)\n\nplt.savefig(\"morgan_crop_plot.png\")\nplt.show();", "_____no_output_____" ] ], [ [ "### Code for calculating the accuracy, loss, precision, and recall for all the networks and datasets\nEach time the cell is run you must choose one of the four configurations, i.e. 
WoodNet or SqueezeNet and cropped to face or center.", "_____no_output_____" ] ], [ [ "# Load saved data from a training run\n# and initialize model weights to the saved weights\n# Choose net and dataset\nnet_type = Network.SQUEEZENET\ndata_type = Dataset.CENTER\n\nnetwork = networks[net_type]\nsave_data = saves_data[net_type][data_type]\nnetwork.load_state_dict(torch.load(os.path.join(BASE_PATH, \"checkpoints\", save_data))[\"parameters\"])\nnetwork = network.to(device).eval()\n\nloss_function = nn.CrossEntropyLoss()\n\nrunning_loss = 0\nrunning_corrects = 0\n\n# Define what classes have access\naccess = set([0, 1, 2])\n\n# Counters for true positives, false positives, and false negatives\ntp = 0\nfp = 0\nfn = 0\n\nfor inputs, labels in tqdm(dataloaders[\"test\"]):\n inputs, labels = inputs.to(device), labels.to(device)\n with torch.no_grad():\n\n outputs = network(inputs)\n _, preds = torch.max(outputs, 1)\n loss = loss_function(outputs, labels)\n\n # Save values for statistics and logging\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n for pred, lab in zip(preds, labels):\n pred, lab = pred.item(), lab.item()\n if lab in access and pred in access:\n tp += 1\n elif lab in access and pred not in access:\n fn += 1\n elif lab not in access and pred in access:\n fp += 1\n\n\n# Loss\nprint(running_loss / dataset_sizes[\"test\"])\n\n# Accuracy\nprint(running_corrects.double() / dataset_sizes[\"test\"])\n\n# Precision\nprint(tp / (tp + fp))\n\n# Recall\nprint(tp / (tp + fn))", "_____no_output_____" ] ], [ [ "### Create confusion matrix for the WoodNet, SqueezeNet, and BadNet", "_____no_output_____" ], [ "Below is a function which returns all true lables and a models predicted values", "_____no_output_____" ] ], [ [ "@torch.no_grad()\ndef get_all_preds_labels(model, loader):\n all_preds = torch.tensor([]).to(device)\n all_labels = torch.tensor([]).to(device)\n\n for inputs, labels in tqdm(loader):\n labels = labels.to(device)\n inputs = inputs.to(device)\n\n preds = model(inputs)\n all_preds = torch.cat(\n (all_preds, preds)\n ,dim=0\n )\n all_labels = torch.cat(\n (all_labels, labels)\n ,dim=0\n )\n return all_preds, all_labels\n", "_____no_output_____" ] ], [ [ "This is a convolutional matrix in tensor form, not used for the visualisation, but we provide it as it can be useful in some cases", "_____no_output_____" ] ], [ [ "def cm_tensor(stacked):\n cm_tensor = torch.zeros(4,4, dtype=torch.int64)\n for pair in stacked:\n al,pl = pair.tolist()\n cm_tensor[int(al),int(pl)] = cm_tensor[int(al),int(pl)] + 1\n ", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import plot_confusion_matrix\n", "_____no_output_____" ] ], [ [ "The plotting is done on the cpu. As such we return our calculated test predictions and acutal labels to be calculated on the cpu. 
", "_____no_output_____" ], [ "The below plotter is based on the code from DeepLizard: https://deeplizard.com/learn/video/0LhiS6yu2qQ?fbclid=IwAR1Zb3LSBe4nhuxa6OhwpW4-rXwg7LhMIeG0C0iCWMrYLH2Bkhfh-z5IaL0 ", "_____no_output_____" ] ], [ [ "import itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_confusion_matrix(cm, classes, filename, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n else:\n #print('Confusion matrix, without normalization')\n pass\n\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n print(filename)\n plt.savefig(filename)\n# plot_confusion_matrix(cm,class_names)", "_____no_output_____" ], [ "#test_preds, test_labels = get_all_preds_labels(model,dataloader_and_type)\ndef cnn_constructor(preds,labels,filename,title): \n stacked = torch.stack(\n (\n labels\n ,preds.argmax(dim=1)\n )\n ,dim=1\n )\n labels = labels.cpu()\n preds = preds.cpu()\n cm = confusion_matrix(labels, preds.argmax(dim=1))\n plot_confusion_matrix(cm,class_names,filename,title=title)\n\n# Below is code for getting confusion matrix for each of the networks\n# Uncomment one at a time to get corresponding confusion matrix\n\n# SqueezeNet\n# test_preds_s, test_labels_s = get_all_preds_labels(squeezenet, dataloaders[\"test\"])\n# cnn_constructor(test_preds_s, test_labels_s, \"/content/squeezenet_cm.png\", title=\"SqueezeNet Confusion Matrix\");\n\n# WoodNet\ntest_preds_w, test_labels_w = get_all_preds_labels(woodnet,dataloaders[\"test\"])\ncnn_constructor(test_preds_w, test_labels_w, \"/content/woodnet_cm.png\", title=\"WoodNet Confusion Matrix\");\n\n# BadNet\n# test_preds_b ,test_labels_b = get_all_preds_labels(badnet,dataloaders[\"test\"])\n#cnn_constructor(test_preds_b, test_labels_b, \"/content/badnet_cm.png\", title=\"BadNet Confusion Matrix\");\n", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
e718d46d42b234fe799d354f71fea49370fc3fc8
4,580
ipynb
Jupyter Notebook
nb_dev_python/python_numba_en.ipynb
jdhp-docs/python-notebooks
91a97ea5cf374337efa7409e4992ea3f26b99179
[ "MIT" ]
3
2017-05-03T12:23:36.000Z
2020-10-26T17:30:56.000Z
nb_dev_python/python_numba_en.ipynb
jdhp-docs/python-notebooks
91a97ea5cf374337efa7409e4992ea3f26b99179
[ "MIT" ]
null
null
null
nb_dev_python/python_numba_en.ipynb
jdhp-docs/python-notebooks
91a97ea5cf374337efa7409e4992ea3f26b99179
[ "MIT" ]
1
2020-10-26T17:30:57.000Z
2020-10-26T17:30:57.000Z
17.480916
80
0.462882
[ [ [ "# Numba", "_____no_output_____" ], [ "See: http://numba.pydata.org/numba-doc/latest/index.html", "_____no_output_____" ], [ "## Basic exemple: sum elements of an array", "_____no_output_____" ], [ "### Without Numba", "_____no_output_____" ] ], [ [ "from numpy import arange\n\ndef sum2d(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i,j]\n return result", "_____no_output_____" ], [ "a = arange(1000000).reshape(1000, 1000)", "_____no_output_____" ], [ "%%timeit\n\nsum2d(a)", "_____no_output_____" ], [ "sum2d(a)", "_____no_output_____" ] ], [ [ "### With Numba", "_____no_output_____" ] ], [ [ "from numba import jit\nfrom numpy import arange\n\n# jit decorator tells Numba to compile this function.\n# The argument types will be inferred by Numba when function is called.\n@jit\ndef sum2d_numba(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i,j]\n return result", "_____no_output_____" ], [ "a = arange(1000000).reshape(1000, 1000)", "_____no_output_____" ], [ "%%timeit\n\nsum2d_numba(a)", "_____no_output_____" ], [ "sum2d_numba(a)", "_____no_output_____" ] ], [ [ "## Fibonacci", "_____no_output_____" ] ], [ [ "n = 10000000", "_____no_output_____" ] ], [ [ "### Without Numba", "_____no_output_____" ] ], [ [ "def fibonacci(max_value):\n a, b = 0, 1\n while b < max_value:\n a, b = b, a+b\n return b", "_____no_output_____" ], [ "%%timeit\n\nfibonacci(n)", "_____no_output_____" ], [ "fibonacci(n)", "_____no_output_____" ] ], [ [ "### With Numba", "_____no_output_____" ] ], [ [ "from numba import jit\n\n@jit\ndef fibonacci_numba(max_value):\n a, b = 0, 1\n while b < max_value:\n a, b = b, a+b\n return b", "_____no_output_____" ], [ "%%timeit\n\nfibonacci_numba(n)", "_____no_output_____" ], [ "fibonacci_numba(n)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e718d57e761bbfff2ba9eeb1c13db7c1740e83b5
50,140
ipynb
Jupyter Notebook
cugraph/link_prediction/Overlap-Similarity.ipynb
Iroy30/notebooks
7133a9f9a5cbde3e7bf978054fd5aefe751d71d6
[ "Apache-2.0" ]
1
2020-11-08T17:03:32.000Z
2020-11-08T17:03:32.000Z
cugraph/link_prediction/Overlap-Similarity.ipynb
etarakci-hvl/notebooks
a17b6907a9d6494c20cb88cb2689eeecf63fdfe3
[ "Apache-2.0" ]
null
null
null
cugraph/link_prediction/Overlap-Similarity.ipynb
etarakci-hvl/notebooks
a17b6907a9d6494c20cb88cb2689eeecf63fdfe3
[ "Apache-2.0" ]
null
null
null
34.108844
695
0.473215
[ [ [ "# Overlap Similarity\n----\n\nIn this notebook we will explore the Overlap Coefficient and compare it again Jaccard. Similarity can be between neighboring vertices (default) or second hop neighbors\n\n\nNotebook Credits\n\n Original Authors: Brad Rees\n Created: 10/14/2019\n Last Edit: 10/28/2019\n\nRAPIDS Versions: 0.10\n\nTest Hardware\n* GV100 32G, CUDA 10.0\n", "_____no_output_____" ], [ "## Introduction - Common Neighbor Similarity \n\nOne of the most common types of vertex similarity is to evaluate the neighborhood of vertex pairs and looks at the number of common neighbors. TThat type of similar comes from statistics and is based on set comparison. Both Jaccard and the Overlap Coefficient operate on sets, and in a graph setting, those sets are the list of neighboring vertices. <br>\nFor those that like math: The neighbors of a vertex, _v_, is defined as the set, _U_, of vertices connected by way of an edge to vertex v, or _N(v) = {U} where v ∈ V and ∀ u ∈ U ∃ edge(v,u)∈ E_.\n\nFor the rest of this introduction, set __A__ will equate to _A = N(i)_ and set __B__ will quate to _B = N(j)_. That just make the rest of the text more readable.", "_____no_output_____" ], [ "\n### Overlap Coefficient", "_____no_output_____" ], [ "The Overlap Coefficient between two sets is defined as the ratio of the volume of their intersection divided by the volume of the smaller set.\nThe Overlap Coefficient can be defined as\n\n<a href=\"https://www.codecogs.com/eqnedit.php?latex=oc(A,B)&space;=&space;\\frac{|A|&space;\\cap&space;|B|}{min(|A|,&space;|B|)&space;}\" target=\"_blank\"><img src=\"https://latex.codecogs.com/gif.latex?oc(A,B)&space;=&space;\\frac{|A&space;\\cap&space;B|}{min(|A|,&space;|B|)&space;}\" title=\"oc(A,B) = \\frac{|A \\cap B|}{min(|A|, |B|) }\" /></a>\n\nTo compute the Overlap Coefficient between all pairs of vertices connected by an edge in cuGraph use: <br>\n\n__df = cugraph.overlap(G)__\n\n G: A cugraph.Graph object\n\nReturns:\n\n df: cudf.DataFrame with three names columns:\n df[\"source\"]: The source vertex id.\n df[\"destination\"]: The destination vertex id.\n df[\"overlap_coeff\"]: The overlap coefficient computed between the source and destination vertex.\n\n__References__\n- https://en.wikipedia.org/wiki/Overlap_coefficient\n", "_____no_output_____" ], [ "#### Refresh on Jaccard\nThe Jaccard similarity between two sets is defined as the ratio of the volume of their intersection divided by the volume of their union. 
\n\nThe Jaccard Similarity can then be defined as\n\n<a href=\"https://www.codecogs.com/eqnedit.php?latex=js(A,B)&space;=&space;\\frac{|A&space;\\cap&space;B|}{|A&space;\\cup&space;B&space;|&space;}&space;=&space;\\frac{|A&space;\\cap&space;B|}{&space;|A|&space;&plus;&space;|B|&space;-&space;|A&space;\\cup&space;B&space;|&space;}\" target=\"_blank\"><img src=\"https://latex.codecogs.com/gif.latex?js(A,B)&space;=&space;\\frac{|A&space;\\cap&space;B|}{|A&space;\\cup&space;B&space;|&space;}&space;=&space;\\frac{|A&space;\\cap&space;B|}{&space;|A|&space;&plus;&space;|B|&space;-&space;|A&space;\\cup&space;B&space;|&space;}\" title=\"js(A,B) = \\frac{|A \\cap B|}{|A \\cup B | } = \\frac{|A \\cap B|}{ |A| + |B| - |A \\cup B | }\" /></a>\n\n\nTo compute the Jaccard similarity between all pairs of vertices connected by an edge in cuGraph use: <br>\n__df = cugraph.jaccard(G)__\n\n G: A cugraph.Graph object\n\nReturns:\n\n df: cudf.DataFrame with three names columns:\n df[\"source\"]: The source vertex id.\n df[\"destination\"]: The destination vertex id.\n df[\"jaccard_coeff\"]: The jaccard coefficient computed between the source and destination vertex.\n<br>\n\nSee the Jaccard notebook for additional information and background", "_____no_output_____" ], [ "### Additional Reading\n- [Similarity in graphs: Jaccard versus the Overlap Coefficient](https://medium.com/rapids-ai/similarity-in-graphs-jaccard-versus-the-overlap-coefficient-610e083b877d)\n- [Wikipedia: Overlap Coefficient](https://en.wikipedia.org/wiki/Overlap_coefficient)\n", "_____no_output_____" ], [ "#### cuGraph Notice \nThe current version of cuGraph has some limitations:\n\n* Vertex IDs need to be 32-bit integers.\n* Vertex IDs are expected to be contiguous integers starting from 0.\n\ncuGraph provides the renumber function to mitigate this problem. Input vertex IDs for the renumber function can be either 32-bit or 64-bit integers, can be non-contiguous, and can start from an arbitrary number. The renumber function maps the provided input vertex IDs to 32-bit contiguous integers starting from 0. cuGraph still requires the renumbered vertex IDs to be representable in 32-bit integers. These limitations are being addressed and will be fixed soon.", "_____no_output_____" ], [ "## Test Data\nWe will be using the Zachary Karate club dataset \n*W. W. Zachary, An information flow model for conflict and fission in small groups, Journal of\nAnthropological Research 33, 452-473 (1977).*\n\n\n![Karate Club](../img/zachary_black_lines.png)\n\nThis is a small graph which allows for easy visual inspection to validate results. 
", "_____no_output_____" ], [ "---\n# Let's get started!", "_____no_output_____" ] ], [ [ "# Import needed libraries\nimport cugraph\nimport cudf\nfrom collections import OrderedDict", "_____no_output_____" ] ], [ [ "----\n### Define some Print functions\n(the `del` are not needed since going out of scope should free memory)", "_____no_output_____" ] ], [ [ "# define a function for printing the top most similar vertices\ndef print_most_similar_jaccard(df):\n \n jmax = df['jaccard_coeff'].max()\n dm = df.query('jaccard_coeff >= @jmax') \n \n #find the best\n for i in range(len(dm)): \n print(\"Vertices \" + str(dm['source'][i]) + \" and \" + \n str(dm['destination'][i]) + \" are most similar with score: \" \n + str(dm['jaccard_coeff'][i]))\n del jmax\n del dm", "_____no_output_____" ], [ "# define a function for printing the top most similar vertices\ndef print_most_similar_overlap(df):\n \n smax = df['overlap_coeff'].max()\n dm = df.query('overlap_coeff >= @smax and source < destination') \n \n for i in range(len(dm)):\n print(\"Vertices \" + str(dm['source'][i]) + \" and \" + \n str(dm['destination'][i]) + \" are most similar with score: \" \n + str(dm['overlap_coeff'][i]))\n \n del smax\n del dm", "_____no_output_____" ], [ "# define a function for printing jaccard similar vertices based on a threshold\ndef print_jaccard_threshold(_d, limit):\n \n filtered = _d.query('jaccard_coeff > @limit')\n \n for i in range(len(filtered)):\n print(\"Vertices \" + str(filtered['source'][i]) + \" and \" + \n str(filtered['destination'][i]) + \" are similar with score: \" + \n str(filtered['jaccard_coeff'][i]))", "_____no_output_____" ], [ "# define a function for printing similar vertices based on a threshold\ndef print_overlap_threshold(_d, limit):\n \n filtered = _d.query('overlap_coeff > @limit')\n \n for i in range(len(filtered)):\n if filtered['source'][i] != filtered['destination'][i] :\n print(\"Vertices \" + str(filtered['source'][i]) + \" and \" + \n str(filtered['destination'][i]) + \" are similar with score: \" + \n str(filtered['overlap_coeff'][i]))", "_____no_output_____" ] ], [ [ "### Read the CSV datafile using cuDF\ndata file is actually _tab_ separated, so we need to set the delimiter", "_____no_output_____" ] ], [ [ "# Test file \ndatafile='../data/karate-data.csv'\n\ngdf = cudf.read_csv(datafile, delimiter='\\t', names=['src', 'dst'], dtype=['int32', 'int32'] )", "_____no_output_____" ], [ "# Let's look at the DataFrame. There should be two columns and 156 records\ngdf.shape", "_____no_output_____" ], [ "# Look at the first few data records - the output should be two colums src and dst\ngdf.head()", "_____no_output_____" ] ], [ [ "### Create a Graph", "_____no_output_____" ] ], [ [ "# create a Graph \nG = cugraph.Graph()\nG.from_cudf_edgelist(gdf, source='src', destination='dst')", "_____no_output_____" ], [ "# How many vertices are in the graph? Remember that Graph is zero based\nG.number_of_vertices()", "_____no_output_____" ] ], [ [ "_The test graph has only 34 vertices, so why is the Graph listing 35?_\n\nAs mentioned above, cuGraph vertex numbering is zero-based, meaning that the first vertex ID starts at zero. The test dataset is 1-based. Because of that, the Graph object adds an extra isolated vertex with an ID of zero. Hence the difference in vertex count. 
\nWe could have run _renumbering_ on the data, or updated the value of each element _gdf['src'] = gdf['src'] - 1_ \nfor now, we will just state that vertex 0 is not part of the dataset and can be ignored", "_____no_output_____" ], [ "--- \n# Jaccard ", "_____no_output_____" ] ], [ [ "%%time\n# Call cugraph.nvJaccard \njdf = cugraph.jaccard(G)", "CPU times: user 3.75 ms, sys: 0 ns, total: 3.75 ms\nWall time: 3.46 ms\n" ], [ "# Which two vertices are the most similar?\nprint_most_similar_jaccard(jdf)", "Vertices 33 and 34 are most similar with score: 0.5263158\nVertices 34 and 33 are most similar with score: 0.5263158\n" ] ], [ [ "The Most similar shoul be 33 and 34.\nVertex 33 has 12 neighbors, vertex 34 has 17 neighbors. They share 10 neighbors in common:\n$jaccard = 10 / (10 + (12 -10) + (17-10)) = 10 / 19 = 0.526$", "_____no_output_____" ] ], [ [ "### let's look at all similarities over a threshold\nprint_jaccard_threshold(jdf, 0.4)", "Vertices 4 and 8 are similar with score: 0.42857143\nVertices 8 and 4 are similar with score: 0.42857143\nVertices 33 and 34 are similar with score: 0.5263158\nVertices 34 and 33 are similar with score: 0.5263158\n" ], [ "# Since it is a small graph we can print all scores, notice that only vertices that are neighbors are being compared\n#\n# Before printing, let's get rid of the duplicates (x compared to y is the same as y compared to x). We will do that\n# by performing a query. Then let's sort the data by score\n\njdf_s = jdf.query('source < destination').sort_values(by='jaccard_coeff', ascending=False)\n\nprint_jaccard_threshold(jdf_s, 0.0)", "Vertices 33 and 34 are similar with score: 0.5263158\nVertices 4 and 8 are similar with score: 0.42857143\nVertices 1 and 2 are similar with score: 0.3888889\nVertices 4 and 14 are similar with score: 0.375\nVertices 2 and 4 are similar with score: 0.36363637\nVertices 3 and 4 are similar with score: 0.33333334\nVertices 6 and 7 are similar with score: 0.33333334\nVertices 2 and 8 are similar with score: 0.3\nVertices 1 and 4 are similar with score: 0.29411766\nVertices 9 and 31 are similar with score: 0.2857143\nVertices 24 and 30 are similar with score: 0.2857143\nVertices 2 and 14 are similar with score: 0.27272728\nVertices 3 and 8 are similar with score: 0.27272728\nVertices 2 and 3 are similar with score: 0.26666668\nVertices 3 and 14 are similar with score: 0.25\nVertices 1 and 3 are similar with score: 0.23809524\nVertices 9 and 33 are similar with score: 0.21428572\nVertices 5 and 11 are similar with score: 0.2\nVertices 6 and 17 are similar with score: 0.2\nVertices 7 and 17 are similar with score: 0.2\nVertices 25 and 26 are similar with score: 0.2\nVertices 27 and 30 are similar with score: 0.2\nVertices 1 and 8 are similar with score: 0.1764706\nVertices 1 and 14 are similar with score: 0.16666667\nVertices 5 and 7 are similar with score: 0.16666667\nVertices 6 and 11 are similar with score: 0.16666667\nVertices 30 and 34 are similar with score: 0.16666667\nVertices 24 and 34 are similar with score: 0.15789473\nVertices 3 and 9 are similar with score: 0.15384616\nVertices 4 and 13 are similar with score: 0.14285715\nVertices 30 and 33 are similar with score: 0.14285715\nVertices 31 and 33 are similar with score: 0.14285715\nVertices 24 and 33 are similar with score: 0.13333334\nVertices 24 and 28 are similar with score: 0.125\nVertices 25 and 32 are similar with score: 0.125\nVertices 26 and 32 are similar with score: 0.125\nVertices 29 and 32 are similar with score: 0.125\nVertices 1 and 5 are 
similar with score: 0.11764706\nVertices 1 and 11 are similar with score: 0.11764706\nVertices 1 and 6 are similar with score: 0.11111111\nVertices 1 and 7 are similar with score: 0.11111111\nVertices 31 and 34 are similar with score: 0.10526316\nVertices 2 and 18 are similar with score: 0.1\nVertices 2 and 22 are similar with score: 0.1\nVertices 9 and 34 are similar with score: 0.1\nVertices 32 and 34 are similar with score: 0.0952381\nVertices 2 and 20 are similar with score: 0.09090909\nVertices 15 and 33 are similar with score: 0.07692308\nVertices 16 and 33 are similar with score: 0.07692308\nVertices 19 and 33 are similar with score: 0.07692308\nVertices 21 and 33 are similar with score: 0.07692308\nVertices 23 and 33 are similar with score: 0.07692308\nVertices 1 and 13 are similar with score: 0.05882353\nVertices 1 and 18 are similar with score: 0.05882353\nVertices 1 and 22 are similar with score: 0.05882353\nVertices 32 and 33 are similar with score: 0.05882353\nVertices 1 and 20 are similar with score: 0.055555556\nVertices 15 and 34 are similar with score: 0.055555556\nVertices 16 and 34 are similar with score: 0.055555556\nVertices 19 and 34 are similar with score: 0.055555556\nVertices 21 and 34 are similar with score: 0.055555556\nVertices 23 and 34 are similar with score: 0.055555556\nVertices 27 and 34 are similar with score: 0.055555556\nVertices 29 and 34 are similar with score: 0.05263158\nVertices 1 and 9 are similar with score: 0.05\nVertices 28 and 34 are similar with score: 0.05\nVertices 3 and 33 are similar with score: 0.04761905\n" ] ], [ [ "---\n# Overlap Coefficient", "_____no_output_____" ], [ "Noticed that the Jaccard score is based on the number of common items over the combined (union) set of items. That makes sense when the two sets being compared are relativcely close in size. 
However, when one set is considerable larger, then it is important to know if one set is a proper subset of the other <br>\nSee: [Similarity in graphs: Jaccard versus the Overlap Coefficient](https://medium.com/rapids-ai/similarity-in-graphs-jaccard-versus-the-overlap-coefficient-610e083b877d)", "_____no_output_____" ] ], [ [ "%%time\n# Call cugraph.nvJaccard \nodf = cugraph.overlap(G)", "CPU times: user 2.3 ms, sys: 594 µs, total: 2.9 ms\nWall time: 2.45 ms\n" ], [ "# print the top similar pair - this function include code to drop duplicates \nprint_most_similar_overlap(odf)", "Vertices 1 and 4 are most similar with score: 0.8333333\nVertices 33 and 34 are most similar with score: 0.8333333\n" ], [ "# print all similarities over a threshold, in this case 0.5\n#also, drop duplicates\nodf_s = odf.query('source < destination').sort_values(by='overlap_coeff', ascending=False)\n\nprint_overlap_threshold(odf_s, 0.5)", "Vertices 1 and 4 are similar with score: 0.8333333\nVertices 33 and 34 are similar with score: 0.8333333\nVertices 1 and 2 are similar with score: 0.7777778\nVertices 1 and 8 are similar with score: 0.75\nVertices 2 and 8 are similar with score: 0.75\nVertices 3 and 8 are similar with score: 0.75\nVertices 4 and 8 are similar with score: 0.75\nVertices 30 and 34 are similar with score: 0.75\nVertices 1 and 5 are similar with score: 0.6666667\nVertices 1 and 11 are similar with score: 0.6666667\nVertices 2 and 4 are similar with score: 0.6666667\nVertices 3 and 4 are similar with score: 0.6666667\nVertices 1 and 14 are similar with score: 0.6\nVertices 2 and 14 are similar with score: 0.6\nVertices 3 and 14 are similar with score: 0.6\nVertices 4 and 14 are similar with score: 0.6\nVertices 9 and 33 are similar with score: 0.6\nVertices 24 and 34 are similar with score: 0.6\n" ] ], [ [ "---", "_____no_output_____" ], [ "# Expanding similarity scoring to 2-hop vertex pair", "_____no_output_____" ] ], [ [ "# get all two-hop vertex pairs\np = G.get_two_hop_neighbors()", "_____no_output_____" ], [ "# Let's look at the Jaccard score\nol2 = cugraph.overlap(G, first=p['first'], second=p['second'])", "_____no_output_____" ], [ "print_most_similar_overlap(odf)", "Vertices 1 and 4 are most similar with score: 0.8333333\nVertices 33 and 34 are most similar with score: 0.8333333\n" ], [ "# print all similarities over a threshold, in this case 0.5\n#also, drop duplicates\nodf_s2 = ol2.query('source < destination').sort_values(by='overlap_coeff', ascending=False)\n\nprint_overlap_threshold(odf_s2, 0.74)", "Vertices 1 and 17 are similar with score: 1.0\nVertices 2 and 12 are similar with score: 1.0\nVertices 2 and 13 are similar with score: 1.0\nVertices 3 and 12 are similar with score: 1.0\nVertices 3 and 13 are similar with score: 1.0\nVertices 3 and 18 are similar with score: 1.0\nVertices 3 and 22 are similar with score: 1.0\nVertices 4 and 12 are similar with score: 1.0\nVertices 4 and 18 are similar with score: 1.0\nVertices 4 and 22 are similar with score: 1.0\nVertices 5 and 6 are similar with score: 1.0\nVertices 5 and 12 are similar with score: 1.0\nVertices 6 and 12 are similar with score: 1.0\nVertices 7 and 11 are similar with score: 1.0\nVertices 7 and 12 are similar with score: 1.0\nVertices 8 and 12 are similar with score: 1.0\nVertices 8 and 13 are similar with score: 1.0\nVertices 8 and 14 are similar with score: 1.0\nVertices 8 and 18 are similar with score: 1.0\nVertices 8 and 22 are similar with score: 1.0\nVertices 9 and 10 are similar with score: 1.0\nVertices 9 and 12 are 
similar with score: 1.0\nVertices 9 and 15 are similar with score: 1.0\nVertices 9 and 16 are similar with score: 1.0\nVertices 9 and 19 are similar with score: 1.0\nVertices 9 and 21 are similar with score: 1.0\nVertices 9 and 23 are similar with score: 1.0\nVertices 10 and 14 are similar with score: 1.0\nVertices 10 and 28 are similar with score: 1.0\nVertices 10 and 29 are similar with score: 1.0\nVertices 10 and 33 are similar with score: 1.0\nVertices 11 and 12 are similar with score: 1.0\nVertices 12 and 13 are similar with score: 1.0\nVertices 12 and 14 are similar with score: 1.0\nVertices 12 and 18 are similar with score: 1.0\nVertices 12 and 20 are similar with score: 1.0\nVertices 12 and 22 are similar with score: 1.0\nVertices 12 and 32 are similar with score: 1.0\nVertices 13 and 14 are similar with score: 1.0\nVertices 14 and 18 are similar with score: 1.0\nVertices 14 and 20 are similar with score: 1.0\nVertices 14 and 22 are similar with score: 1.0\nVertices 15 and 16 are similar with score: 1.0\nVertices 15 and 19 are similar with score: 1.0\nVertices 15 and 21 are similar with score: 1.0\nVertices 15 and 23 are similar with score: 1.0\nVertices 15 and 24 are similar with score: 1.0\nVertices 15 and 30 are similar with score: 1.0\nVertices 15 and 31 are similar with score: 1.0\nVertices 15 and 32 are similar with score: 1.0\nVertices 16 and 19 are similar with score: 1.0\nVertices 16 and 21 are similar with score: 1.0\nVertices 16 and 23 are similar with score: 1.0\nVertices 16 and 24 are similar with score: 1.0\nVertices 16 and 30 are similar with score: 1.0\nVertices 16 and 31 are similar with score: 1.0\nVertices 16 and 32 are similar with score: 1.0\nVertices 18 and 20 are similar with score: 1.0\nVertices 18 and 22 are similar with score: 1.0\nVertices 19 and 21 are similar with score: 1.0\nVertices 19 and 23 are similar with score: 1.0\nVertices 19 and 24 are similar with score: 1.0\nVertices 19 and 30 are similar with score: 1.0\nVertices 19 and 31 are similar with score: 1.0\nVertices 19 and 32 are similar with score: 1.0\nVertices 20 and 22 are similar with score: 1.0\nVertices 21 and 23 are similar with score: 1.0\nVertices 21 and 24 are similar with score: 1.0\nVertices 21 and 30 are similar with score: 1.0\nVertices 21 and 31 are similar with score: 1.0\nVertices 21 and 32 are similar with score: 1.0\nVertices 23 and 24 are similar with score: 1.0\nVertices 23 and 30 are similar with score: 1.0\nVertices 23 and 31 are similar with score: 1.0\nVertices 23 and 32 are similar with score: 1.0\nVertices 24 and 27 are similar with score: 1.0\nVertices 27 and 33 are similar with score: 1.0\nVertices 29 and 33 are similar with score: 1.0\nVertices 1 and 4 are similar with score: 0.8333333\nVertices 33 and 34 are similar with score: 0.8333333\nVertices 1 and 2 are similar with score: 0.7777778\nVertices 1 and 8 are similar with score: 0.75\nVertices 2 and 8 are similar with score: 0.75\nVertices 3 and 8 are similar with score: 0.75\nVertices 3 and 31 are similar with score: 0.75\nVertices 4 and 8 are similar with score: 0.75\nVertices 28 and 33 are similar with score: 0.75\nVertices 30 and 34 are similar with score: 0.75\n" ] ], [ [ "---", "_____no_output_____" ], [ "## Let's now compare the Overlap Coefficient with the Jaccard Similarity ", "_____no_output_____" ] ], [ [ "# Call cugraph.nvJaccard \njdf = cugraph.jaccard(G)", "_____no_output_____" ], [ "# Which two vertices are the most similar?\nprint_most_similar_jaccard(jdf)", "Vertices 33 and 34 are most similar 
with score: 0.5263158\nVertices 34 and 33 are most similar with score: 0.5263158\n" ], [ "# Let's combine the Jaccard and Overlap scores\nmdf = jdf.merge(odf, on=['source','destination'])", "_____no_output_____" ], [ "# Also want to include the vertex degree\ndegree = G.degree()", "_____no_output_____" ], [ "dS = degree.rename(columns={'vertex':'source','degree': 'src_degree'})\ndD = degree.rename(columns={'vertex':'destination','degree': 'dst_degree'})", "_____no_output_____" ], [ "m = mdf.merge(dS, how=\"left\", on='source')\nm = m.merge(dD, how=\"left\", on='destination')", "_____no_output_____" ], [ "m.query('source < destination').sort_values(by='jaccard_coeff', ascending=False).head(20)", "_____no_output_____" ], [ "# Now sort on the overlap\nm.query('source < destination').sort_values(by='overlap_coeff', ascending=False).head(20)", "_____no_output_____" ] ], [ [ "___\nCopyright (c) 2019, NVIDIA CORPORATION.\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n___", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e718d65787853d3162ad7c72286ba04734ffed23
7,165
ipynb
Jupyter Notebook
ex01_2.ipynb
jake-aft/kglab
f5cabf5b378d7c8203df772d62b454ade53232b7
[ "MIT" ]
null
null
null
ex01_2.ipynb
jake-aft/kglab
f5cabf5b378d7c8203df772d62b454ade53232b7
[ "MIT" ]
null
null
null
ex01_2.ipynb
jake-aft/kglab
f5cabf5b378d7c8203df772d62b454ade53232b7
[ "MIT" ]
null
null
null
27.452107
135
0.501326
[ [ [ "# Leveraging the `kglab` abstraction layer\n\nLet's try this again -- building a KG -- but this time using the `kglab` library to make things a wee bit easier...", "_____no_output_____" ] ], [ [ "import kglab\n\nnamespaces = {\n \"wtm\": \"http://purl.org/heals/food/\",\n \"ind\": \"http://purl.org/heals/ingredient/\",\n }\n\nkg = kglab.KnowledgeGraph(\n name = \"A recipe KG example based on Food.com\",\n base_uri = \"https://www.food.com/recipe/\",\n language = \"en\",\n namespaces = namespaces,\n )", "_____no_output_____" ], [ "import rdflib as rdf\nfrom rdflib.namespace import RDF, XSD", "_____no_output_____" ], [ "node = rdf.URIRef(\"https://www.food.com/recipe/327593\")\n\nkg.add(node, RDF.type, kg.get_ns(\"wtm\").Recipe)\nkg.add(node, kg.get_ns(\"wtm\").hasCookTime, rdf.Literal(\"8\", datatype=XSD.integer))\nkg.add(node, kg.get_ns(\"wtm\").hasIngredient, kg.get_ns(\"ind\").ChickenEgg)\nkg.add(node, kg.get_ns(\"wtm\").hasIngredient, kg.get_ns(\"ind\").CowMilk)\nkg.add(node, kg.get_ns(\"wtm\").hasIngredient, kg.get_ns(\"ind\").WholeWheatFlour)", "_____no_output_____" ], [ "for s, p, o in kg._g:\n print(s, p, o)", "https://www.food.com/recipe/327593 http://purl.org/heals/food/hasIngredient http://purl.org/heals/ingredient/ChickenEgg\nhttps://www.food.com/recipe/327593 http://purl.org/heals/food/hasCookTime 8\nhttps://www.food.com/recipe/327593 http://purl.org/heals/food/hasIngredient http://purl.org/heals/ingredient/CowMilk\nhttps://www.food.com/recipe/327593 http://purl.org/heals/food/hasIngredient http://purl.org/heals/ingredient/WholeWheatFlour\nhttps://www.food.com/recipe/327593 http://www.w3.org/1999/02/22-rdf-syntax-ns#type http://purl.org/heals/food/Recipe\n" ], [ "s = kg._g.serialize(format=\"n3\")\nprint(s.decode(\"utf8\"))", "@prefix dc: <https://purl.org/dc/terms/> .\n@prefix dct: <https://purl.org/dc/dcmitype/> .\n@prefix ind: <http://purl.org/heals/ingredient/> .\n@prefix owl: <https://www.w3.org/2002/07/owl#> .\n@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n@prefix skos: <https://www.w3.org/2004/02/skos/core#> .\n@prefix wtm: <http://purl.org/heals/food/> .\n@prefix xml: <http://www.w3.org/XML/1998/namespace> .\n@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n\n<https://www.food.com/recipe/327593> a wtm:Recipe ;\n wtm:hasCookTime 8 ;\n wtm:hasIngredient ind:ChickenEgg,\n ind:CowMilk,\n ind:WholeWheatFlour .\n\n\n" ] ], [ [ "Then the serialization becomes much simpler:", "_____no_output_____" ] ], [ [ "kg.save_ttl(\"tmp.ttl\")", "_____no_output_____" ], [ "kg.save_jsonld(\"tmp.jsonld\")", "_____no_output_____" ] ], [ [ "Next, we'll use the [Parquet](https://parquet.apache.org/) format for *columnar storage*.\nThis has been especially effective for Big Data frameworks handling data management and analytics effeciently.", "_____no_output_____" ] ], [ [ "kg.save_parquet(\"tmp.parquet\")", "_____no_output_____" ], [ "import pandas as pd\nimport os\n\nfile_paths = [\"tmp.jsonld\", \"tmp.ttl\", \"tmp.parquet\"]\nfile_sizes = [os.path.getsize(file_path) for file_path in file_paths]\n\ndf = pd.DataFrame({\"file_path\": file_paths, \"file_size\": file_sizes})\ndf", "_____no_output_____" ] ], [ [ "Parquet uses compression based on a \"dictionary\" approach, so it added overhead for small files such as this KG.\nWe'll revisit this comparison across file formats again with a larger KG.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e718e8bce21f989ede78ab549a34b8c3ed3377ee
239,989
ipynb
Jupyter Notebook
gan_mnist/Intro_to_GANs_Exercises.ipynb
Luke035/dlnd-lessons
b89d2b081eaeafddfa2c81dcf1887d4ec928d248
[ "MIT" ]
null
null
null
gan_mnist/Intro_to_GANs_Exercises.ipynb
Luke035/dlnd-lessons
b89d2b081eaeafddfa2c81dcf1887d4ec928d248
[ "MIT" ]
null
null
null
gan_mnist/Intro_to_GANs_Exercises.ipynb
Luke035/dlnd-lessons
b89d2b081eaeafddfa2c81dcf1887d4ec928d248
[ "MIT" ]
null
null
null
298.122981
91,374
0.898666
[ [ [ "# Generative Adversarial Network\n\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\n\nGANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\n* [Pix2Pix](https://affinelayer.com/pixsrv/) \n* [CycleGAN](https://github.com/junyanz/CycleGAN)\n* [A whole list](https://github.com/wiseodd/generative-models)\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks _as close as possible_ to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.\n\n![GAN diagram](assets/gan_diagram.png)\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.\n\nThe output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport pickle as pkl\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "from tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data')", "Extracting MNIST_data/train-images-idx3-ubyte.gz\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\n" ] ], [ [ "## Model Inputs\n\nFirst we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input `inputs_real` and the generator input `inputs_z`. We'll assign them the appropriate sizes for each of the networks.\n\n>**Exercise:** Finish the `model_inputs` function below. Create the placeholders for `inputs_real` and `inputs_z` using the input sizes `real_dim` and `z_dim` respectively.", "_____no_output_____" ] ], [ [ "def model_inputs(real_dim, z_dim):\n #(Batch_size, Dim)\n inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')\n inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n \n return inputs_real, inputs_z", "_____no_output_____" ] ], [ [ "## Generator network\n\n![GAN Network](assets/gan_network.png)\n\nHere we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. 
A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\n\n#### Variable Scope\nHere we need to use `tf.variable_scope` for two reasons. Firstly, we're going to make sure all the variable names start with `generator`. Similarly, we'll prepend `discriminator` to the discriminator variables. This will help out later when we're training the separate networks.\n\nWe could just use `tf.name_scope` to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also _sample from it_ as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the `reuse` keyword for `tf.variable_scope` to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.\n\nTo use `tf.variable_scope`, you use a `with` statement:\n```python\nwith tf.variable_scope('scope_name', reuse=False):\n    # code here\n```\n\nHere's more from [the TensorFlow documentation](https://www.tensorflow.org/programmers_guide/variable_scope#the_problem) to get another look at using `tf.variable_scope`.\n\n#### Leaky ReLU\nTensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can just take the outputs from a linear fully connected layer and pass them to `tf.maximum`. Typically, a parameter `alpha` sets the magnitude of the output for negative values. So, the output for negative input (`x`) values is `alpha*x`, and the output for positive `x` is `x`:\n$$\nf(x) = \\max(\\alpha * x, x)\n$$\n\n#### Tanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.\n\n>**Exercise:** Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the `reuse` keyword argument from the function to `tf.variable_scope`.", "_____no_output_____" ] ], [ [ "def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n    ''' Build the generator network.\n    \n        Arguments\n        ---------\n        z : Input tensor for the generator\n        out_dim : Shape of the generator output\n        n_units : Number of units in hidden layer\n        reuse : Reuse the variables with tf.variable_scope\n        alpha : leak parameter for leaky ReLU\n        \n        Returns\n        -------\n        out: \n    '''\n    with tf.variable_scope('generator', reuse=reuse): # Creating the network -> reuse set to False\n        # Hidden layer\n        # activation is None since the leaky ReLU is applied manually below\n        h1 = tf.layers.dense(inputs=z, units=n_units, \n                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n                             activation=None\n                            )\n        # Leaky ReLU: max(alpha*x, x)\n        h1 = tf.maximum(h1 * alpha, h1)\n        \n        # Logits and tanh output\n        # Readout layer\n        logits = tf.layers.dense(inputs=h1, units=out_dim, \n                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n                             activation=None\n                            )\n        out = tf.tanh(logits)\n        \n        return out", "_____no_output_____" ] ], [ [ "## Discriminator\n\nThe discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.\n\n>**Exercise:** Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output.
Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the `reuse` keyword argument from the function arguments to `tf.variable_scope`.", "_____no_output_____" ] ], [ [ "def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n    ''' Build the discriminator network.\n    \n        Arguments\n        ---------\n        x : Input tensor for the discriminator\n        n_units: Number of units in hidden layer\n        reuse : Reuse the variables with tf.variable_scope\n        alpha : leak parameter for leaky ReLU\n        \n        Returns\n        -------\n        out, logits: \n    '''\n    with tf.variable_scope('discriminator', reuse=reuse): # Creating the network -> reuse set to False\n        # Hidden layer\n        h1 = tf.layers.dense(inputs=x, units=n_units, \n                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n                             activation=None\n                            )\n        # Leaky ReLU\n        h1 = tf.maximum(h1 * alpha, h1)\n        \n        # Output dim is 1; the logit is sigmoided to return a probability between 0 and 1\n        logits = tf.layers.dense(inputs=h1, units=1, \n                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n                             activation=None\n                            )\n        \n        out = tf.sigmoid(logits)\n        \n        return out, logits", "_____no_output_____" ] ], [ [ "## Hyperparameters", "_____no_output_____" ] ], [ [ "# Size of input image to discriminator\ninput_size = 784 # 28x28 MNIST images flattened\n# Size of latent vector to generator\nz_size = 100\n# Sizes of hidden layers in generator and discriminator\ng_hidden_size = 128\nd_hidden_size = 128\n# Leak factor for leaky ReLU\nalpha = 0.01\n# Label smoothing \nsmooth = 0.1", "_____no_output_____" ] ], [ [ "## Build network\n\nNow we're building the network from the functions defined above.\n\nFirst, we get our inputs, `input_real, input_z`, from `model_inputs` using the sizes of the input and z.\n\nThen, we'll create the generator, `generator(input_z, input_size)`. This builds the generator with the appropriate input and output sizes.\n\nThen the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as `g_model`. So the real data discriminator is `discriminator(input_real)` while the fake discriminator is `discriminator(g_model, reuse=True)`.\n\n>**Exercise:** Build the network from the functions you defined earlier.", "_____no_output_____" ] ], [ [ "tf.reset_default_graph()\n# Create our input placeholders\ninput_real, input_z = model_inputs(real_dim=input_size, z_dim=z_size)\n\n# Generator network here\n# Output dim for generator is the same as real input dim\ng_model = generator(z=input_z, alpha=alpha, n_units=g_hidden_size, out_dim=input_size)\n# g_model is the generator output\n\n# Discriminator network here\nd_model_real, d_logits_real = discriminator(x=input_real, alpha=alpha, n_units=d_hidden_size)\n# Pass the generator's output through the discriminator, reusing the variables\nd_model_fake, d_logits_fake = discriminator(x=g_model, alpha=alpha, n_units=d_hidden_size, reuse=True)", "_____no_output_____" ] ], [ [ "## Discriminator and Generator Losses\n\nNow we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_loss_real + d_loss_fake`. The losses will be sigmoid cross-entropies, which we can get with `tf.nn.sigmoid_cross_entropy_with_logits`. We'll also wrap that in `tf.reduce_mean` to get the mean for all the images in the batch.
So the losses will look something like \n\n```python\ntf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\n```\n\nFor the real image logits, we'll use `d_logits_real` which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter `smooth`. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like `labels = tf.ones_like(tensor) * (1 - smooth)`.\n\nThe discriminator loss for the fake data is similar. The logits are `d_logits_fake`, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\n\nFinally, the generator losses are using `d_logits_fake`, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.\n\n>**Exercise:** Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.", "_____no_output_____" ] ], [ [ "# Calculate losses\n# Cross-entropy between the logits and labels fixed at 1 (these are the real images)\nreal_labels = tf.ones_like(d_logits_real) * (1 - smooth) # Smoothed labels\nd_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=real_labels))\n# Same for the fakes, but with labels of 0\nfake_labels = tf.zeros_like(d_logits_fake)\nd_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=fake_labels))\n\nd_loss = d_loss_real + d_loss_fake\n\n# G loss needs flipped labels, and needs all ones for all the generated fake images\n# The loss starts from the discriminator's result, not the generator's output, and the labels must be flipped!\nflipped_fake_labels = tf.ones_like(d_logits_fake)\ng_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=flipped_fake_labels))", "_____no_output_____" ] ], [ [ "## Optimizers\n\nWe want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use `tf.trainable_variables()`. This creates a list of all the variables we've defined in our graph.\n\nFor the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with `generator`. So, we just need to iterate through the list from `tf.trainable_variables()` and keep variables that start with `generator`. Each variable object has an attribute `name` which holds the name of the variable as a string (`var.name == 'weights_0'` for instance). \n\nWe can do something similar with the discriminator, as sketched below.
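\n\nA minimal sketch of that filtering (purely illustrative; the exercise cell below builds the same lists):\n\n```python\nt_vars = tf.trainable_variables()\ng_vars = [v for v in t_vars if v.name.startswith('generator')]\nd_vars = [v for v in t_vars if v.name.startswith('discriminator')]\n```\n\n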
All the variables in the discriminator start with `discriminator`, which is exactly what the second filter relies on.\n\nThen, in the optimizer we pass the variable lists to the `var_list` keyword argument of the `minimize` method. This tells the optimizer to only update the listed variables. Something like `tf.train.AdamOptimizer().minimize(loss, var_list=var_list)` will only train the variables in `var_list`.\n\n>**Exercise:** Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using `AdamOptimizer`, create an optimizer for each network that updates the network variables separately.", "_____no_output_____" ] ], [ [ "for var in tf.trainable_variables():\n    if 'generator' in var.name:\n        print(var.name)", "generator/dense/kernel:0\ngenerator/dense/bias:0\ngenerator/dense_1/kernel:0\ngenerator/dense_1/bias:0\n" ], [ "# Optimizers\nlearning_rate = 0.002 # note: not passed to AdamOptimizer below, so Adam's default (0.001) is actually used\n\n# Get the trainable_variables, split into G and D parts\nt_vars = tf.trainable_variables()\ng_vars = [var for var in t_vars if var.name.startswith('generator')]\nd_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n\nd_train_opt = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars)\ng_train_opt = tf.train.AdamOptimizer().minimize(g_loss, var_list=g_vars)", "_____no_output_____" ] ], [ [ "## Training", "_____no_output_____" ] ], [ [ "batch_size = 100\nepochs = 100\nsamples = []\nlosses = []\nsaver = tf.train.Saver(var_list = g_vars)\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    for e in range(epochs):\n        for ii in range(mnist.train.num_examples//batch_size):\n            batch = mnist.train.next_batch(batch_size)\n            \n            # Get images, reshape and rescale to pass to D\n            batch_images = batch[0].reshape((batch_size, 784))\n            batch_images = batch_images*2 - 1\n            \n            # Sample random noise for G\n            batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n            \n            # Run optimizers\n            _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n            _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n        \n        # At the end of each epoch, get the losses and print them out\n        train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})\n        train_loss_g = g_loss.eval({input_z: batch_z})\n        \n        print(\"Epoch {}/{}...\".format(e+1, epochs),\n              \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n              \"Generator Loss: {:.4f}\".format(train_loss_g))    \n        # Save losses to view after training\n        losses.append((train_loss_d, train_loss_g))\n        \n        # Sample from generator as we're training for viewing afterwards\n        sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n        gen_samples = sess.run(\n                       generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n                       feed_dict={input_z: sample_z})\n        samples.append(gen_samples)\n        saver.save(sess, './checkpoints/generator.ckpt')\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n    pkl.dump(samples, f)", "Epoch 1/100... Discriminator Loss: 1.0371... Generator Loss: 1.2323\nEpoch 2/100... Discriminator Loss: 1.4539... Generator Loss: 2.3134\nEpoch 3/100... Discriminator Loss: 0.8266... Generator Loss: 1.5687\nEpoch 4/100... Discriminator Loss: 1.0409... Generator Loss: 1.7561\nEpoch 5/100... Discriminator Loss: 2.1167... Generator Loss: 1.8357\nEpoch 6/100... Discriminator Loss: 1.2289... Generator Loss: 1.4345\nEpoch 7/100... Discriminator Loss: 1.4484...
Generator Loss: 1.5648\nEpoch 8/100... Discriminator Loss: 1.3076... Generator Loss: 0.9751\nEpoch 9/100... Discriminator Loss: 0.9220... Generator Loss: 2.4517\nEpoch 10/100... Discriminator Loss: 1.3634... Generator Loss: 1.3329\nEpoch 11/100... Discriminator Loss: 0.9906... Generator Loss: 1.4102\nEpoch 12/100... Discriminator Loss: 1.0600... Generator Loss: 1.5776\nEpoch 13/100... Discriminator Loss: 1.1464... Generator Loss: 1.2684\nEpoch 14/100... Discriminator Loss: 1.1257... Generator Loss: 1.5422\nEpoch 15/100... Discriminator Loss: 0.9316... Generator Loss: 1.8991\nEpoch 16/100... Discriminator Loss: 1.2950... Generator Loss: 1.2759\nEpoch 17/100... Discriminator Loss: 0.7901... Generator Loss: 2.2897\nEpoch 18/100... Discriminator Loss: 1.3836... Generator Loss: 1.4144\nEpoch 19/100... Discriminator Loss: 0.9142... Generator Loss: 1.8741\nEpoch 20/100... Discriminator Loss: 0.9150... Generator Loss: 2.3037\nEpoch 21/100... Discriminator Loss: 1.4793... Generator Loss: 1.6128\nEpoch 22/100... Discriminator Loss: 0.9740... Generator Loss: 2.1738\nEpoch 23/100... Discriminator Loss: 1.3407... Generator Loss: 1.9528\nEpoch 24/100... Discriminator Loss: 1.3810... Generator Loss: 1.6119\nEpoch 25/100... Discriminator Loss: 1.2114... Generator Loss: 1.4926\nEpoch 26/100... Discriminator Loss: 0.8497... Generator Loss: 1.8838\nEpoch 27/100... Discriminator Loss: 1.1389... Generator Loss: 1.2702\nEpoch 28/100... Discriminator Loss: 1.0600... Generator Loss: 1.5365\nEpoch 29/100... Discriminator Loss: 1.0504... Generator Loss: 2.0048\nEpoch 30/100... Discriminator Loss: 0.9241... Generator Loss: 2.3051\nEpoch 31/100... Discriminator Loss: 1.0507... Generator Loss: 1.5308\nEpoch 32/100... Discriminator Loss: 1.5379... Generator Loss: 1.1642\nEpoch 33/100... Discriminator Loss: 1.0848... Generator Loss: 1.8064\nEpoch 34/100... Discriminator Loss: 0.9425... Generator Loss: 1.8181\nEpoch 35/100... Discriminator Loss: 1.2464... Generator Loss: 1.4767\nEpoch 36/100... Discriminator Loss: 1.0456... Generator Loss: 1.6964\nEpoch 37/100... Discriminator Loss: 1.0377... Generator Loss: 1.6078\nEpoch 38/100... Discriminator Loss: 1.2151... Generator Loss: 1.2085\nEpoch 39/100... Discriminator Loss: 1.1072... Generator Loss: 2.0932\nEpoch 40/100... Discriminator Loss: 0.9257... Generator Loss: 2.0033\nEpoch 41/100... Discriminator Loss: 0.9142... Generator Loss: 2.0663\nEpoch 42/100... Discriminator Loss: 0.9660... Generator Loss: 1.9347\nEpoch 43/100... Discriminator Loss: 1.2125... Generator Loss: 1.3420\nEpoch 44/100... Discriminator Loss: 1.0214... Generator Loss: 1.6756\nEpoch 45/100... Discriminator Loss: 0.9240... Generator Loss: 1.7611\nEpoch 46/100... Discriminator Loss: 1.0930... Generator Loss: 1.6974\nEpoch 47/100... Discriminator Loss: 0.9086... Generator Loss: 1.8238\nEpoch 48/100... Discriminator Loss: 0.9759... Generator Loss: 1.6397\nEpoch 49/100... Discriminator Loss: 1.0800... Generator Loss: 1.4449\nEpoch 50/100... Discriminator Loss: 1.2123... Generator Loss: 1.1471\nEpoch 51/100... Discriminator Loss: 1.0402... Generator Loss: 1.2637\nEpoch 52/100... Discriminator Loss: 0.9749... Generator Loss: 1.8870\nEpoch 53/100... Discriminator Loss: 0.9660... Generator Loss: 1.8921\nEpoch 54/100... Discriminator Loss: 0.9141... Generator Loss: 2.1588\nEpoch 55/100... Discriminator Loss: 1.0154... Generator Loss: 1.7089\nEpoch 56/100... Discriminator Loss: 1.1138... Generator Loss: 1.6059\nEpoch 57/100... Discriminator Loss: 1.0910... Generator Loss: 1.5598\nEpoch 58/100... 
Discriminator Loss: 0.9865... Generator Loss: 2.1992\nEpoch 59/100... Discriminator Loss: 0.8994... Generator Loss: 1.7488\nEpoch 60/100... Discriminator Loss: 1.0818... Generator Loss: 1.3813\nEpoch 61/100... Discriminator Loss: 1.2005... Generator Loss: 1.0771\nEpoch 62/100... Discriminator Loss: 1.0242... Generator Loss: 1.7216\nEpoch 63/100... Discriminator Loss: 0.8810... Generator Loss: 1.8877\nEpoch 64/100... Discriminator Loss: 1.0824... Generator Loss: 1.6331\nEpoch 65/100... Discriminator Loss: 0.9965... Generator Loss: 1.3533\nEpoch 66/100... Discriminator Loss: 0.9880... Generator Loss: 1.6957\nEpoch 67/100... Discriminator Loss: 0.8739... Generator Loss: 1.6825\nEpoch 68/100... Discriminator Loss: 1.1047... Generator Loss: 1.5360\nEpoch 69/100... Discriminator Loss: 1.0120... Generator Loss: 1.7713\nEpoch 70/100... Discriminator Loss: 1.0143... Generator Loss: 1.6014\nEpoch 71/100... Discriminator Loss: 1.0748... Generator Loss: 1.5939\nEpoch 72/100... Discriminator Loss: 1.1335... Generator Loss: 1.3261\nEpoch 73/100... Discriminator Loss: 1.1057... Generator Loss: 1.5601\nEpoch 74/100... Discriminator Loss: 0.9431... Generator Loss: 1.9850\nEpoch 75/100... Discriminator Loss: 1.0706... Generator Loss: 1.5694\nEpoch 76/100... Discriminator Loss: 0.9311... Generator Loss: 1.5826\nEpoch 77/100... Discriminator Loss: 1.1557... Generator Loss: 1.7118\nEpoch 78/100... Discriminator Loss: 0.9054... Generator Loss: 1.7575\nEpoch 79/100... Discriminator Loss: 0.9300... Generator Loss: 1.6410\nEpoch 80/100... Discriminator Loss: 1.0372... Generator Loss: 1.3538\nEpoch 81/100... Discriminator Loss: 1.1062... Generator Loss: 1.1903\nEpoch 82/100... Discriminator Loss: 1.0133... Generator Loss: 1.3435\nEpoch 83/100... Discriminator Loss: 0.9815... Generator Loss: 1.9008\nEpoch 84/100... Discriminator Loss: 1.0770... Generator Loss: 1.6541\nEpoch 85/100... Discriminator Loss: 1.0438... Generator Loss: 1.8766\nEpoch 86/100... Discriminator Loss: 1.0578... Generator Loss: 1.3683\nEpoch 87/100... Discriminator Loss: 1.2596... Generator Loss: 1.3589\nEpoch 88/100... Discriminator Loss: 1.1389... Generator Loss: 1.2119\nEpoch 89/100... Discriminator Loss: 1.0811... Generator Loss: 1.7022\nEpoch 90/100... Discriminator Loss: 0.9235... Generator Loss: 1.4655\nEpoch 91/100... Discriminator Loss: 0.9506... Generator Loss: 1.5136\nEpoch 92/100... Discriminator Loss: 1.0483... Generator Loss: 1.4910\nEpoch 93/100... Discriminator Loss: 0.8123... Generator Loss: 1.9579\nEpoch 94/100... Discriminator Loss: 1.1142... Generator Loss: 1.3921\nEpoch 95/100... Discriminator Loss: 1.1543... Generator Loss: 1.2755\nEpoch 96/100... Discriminator Loss: 1.0561... Generator Loss: 1.4777\nEpoch 97/100... Discriminator Loss: 1.0740... Generator Loss: 1.6115\nEpoch 98/100... Discriminator Loss: 1.0652... Generator Loss: 1.3117\nEpoch 99/100... Discriminator Loss: 0.9905... Generator Loss: 1.5663\nEpoch 100/100... Discriminator Loss: 0.9343... Generator Loss: 1.7362\n" ] ], [ [ "## Training loss\n\nHere we'll check out the training losses for the generator and discriminator.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()", "_____no_output_____" ] ], [ [ "## Generator samples from training\n\nHere we can view samples of images from the generator. 
First we'll look at images taken while training.", "_____no_output_____" ] ], [ [ "def view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n \n return fig, axes", "_____no_output_____" ], [ "# Load samples from generator taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)", "_____no_output_____" ] ], [ [ "These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.", "_____no_output_____" ] ], [ [ "_ = view_samples(-1, samples)", "_____no_output_____" ] ], [ [ "Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!", "_____no_output_____" ] ], [ [ "rows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)", "_____no_output_____" ] ], [ [ "It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.", "_____no_output_____" ], [ "## Sampling from the generator\n\nWe can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!", "_____no_output_____" ] ], [ [ "saver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n feed_dict={input_z: sample_z})\nview_samples(0, [gen_samples])", "INFO:tensorflow:Restoring parameters from checkpoints/generator.ckpt\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
e718ec968133b8290b0a56d5d65d0121b43e8f8a
182,986
ipynb
Jupyter Notebook
get_stn_alignment.ipynb
agikarasugi/Face-Mask-Invariant-End-to-End-Face-Recognition
eb274ff98246c1bb8748bd8c8351d3494a87dfce
[ "MIT" ]
1
2021-05-21T07:56:26.000Z
2021-05-21T07:56:26.000Z
get_stn_alignment.ipynb
agikarasugi/Face-Mask-Invariant-End-to-End-Face-Recognition
eb274ff98246c1bb8748bd8c8351d3494a87dfce
[ "MIT" ]
null
null
null
get_stn_alignment.ipynb
agikarasugi/Face-Mask-Invariant-End-to-End-Face-Recognition
eb274ff98246c1bb8748bd8c8351d3494a87dfce
[ "MIT" ]
1
2021-08-10T05:34:53.000Z
2021-08-10T05:34:53.000Z
883.990338
90,712
0.955669
[ [ [ "import PIL\nimport kornia\nimport glob\nimport torch\nimport numpy as np\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms as T\nfrom networks.ResnetFaceSTN import ResnetFaceSTN\nfrom pathlib import Path\nfrom skimage.transform import resize", "_____no_output_____" ] ], [ [ "## Transform Definitions", "_____no_output_____" ] ], [ [ "class UnNormalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n for t, m, s in zip(tensor, self.mean, self.std):\n t.mul_(s).add_(m)\n return tensor\n\n \ntransform = T.Compose(\n [\n T.Resize((128, 128)),\n T.ToTensor(),\n T.Normalize(mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5])\n ]\n)", "_____no_output_____" ] ], [ [ "## Weight Paths and Image Paths\nWeight paths", "_____no_output_____" ] ], [ [ "ROOT_LFW = Path(\"datasets/eval/data/lfw/lfw_masked\")\n\nWEIGHT_PATH = \"weights/mask_exp19-resnetSTN/epoch_18/mask_exp19-resnetSTN_ep18.pth\"\n\nP1 = \"Alfredo_Moreno/Alfredo_Moreno_0001.jpg\"\nP2 = \"Dyab_Abou_Jahjah/Dyab_Abou_Jahjah_0001.jpg\"", "_____no_output_____" ], [ "def stn_forward(weight_path, img_path):\n net = ResnetFaceSTN(stn_mode='resnet')\n net.load_state_dict(torch.load(weight_path))\n net.eval()\n\n unorm = UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n\n img = PIL.Image.open(img_path)\n img_t = transform(img).unsqueeze(0)\n img_stn = net.stn(img_t)\n img_p = kornia.tensor_to_image(unorm(img_stn.clone().detach()))\n \n return img_p", "_____no_output_____" ] ], [ [ "## Preview", "_____no_output_____" ] ], [ [ "img_p = stn_forward(WEIGHT_PATH, ROOT_LFW / P1)\nplt.imshow(img_p)\nplt.show()\n# plt.imsave(\"pair_sample_result/NEG_mCASIAC_P1_2.jpg\", img_p)\n\nimg_p = stn_forward(WEIGHT_PATH, ROOT_LFW / P2)\nplt.imshow(img_p)\nplt.show()\n# plt.imsave(\"pair_sample_result/NEG_mCASIAC_P2_2.jpg\", img_p)", "_____no_output_____" ], [ "sample_images_masked = glob.glob('image_samples/LFW-Masked/*.jpg')\nsample_images = glob.glob('image_samples/LFW/*.jpg')\n\ntop_row = []\nbottom_row = []\nfor i, imgs in enumerate(zip(sample_images_masked, sample_images), 1):\n masked_img, norm_img = imgs\n \n # img_a = stn_forward(WEIGHT_PATH, masked_img)\n img_a = stn_forward(WEIGHT_PATH, norm_img)\n\n # img_o = plt.imread(masked_img)\n img_o = plt.imread(norm_img)\n img_o = resize(np.array(img_o), (112, 112), anti_aliasing=True)\n\n if len(top_row) == 0: top_row = img_o\n else: top_row = np.hstack((top_row, img_o))\n \n if len(bottom_row) == 0: bottom_row = np.array(img_a)\n else: bottom_row = np.hstack((bottom_row, img_a))\n \naligned_img = np.vstack((top_row, bottom_row))\nplt.imsave(\"pair_sample_result/nonmask_ori_align.jpg\", aligned_img)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e718f2e54c56bd2c8e34376421a4af23d937518b
266,490
ipynb
Jupyter Notebook
object_detection.ipynb
ksun0/cis-581-final-project-video-stiching-detection
4dd61dda530ce0ad9ace55dfc204206942a5983c
[ "MIT" ]
null
null
null
object_detection.ipynb
ksun0/cis-581-final-project-video-stiching-detection
4dd61dda530ce0ad9ace55dfc204206942a5983c
[ "MIT" ]
null
null
null
object_detection.ipynb
ksun0/cis-581-final-project-video-stiching-detection
4dd61dda530ce0ad9ace55dfc204206942a5983c
[ "MIT" ]
2
2020-12-24T10:15:16.000Z
2021-11-10T14:42:36.000Z
321.847826
117,564
0.894679
[ [ [ "import os\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom PIL import Image\n\n\nclass DeskDataset(torch.utils.data.Dataset):\n def __init__(self, root, transforms=None):\n self.root = root\n self.transforms = transforms\n self.df = pd.read_csv(\"data/annotations.csv\", names=[\"class\", \"xmin\", \"ymin\", \"xdist\", \"ydist\", \"image\", \"imgx\", \"imgy\"])\n unique_classes = list(self.df['class'].unique())\n self.class_names = {unique_classes[i]: np.arange(1, 1 + len(unique_classes))[i] for i in range(len(unique_classes))}\n # load all image files, sorting them to\n # ensure that they are aligned\n self.imgs = list(sorted(os.listdir(os.path.join(root, \"data\", \"training_data\"))))\n\n def __getitem__(self, idx):\n # load images ad masks\n img_path = os.path.join(self.root, \"data\", \"training_data\", self.imgs[idx])\n img = Image.open(img_path).convert(\"RGB\")\n\n features = self.df[self.df['image'] == self.imgs[idx]] \n boxes = []\n labels = []\n for index, row in features.iterrows():\n xmin = row['xmin']\n xmax = row['xmin'] + row['xdist']\n ymin = row['ymin']\n ymax = row['ymin'] + row['ydist']\n boxes.append([xmin, ymin, xmax, ymax])\n labels.append(self.class_names[row['class']])\n\n boxes = torch.tensor(boxes, dtype=torch.float32)\n labels = torch.tensor(labels, dtype=torch.int64)\n num_objs = len(boxes)\n\n image_id = torch.tensor([idx])\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n # suppose all instances are not crowd\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n\nds = DeskDataset(\".\")", "_____no_output_____" ], [ "print(len(ds.imgs))\nprint(len(ds.class_names))", "48\n4\n" ], [ "import torchvision\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n \ndef get_object_detection_model(num_classes):\n # load an instance segmentation model pre-trained on COCO\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n\n # get the number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n \n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n return model", "_____no_output_____" ], [ "from helper.engine import train_one_epoch, evaluate\nimport helper.utils\nimport helper.transforms as T\n\ndef get_transform(train):\n transforms = []\n # converts the image, a PIL image, into a PyTorch Tensor\n transforms.append(T.ToTensor())\n if train:\n # during training, randomly flip the training images\n # and ground-truth for data augmentation\n transforms.append(T.RandomHorizontalFlip(0.5))\n return T.Compose(transforms)", "_____no_output_____" ], [ "# use our dataset and defined transformations\ndataset = DeskDataset('.', get_transform(train=True))\ndataset_test = DeskDataset('.', get_transform(train=False))\n\n# split the dataset in train and test set\ntorch.manual_seed(1)\nindices = torch.randperm(len(dataset)).tolist()\ndataset = torch.utils.data.Subset(dataset, indices[:-44])\ndataset_test = torch.utils.data.Subset(dataset_test, indices[-4:])\n\n# define training and validation data loaders\ndata_loader = 
torch.utils.data.DataLoader(\n    dataset, batch_size=2, shuffle=True, num_workers=4,\n    collate_fn=utils.collate_fn)\n\ndata_loader_test = torch.utils.data.DataLoader(\n    dataset_test, batch_size=1, shuffle=False, num_workers=4,\n    collate_fn=utils.collate_fn)", "_____no_output_____" ], [ "device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n# our dataset has five classes - background plus the four annotated object classes\nnum_classes = 1 + len(ds.class_names)\n\n# get the model using our helper function\nmodel = get_object_detection_model(num_classes)\n# move model to the right device\nmodel.to(device)\n\n# construct an optimizer\nparams = [p for p in model.parameters() if p.requires_grad]\noptimizer = torch.optim.SGD(params, lr=0.005,\n                            momentum=0.9, weight_decay=0.0005)\n\n# and a learning rate scheduler which decreases the learning rate by\n# 10x every 3 epochs\nlr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,\n                                               step_size=3,\n                                               gamma=0.1)", "_____no_output_____" ], [ "# let's train it for 20 epochs\nnum_epochs = 20\n\nfor epoch in range(num_epochs):\n    # train for one epoch, printing every 10 iterations\n    train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)\n    # update the learning rate\n    lr_scheduler.step()\n    # evaluate on the test dataset\n    # evaluate(model, data_loader_test, device=device)", "Epoch: [0] [0/2] eta: 0:00:01 lr: 0.005000 loss: 0.4968 (0.4968) loss_classifier: 0.3000 (0.3000) loss_box_reg: 0.1733 (0.1733) loss_objectness: 0.0065 (0.0065) loss_rpn_box_reg: 0.0170 (0.0170) time: 0.8917 data: 0.2418 max mem: 2843\nEpoch: [0] [1/2] eta: 0:00:00 lr: 0.005000 loss: 0.4968 (0.6706) loss_classifier: 0.3000 (0.3949) loss_box_reg: 0.1733 (0.2511) loss_objectness: 0.0031 (0.0048) loss_rpn_box_reg: 0.0170 (0.0199) time: 0.7081 data: 0.1245 max mem: 2843\nEpoch: [0] Total time: 0:00:01 (0.7260 s / it)\nEpoch: [1] [0/2] eta: 0:00:01 lr: 0.000500 loss: 0.5810 (0.5810) loss_classifier: 0.2893 (0.2893) loss_box_reg: 0.2557 (0.2557) loss_objectness: 0.0165 (0.0165) loss_rpn_box_reg: 0.0196 (0.0196) time: 0.7282 data: 0.1532 max mem: 2843\nEpoch: [1] [1/2] eta: 0:00:00 lr: 0.000500 loss: 0.5662 (0.5736) loss_classifier: 0.2893 (0.2949) loss_box_reg: 0.2466 (0.2512) loss_objectness: 0.0024 (0.0094) loss_rpn_box_reg: 0.0167 (0.0181) time: 0.6298 data: 0.0798 max mem: 2843\nEpoch: [1] Total time: 0:00:01 (0.6463 s / it)\nEpoch: [2] [0/2] eta: 0:00:01 lr: 0.000500 loss: 0.6649 (0.6649) loss_classifier: 0.3471 (0.3471) loss_box_reg: 0.2943 (0.2943) loss_objectness: 0.0067 (0.0067) loss_rpn_box_reg: 0.0168 (0.0168) time: 0.9193 data: 0.3401 max mem: 2843\nEpoch: [2] [1/2] eta: 0:00:00 lr: 0.000500 loss: 0.4817 (0.5733) loss_classifier: 0.2612 (0.3041) loss_box_reg: 0.1887 (0.2415) loss_objectness: 0.0067 (0.0088) loss_rpn_box_reg: 0.0168 (0.0189) time: 0.7222 data: 0.1714 max mem: 2843\nEpoch: [2] Total time: 0:00:01 (0.7395 s / it)\nEpoch: [3] [0/2] eta: 0:00:01 lr: 0.000500 loss: 0.6028 (0.6028) loss_classifier: 0.3129 (0.3129) loss_box_reg: 0.2604 (0.2604) loss_objectness: 0.0114 (0.0114) loss_rpn_box_reg: 0.0181 (0.0181) time: 0.7076 data: 0.1276 max mem: 2843\nEpoch: [3] [1/2] eta: 0:00:00 lr: 0.000500 loss: 0.5957 (0.5993) loss_classifier: 0.3129 (0.3181) loss_box_reg: 0.2535 (0.2569) loss_objectness: 0.0041 (0.0077) loss_rpn_box_reg: 0.0150 (0.0165) time: 0.6213 data: 0.0670 max mem: 2843\nEpoch: [3] Total time: 0:00:01 (0.6383 s / it)\nEpoch: [4] [0/2] eta: 0:00:01 lr: 0.000050 loss: 0.5893 (0.5893) loss_classifier: 0.3108 (0.3108) loss_box_reg:
0.2416 (0.2416) loss_objectness: 0.0192 (0.0192) loss_rpn_box_reg: 0.0177 (0.0177) time: 0.7653 data: 0.2290 max mem: 2843\nEpoch: [4] [1/2] eta: 0:00:00 lr: 0.000050 loss: 0.5893 (0.6142) loss_classifier: 0.3108 (0.3158) loss_box_reg: 0.2416 (0.2677) loss_objectness: 0.0079 (0.0136) loss_rpn_box_reg: 0.0166 (0.0172) time: 0.6698 data: 0.1166 max mem: 2843\nEpoch: [4] Total time: 0:00:01 (0.6863 s / it)\nEpoch: [5] [0/2] eta: 0:00:01 lr: 0.000050 loss: 0.5878 (0.5878) loss_classifier: 0.2976 (0.2976) loss_box_reg: 0.2517 (0.2517) loss_objectness: 0.0207 (0.0207) loss_rpn_box_reg: 0.0178 (0.0178) time: 0.7313 data: 0.1537 max mem: 2843\nEpoch: [5] [1/2] eta: 0:00:00 lr: 0.000050 loss: 0.5878 (0.6048) loss_classifier: 0.2976 (0.3113) loss_box_reg: 0.2494 (0.2506) loss_objectness: 0.0207 (0.0253) loss_rpn_box_reg: 0.0174 (0.0176) time: 0.6324 data: 0.0801 max mem: 2843\nEpoch: [5] Total time: 0:00:01 (0.6484 s / it)\nEpoch: [6] [0/2] eta: 0:00:01 lr: 0.000050 loss: 0.6036 (0.6036) loss_classifier: 0.3073 (0.3073) loss_box_reg: 0.2721 (0.2721) loss_objectness: 0.0066 (0.0066) loss_rpn_box_reg: 0.0177 (0.0177) time: 0.8199 data: 0.2409 max mem: 2843\nEpoch: [6] [1/2] eta: 0:00:00 lr: 0.000050 loss: 0.5704 (0.5870) loss_classifier: 0.3058 (0.3066) loss_box_reg: 0.2426 (0.2574) loss_objectness: 0.0066 (0.0067) loss_rpn_box_reg: 0.0152 (0.0164) time: 0.6801 data: 0.1231 max mem: 2843\nEpoch: [6] Total time: 0:00:01 (0.6987 s / it)\nEpoch: [7] [0/2] eta: 0:00:01 lr: 0.000005 loss: 0.5215 (0.5215) loss_classifier: 0.2691 (0.2691) loss_box_reg: 0.2178 (0.2178) loss_objectness: 0.0190 (0.0190) loss_rpn_box_reg: 0.0156 (0.0156) time: 0.8777 data: 0.3485 max mem: 2843\nEpoch: [7] [1/2] eta: 0:00:00 lr: 0.000005 loss: 0.5215 (0.6156) loss_classifier: 0.2691 (0.3163) loss_box_reg: 0.2178 (0.2593) loss_objectness: 0.0190 (0.0242) loss_rpn_box_reg: 0.0156 (0.0158) time: 0.7298 data: 0.1755 max mem: 2843\nEpoch: [7] Total time: 0:00:01 (0.7470 s / it)\nEpoch: [8] [0/2] eta: 0:00:01 lr: 0.000005 loss: 0.6514 (0.6514) loss_classifier: 0.3275 (0.3275) loss_box_reg: 0.3013 (0.3013) loss_objectness: 0.0031 (0.0031) loss_rpn_box_reg: 0.0195 (0.0195) time: 0.8789 data: 0.3478 max mem: 2843\nEpoch: [8] [1/2] eta: 0:00:00 lr: 0.000005 loss: 0.5325 (0.5919) loss_classifier: 0.2948 (0.3111) loss_box_reg: 0.2226 (0.2620) loss_objectness: 0.0031 (0.0033) loss_rpn_box_reg: 0.0115 (0.0155) time: 0.7323 data: 0.1777 max mem: 2843\nEpoch: [8] Total time: 0:00:01 (0.7509 s / it)\nEpoch: [9] [0/2] eta: 0:00:01 lr: 0.000005 loss: 0.5161 (0.5161) loss_classifier: 0.2808 (0.2808) loss_box_reg: 0.2148 (0.2148) loss_objectness: 0.0075 (0.0075) loss_rpn_box_reg: 0.0129 (0.0129) time: 0.8480 data: 0.2644 max mem: 2843\nEpoch: [9] [1/2] eta: 0:00:00 lr: 0.000005 loss: 0.5161 (0.5943) loss_classifier: 0.2808 (0.3125) loss_box_reg: 0.2148 (0.2598) loss_objectness: 0.0020 (0.0048) loss_rpn_box_reg: 0.0129 (0.0172) time: 0.6898 data: 0.1337 max mem: 2843\nEpoch: [9] Total time: 0:00:01 (0.7089 s / it)\nEpoch: [10] [0/2] eta: 0:00:01 lr: 0.000001 loss: 0.6857 (0.6857) loss_classifier: 0.3411 (0.3411) loss_box_reg: 0.3179 (0.3179) loss_objectness: 0.0086 (0.0086) loss_rpn_box_reg: 0.0181 (0.0181) time: 0.8597 data: 0.3294 max mem: 2843\nEpoch: [10] [1/2] eta: 0:00:00 lr: 0.000001 loss: 0.5247 (0.6052) loss_classifier: 0.2969 (0.3190) loss_box_reg: 0.2023 (0.2601) loss_objectness: 0.0086 (0.0113) loss_rpn_box_reg: 0.0114 (0.0147) time: 0.7237 data: 0.1679 max mem: 2843\nEpoch: [10] Total time: 0:00:01 (0.7406 s / it)\nEpoch: [11] [0/2] eta: 
0:00:01 lr: 0.000001 loss: 0.5165 (0.5165) loss_classifier: 0.2676 (0.2676) loss_box_reg: 0.2175 (0.2175) loss_objectness: 0.0160 (0.0160) loss_rpn_box_reg: 0.0155 (0.0155) time: 0.8861 data: 0.3536 max mem: 2843\nEpoch: [11] [1/2] eta: 0:00:00 lr: 0.000001 loss: 0.5165 (0.6127) loss_classifier: 0.2676 (0.3149) loss_box_reg: 0.2175 (0.2703) loss_objectness: 0.0097 (0.0128) loss_rpn_box_reg: 0.0140 (0.0148) time: 0.7377 data: 0.1808 max mem: 2843\nEpoch: [11] Total time: 0:00:01 (0.7567 s / it)\nEpoch: [12] [0/2] eta: 0:00:01 lr: 0.000001 loss: 0.6150 (0.6150) loss_classifier: 0.3329 (0.3329) loss_box_reg: 0.2568 (0.2568) loss_objectness: 0.0096 (0.0096) loss_rpn_box_reg: 0.0157 (0.0157) time: 0.9514 data: 0.4121 max mem: 2843\nEpoch: [12] [1/2] eta: 0:00:00 lr: 0.000001 loss: 0.6052 (0.6101) loss_classifier: 0.3163 (0.3246) loss_box_reg: 0.2568 (0.2619) loss_objectness: 0.0061 (0.0078) loss_rpn_box_reg: 0.0157 (0.0157) time: 0.7667 data: 0.2081 max mem: 2843\nEpoch: [12] Total time: 0:00:01 (0.7871 s / it)\nEpoch: [13] [0/2] eta: 0:00:01 lr: 0.000000 loss: 0.5357 (0.5357) loss_classifier: 0.2942 (0.2942) loss_box_reg: 0.2147 (0.2147) loss_objectness: 0.0140 (0.0140) loss_rpn_box_reg: 0.0128 (0.0128) time: 0.8426 data: 0.2543 max mem: 2843\nEpoch: [13] [1/2] eta: 0:00:00 lr: 0.000000 loss: 0.5357 (0.6061) loss_classifier: 0.2942 (0.3185) loss_box_reg: 0.2147 (0.2597) loss_objectness: 0.0076 (0.0108) loss_rpn_box_reg: 0.0128 (0.0171) time: 0.6855 data: 0.1286 max mem: 2843\nEpoch: [13] Total time: 0:00:01 (0.7038 s / it)\nEpoch: [14] [0/2] eta: 0:00:01 lr: 0.000000 loss: 0.6998 (0.6998) loss_classifier: 0.3580 (0.3580) loss_box_reg: 0.3230 (0.3230) loss_objectness: 0.0047 (0.0047) loss_rpn_box_reg: 0.0140 (0.0140) time: 0.9931 data: 0.3989 max mem: 2843\nEpoch: [14] [1/2] eta: 0:00:00 lr: 0.000000 loss: 0.5124 (0.6061) loss_classifier: 0.2691 (0.3136) loss_box_reg: 0.2097 (0.2663) loss_objectness: 0.0047 (0.0108) loss_rpn_box_reg: 0.0140 (0.0155) time: 0.7605 data: 0.2008 max mem: 2843\nEpoch: [14] Total time: 0:00:01 (0.7795 s / it)\nEpoch: [15] [0/2] eta: 0:00:02 lr: 0.000000 loss: 0.5291 (0.5291) loss_classifier: 0.2922 (0.2922) loss_box_reg: 0.2225 (0.2225) loss_objectness: 0.0029 (0.0029) loss_rpn_box_reg: 0.0115 (0.0115) time: 1.0265 data: 0.4405 max mem: 2843\n" ], [ "torch.save(model.state_dict(), \"data/object_detection_model.pt\")", "_____no_output_____" ], [ "from torchvision import transforms\n# pick one image from the test set\n# img, _ = dataset[1]\nt = Image.open(\"data/desk-left-right.png\").convert(\"RGB\")\nimg = transforms.ToTensor()(t).squeeze()\n\n# put the model in evaluation mode\nmodel.load_state_dict(torch.load(\"data/object_detection_model.pt\"))\nmodel.eval()\nwith torch.no_grad():\n prediction = model([img.to(device)])", "_____no_output_____" ], [ "prediction", "_____no_output_____" ], [ "import matplotlib\nimport matplotlib.pyplot as plt\n\nimage = Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())\nplt.figure()\nfigure, ax = plt.subplots(1)\ncmap = plt.get_cmap('tab20b')\ncolors = [cmap(i) for i in np.linspace(0, 1, 20)]\n\nfor box, score, class_name in zip(prediction[0]['boxes'], prediction[0]['scores'], prediction[0]['labels']):\n if score > 0.29:\n x, y = box[0], box[1]\n len_x, len_y = box[2] - box[0], box[3] - box[1]\n rect = matplotlib.patches.Rectangle((x, y), len_x, len_y, edgecolor=colors[0], facecolor=\"none\")\n ax.add_patch(rect)\n plt.text(x, y, s=\"monitor\", color='white', verticalalignment='top',\n bbox={'color': colors[0], 'pad': 
0})\n\nax.imshow(image)\nplt.axis('off')\nplt.savefig(\"data/desk-left-right-detected\")\nplt.show()", "_____no_output_____" ], [ "# APPENDIX", "_____no_output_____" ], [ "import matplotlib\nimport matplotlib.pyplot as plt\n\nimage = Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())\nplt.figure()\nfigure, ax = plt.subplots(1)\ncmap = plt.get_cmap('tab20b')\ncolors = [cmap(i) for i in np.linspace(0, 1, 20)]\nlabels = {1: 'monitor', 2: 'keyboard', 3: 'desktop', 4: 'plant'}\n\nfor idx, (box, score, class_name) in enumerate(zip(prediction[0]['boxes'], prediction[0]['scores'], prediction[0]['labels'])):\n if idx in [0, 6, 15]:\n x, y = box[0], box[1]\n len_x, len_y = box[2] - box[0], box[3] - box[1]\n rect = matplotlib.patches.Rectangle((x, y), len_x, len_y, edgecolor=colors[int(class_name.cpu().numpy())], facecolor=\"none\")\n ax.add_patch(rect)\n plt.text(x, y, s=labels[int(class_name.cpu().numpy())], color='white', verticalalignment='top',\n bbox={'color': colors[int(class_name.cpu().numpy())], 'pad': 0})\n\nprint(ds.class_names)\nax.imshow(image)\nplt.axis('off')\nplt.savefig(\"training_result\")", "_____no_output_____" ], [ "print(ds.class_names)", "_____no_output_____" ], [ "import matplotlib\nimport matplotlib.pyplot as plt\n\nfrom torchvision import transforms\nt = Image.open(\"data/desk.png\").convert(\"RGB\")\nimg = transforms.ToTensor()(t).squeeze()\n\n# put the model in evaluation mode\n# 1 for background and 4 main classes\nnum_classes = 1 + 4\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nmodel = get_object_detection_model(num_classes)\nmodel.load_state_dict(torch.load(\"data/object_detection_model.pt\"))\nmodel.to(device)\nmodel.eval()\nwith torch.no_grad():\n prediction = model([img.to(device)])\n \nprint(prediction)\n \nimage = Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())\nplt.figure()\nfigure, ax = plt.subplots(1)\ncmap = plt.get_cmap('tab20b')\ncolors = [cmap(i) for i in np.linspace(0, 1, 20)]\nlabels = {1: 'monitor', 2: 'keyboard', 3: 'desktop', 4: 'plant'}\n\nfor idx, (box, score, class_name) in enumerate(zip(prediction[0]['boxes'], prediction[0]['scores'], prediction[0]['labels'])):\n if idx in [0, 1, 9]:\n x, y = box[0], box[1]\n len_x, len_y = box[2] - box[0], box[3] - box[1]\n rect = matplotlib.patches.Rectangle((x, y), len_x, len_y, edgecolor=colors[int(class_name.cpu().numpy())], facecolor=\"none\")\n ax.add_patch(rect)\n plt.text(x, y, s=labels[int(class_name.cpu().numpy())], color='white', verticalalignment='top',\n bbox={'color': colors[int(class_name.cpu().numpy())], 'pad': 0})\n\nax.imshow(image)\nplt.axis('off')\nplt.savefig(\"data/desk-detected\")\nplt.show()", "[{'boxes': tensor([[0.0000e+00, 4.0036e+02, 1.2729e+03, 1.1794e+03],\n [8.1681e+02, 1.0082e+03, 1.7607e+03, 1.5881e+03],\n [8.4376e+02, 8.9454e+02, 1.4104e+03, 1.6798e+03],\n [3.4775e+02, 5.3363e+02, 1.1411e+03, 1.2992e+03],\n [2.1702e+02, 2.0909e+02, 1.1067e+03, 1.5158e+03],\n [9.8893e+02, 9.5005e+02, 1.5821e+03, 1.7774e+03],\n [0.0000e+00, 1.6689e+00, 8.7493e+02, 4.7119e+02],\n [0.0000e+00, 6.1617e+02, 8.8175e+02, 1.1650e+03],\n [1.5182e+02, 4.2599e+02, 8.6204e+02, 1.0211e+03],\n [1.6140e+03, 8.4490e+02, 1.9490e+03, 1.2913e+03],\n [1.0282e+03, 0.0000e+00, 1.8428e+03, 1.3173e+03],\n [8.5937e+02, 7.4975e+02, 1.7415e+03, 1.9133e+03],\n [1.6104e+02, 5.4143e+02, 9.6917e+02, 1.9398e+03],\n [2.6079e+02, 4.2210e+02, 1.0594e+03, 1.1483e+03],\n [9.7331e+02, 9.5633e+02, 1.6289e+03, 1.5320e+03],\n [1.8426e+02, 5.1531e+02, 7.6813e+02, 
1.4638e+03],\n [7.1406e+02, 9.1327e+02, 1.6656e+03, 1.3813e+03],\n [0.0000e+00, 2.8463e+01, 8.0024e+02, 1.2470e+03],\n [1.2500e+03, 8.3847e+02, 1.9473e+03, 1.3026e+03],\n [1.1151e+03, 1.0540e+03, 1.8749e+03, 1.7712e+03],\n [1.6260e+02, 1.0227e+03, 8.6447e+02, 1.7245e+03],\n [1.5738e+02, 1.0349e+03, 8.7013e+02, 1.7261e+03],\n [0.0000e+00, 4.5436e+01, 4.8457e+02, 3.9411e+02],\n [4.5938e+01, 5.4335e+02, 1.0177e+03, 1.1277e+03],\n [0.0000e+00, 4.4848e+02, 1.4488e+03, 1.2865e+03],\n [1.5637e+03, 8.8209e+02, 1.8957e+03, 1.3116e+03],\n [5.5491e+02, 1.0004e+03, 1.4944e+03, 1.7937e+03],\n [8.5876e+02, 9.2593e+02, 1.9370e+03, 1.7024e+03],\n [2.3035e+01, 3.1519e+02, 1.0952e+03, 1.3941e+03],\n [1.6851e+02, 1.0138e+03, 8.6444e+02, 1.7342e+03],\n [3.5210e+02, 2.5016e+02, 9.7758e+02, 1.1444e+03],\n [8.8070e+02, 9.4698e+02, 1.5721e+03, 1.6626e+03],\n [1.6137e+02, 5.6759e+02, 9.7604e+02, 1.9109e+03],\n [2.0743e+02, 1.6787e+02, 1.0952e+03, 1.5448e+03],\n [1.0566e+03, 0.0000e+00, 1.8231e+03, 1.2988e+03],\n [1.2583e+03, 8.3800e+02, 1.9490e+03, 1.2957e+03],\n [1.4129e+03, 7.5029e+02, 1.8782e+03, 1.5065e+03],\n [1.2295e+03, 1.0037e+02, 1.7859e+03, 1.0402e+03],\n [1.5548e+03, 8.8049e+02, 1.9033e+03, 1.3317e+03],\n [4.8731e+00, 1.2623e+02, 1.9490e+03, 1.0831e+03],\n [1.6388e+02, 4.9729e+02, 9.5255e+02, 1.9247e+03],\n [0.0000e+00, 8.6784e+02, 1.1628e+03, 1.7433e+03],\n [0.0000e+00, 0.0000e+00, 8.5974e+02, 4.7567e+02],\n [1.9694e+02, 5.6905e+02, 7.5845e+02, 1.4905e+03],\n [1.5256e+03, 6.1693e+02, 1.9266e+03, 1.3418e+03],\n [7.4680e+02, 9.4964e+02, 1.4038e+03, 1.6526e+03],\n [1.1071e+03, 1.0375e+03, 1.8804e+03, 1.7496e+03],\n [3.5647e+02, 5.1382e+02, 1.1560e+03, 1.2854e+03],\n [1.4135e+03, 8.5130e+02, 1.8161e+03, 1.2316e+03],\n [6.5290e+02, 1.1781e+03, 1.8709e+03, 1.9403e+03],\n [1.7217e+00, 4.1625e+02, 7.7934e+02, 1.1694e+03],\n [0.0000e+00, 4.6960e+01, 4.7470e+02, 4.0855e+02],\n [1.9597e+02, 5.5446e+02, 7.5327e+02, 1.4859e+03],\n [9.4974e+02, 1.0033e+03, 1.4705e+03, 1.8311e+03],\n [3.9425e+02, 4.5513e+02, 1.4393e+03, 9.6582e+02],\n [5.8656e+02, 4.5732e+02, 1.0807e+03, 1.3608e+03],\n [3.5691e+02, 4.7454e+02, 1.3980e+03, 9.8045e+02],\n [2.4601e+02, 4.3842e+02, 1.0463e+03, 1.1443e+03],\n [7.0087e+01, 2.3661e+01, 6.6236e+02, 8.2381e+02],\n [1.9798e+01, 7.0098e+02, 1.0826e+03, 1.2719e+03],\n [1.0359e+03, 0.0000e+00, 1.8602e+03, 1.3257e+03],\n [8.6322e+02, 9.5990e+02, 1.5563e+03, 1.6303e+03],\n [1.3698e+02, 9.8311e+02, 1.0468e+03, 1.4151e+03],\n [1.6569e+02, 1.0349e+03, 8.4612e+02, 1.7391e+03],\n [1.0549e+03, 3.6940e+01, 1.7440e+03, 7.2600e+02],\n [8.7534e+02, 8.8226e+02, 1.7605e+03, 1.3730e+03],\n [0.0000e+00, 4.1888e+02, 6.2274e+02, 1.7311e+03],\n [1.4399e+03, 1.5272e+02, 1.9490e+03, 1.6620e+03],\n [7.5925e+02, 2.8393e+02, 1.9401e+03, 1.1702e+03],\n [1.3223e+03, 7.1473e+02, 1.9472e+03, 1.3637e+03],\n [8.6883e+02, 7.2098e+02, 1.7202e+03, 1.9500e+03],\n [3.1316e+02, 3.7882e+02, 1.0148e+03, 1.0179e+03],\n [1.4465e+02, 9.5693e+02, 1.0522e+03, 1.4188e+03],\n [1.3475e+03, 1.6719e+01, 1.8911e+03, 1.0059e+03],\n [5.7845e+01, 2.0134e+01, 5.1815e+02, 3.8048e+02],\n [1.0241e+03, 1.1242e+03, 1.7398e+03, 1.7524e+03],\n [1.4869e+03, 9.6937e+02, 1.9353e+03, 1.7525e+03],\n [3.2512e+02, 6.6297e+02, 1.3794e+03, 1.1574e+03],\n [1.8930e+01, 8.8221e+01, 1.9490e+03, 1.0564e+03],\n [2.3410e+01, 4.4453e+02, 6.0256e+02, 1.3042e+03],\n [7.6667e+00, 0.0000e+00, 8.5922e+02, 4.6721e+02],\n [3.2811e+01, 0.0000e+00, 1.4102e+03, 1.1763e+03],\n [8.8271e+02, 1.0489e+03, 1.4678e+03, 1.3011e+03],\n [1.6711e+03, 9.7174e+02, 1.9393e+03, 1.3219e+03],\n 
[8.8451e+02, 8.8311e+02, 1.7749e+03, 1.3638e+03],\n [2.0223e+02, 2.2992e+02, 1.1046e+03, 1.5472e+03],\n [0.0000e+00, 0.0000e+00, 1.0366e+03, 8.0648e+02],\n [0.0000e+00, 4.5373e+02, 7.6854e+02, 1.1430e+03],\n [1.4119e+03, 8.4546e+02, 1.7979e+03, 1.2475e+03],\n [1.5208e+03, 5.4379e+02, 1.9309e+03, 1.3267e+03],\n [7.6462e+01, 1.1072e+03, 6.8525e+02, 1.9231e+03],\n [1.0089e+03, 7.0534e+02, 1.9473e+03, 1.1938e+03],\n [3.5580e+02, 4.9987e+02, 8.6737e+02, 1.4924e+03],\n [1.5969e+03, 7.5850e+02, 1.9445e+03, 1.2204e+03],\n [5.0173e+02, 7.4202e+02, 1.6700e+03, 1.7674e+03],\n [3.8971e+02, 9.6771e+02, 1.9490e+03, 1.9056e+03],\n [1.1872e+03, 6.7860e+02, 1.9490e+03, 1.9500e+03],\n [3.7630e+02, 1.0865e+03, 9.0281e+02, 1.3134e+03],\n [1.2430e+03, 8.4913e+02, 1.9490e+03, 1.2947e+03],\n [1.0049e+03, 1.1561e+03, 1.7357e+03, 1.7830e+03]], device='cuda:0'), 'labels': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 4, 4, 1, 1, 1, 1, 1, 2, 1, 1, 2,\n 4, 1, 1, 4, 2, 4, 1, 2, 2, 4, 4, 4, 1, 1, 2, 1, 4, 4, 2, 2, 1, 4, 2, 2,\n 1, 1, 4, 2, 3, 4, 4, 4, 1, 3, 1, 4, 2, 3, 1, 3, 1, 2, 1, 1, 4, 2, 2, 2,\n 4, 4, 4, 4, 1, 4, 4, 2, 4, 2, 1, 1, 4, 3, 2, 3, 2, 4, 2, 1, 4, 2, 4, 2,\n 4, 2, 3, 3], device='cuda:0'), 'scores': tensor([0.2714, 0.2664, 0.2440, 0.2406, 0.2364, 0.2233, 0.2154, 0.2051, 0.2036,\n 0.2008, 0.1981, 0.1970, 0.1951, 0.1943, 0.1878, 0.1872, 0.1832, 0.1830,\n 0.1772, 0.1754, 0.1754, 0.1752, 0.1744, 0.1735, 0.1733, 0.1723, 0.1718,\n 0.1710, 0.1676, 0.1666, 0.1639, 0.1618, 0.1616, 0.1585, 0.1569, 0.1567,\n 0.1503, 0.1495, 0.1484, 0.1476, 0.1473, 0.1451, 0.1440, 0.1440, 0.1429,\n 0.1407, 0.1399, 0.1393, 0.1392, 0.1391, 0.1382, 0.1380, 0.1379, 0.1379,\n 0.1373, 0.1360, 0.1357, 0.1355, 0.1352, 0.1351, 0.1349, 0.1331, 0.1322,\n 0.1303, 0.1293, 0.1282, 0.1276, 0.1270, 0.1269, 0.1255, 0.1251, 0.1246,\n 0.1234, 0.1234, 0.1232, 0.1230, 0.1227, 0.1225, 0.1225, 0.1225, 0.1219,\n 0.1219, 0.1217, 0.1204, 0.1200, 0.1195, 0.1190, 0.1173, 0.1172, 0.1163,\n 0.1160, 0.1156, 0.1155, 0.1150, 0.1142, 0.1140, 0.1138, 0.1136, 0.1135,\n 0.1133], device='cuda:0')}]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e719049019fd0a691d7c80951ec219536e59bb1f
555
ipynb
Jupyter Notebook
Deep dream.ipynb
annacarlsson/gender_classification
b36e075d24c23c5cc6ce2b6a40610e3750f24271
[ "MIT" ]
46
2020-06-30T13:44:16.000Z
2022-03-29T03:41:43.000Z
Deep dream.ipynb
annacarlsson/gender_classification
b36e075d24c23c5cc6ce2b6a40610e3750f24271
[ "MIT" ]
18
2020-10-14T09:11:02.000Z
2022-03-01T08:30:45.000Z
FINAL-TF2-FILES/TF_2_Notebooks_and_Data/09-Deployment/.ipynb_checkpoints/01-Deep-Dream-checkpoint.ipynb
tanuja333/Tensorflow_Keras
e29464da56666c675667b491b12d625ffaefddd9
[ "Apache-2.0" ]
11
2021-01-21T13:34:49.000Z
2022-01-31T12:36:23.000Z
16.818182
34
0.526126
[]
[]
[]
e7190ad78de6852f6ca7a872486a49a7b1e54f9c
73,369
ipynb
Jupyter Notebook
Modelagem/outlier_detection.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
Modelagem/outlier_detection.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
Modelagem/outlier_detection.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
40.760556
21,976
0.54508
[ [ [ "- I will aproach the task of detecting outlier behavior related to debt quality by applying **Isolation Forest**\n- Next, I will apply **PCA** to identify the outliers graphically", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "df = pd.read_excel(\"../tabelas/dataset_modelo.xlsx\")", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "#### Outlier, once applied elegibility conditions", "_____no_output_____" ], [ "- Applying elegibility conditions", "_____no_output_____" ] ], [ [ "dataset = df[(df['prop_divida']<1.5) & (df[\"quantidade_cheques\"]==0)]\n# dataset = df.copy()", "_____no_output_____" ], [ "# dataset.drop(columns=['prop_divida', 'quantidade_cheques', 'outros'], axis=1, inplace=True)\ndataset.drop(columns=['outros', 'prop_divida', 'outros', 'quantidade_cheques'], axis=1, inplace=True)", "_____no_output_____" ], [ "dataset.max()", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ], [ "dataset = dataset[['cnpj', 'credito', 'infra', 'processos', 'dispersao']]", "_____no_output_____" ], [ "dataset.index = dataset.cnpj", "_____no_output_____" ], [ "dataset.drop(columns=['cnpj'], axis=1, inplace=True)", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ] ], [ [ "- The features **idade_maxima** and **tempo_medio** may determine an outlier, but it depends on the values of the features to decide whether or not the outlier should be red flagged", "_____no_output_____" ] ], [ [ "# feature dataframe\n# X = dataset.iloc[:, 1:]\nX = dataset.copy()", "_____no_output_____" ], [ "X.head()", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ] ], [ [ "### Isolation Forest", "_____no_output_____" ] ], [ [ "outlier_detect = IsolationForest(n_estimators=1000,\n max_samples=100, \n contamination=0.05, \n max_features=X.shape[1], \n random_state=1)", "_____no_output_____" ], [ "outlier_detect.fit(X)", "_____no_output_____" ], [ "outliers_predicted = outlier_detect.predict(X)", "_____no_output_____" ], [ "dataset[\"outlier\"] = outliers_predicted", "_____no_output_____" ], [ "df_out = dataset[dataset['outlier']==-1]", "_____no_output_____" ], [ "# df_out[df_out['cnpj']==9189540000130]", "_____no_output_____" ], [ "df_out", "_____no_output_____" ], [ "dataset['credito'].mean()", "_____no_output_____" ], [ "dataset.mean()", "_____no_output_____" ] ], [ [ "### PCA", "_____no_output_____" ] ], [ [ "features = list(X.columns)\nx = X.loc[:, features].values\nx = StandardScaler().fit_transform(x)\nX = pd.DataFrame(x)\nX.columns = features", "_____no_output_____" ], [ "X.head()", "_____no_output_____" ], [ "pca = PCA(n_components=2)\n\nprincipalComponents = pca.fit_transform(X)\n\ndf_pca = pd.DataFrame(data = principalComponents, columns = ['pc1', 'pc2'])\n\ndf_pca.head()", "_____no_output_____" ], [ "df_pca['outlier'] = outliers_predicted", "_____no_output_____" ], [ "df_pca.head()", "_____no_output_____" ], [ "df_pca.shape", "_____no_output_____" ], [ "fig = plt.figure(figsize = (8,8))\nax = fig.add_subplot(1,1,1) \nax.set_xlabel('Principal Component 1', fontsize = 15)\nax.set_ylabel('Principal Component 2', fontsize = 15)\nax.set_title('2 component PCA', fontsize = 20)\ntargets = [\"outlier\", \"inlier\"]\ncolors = ['r', 'b']\nfor 
target, color in zip(targets,colors):\n indicesToKeep = df_pca['outlier'] == -1 if target == \"outlier\" else df_pca['outlier'] == 1\n ax.scatter(df_pca.loc[indicesToKeep, 'pc1']\n , df_pca.loc[indicesToKeep, 'pc2']\n , c = color\n , s = 50)\nax.legend(targets)\nax.grid()", "_____no_output_____" ], [ "df_pca.index = dataset.index", "_____no_output_____" ], [ "df_out['credito'].max()", "_____no_output_____" ], [ "df_out[df_out['credito']==df_out['credito'].max()]", "_____no_output_____" ], [ "df_out", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7191786c60e7c0f93c281c9dcdf18c136c581d7
17,652
ipynb
Jupyter Notebook
Python-101/11-strings.ipynb
AnkitaxPriya/LearningWire
0be5e5f4bb335661055dc7906815bf29f39a12ce
[ "MIT" ]
null
null
null
Python-101/11-strings.ipynb
AnkitaxPriya/LearningWire
0be5e5f4bb335661055dc7906815bf29f39a12ce
[ "MIT" ]
null
null
null
Python-101/11-strings.ipynb
AnkitaxPriya/LearningWire
0be5e5f4bb335661055dc7906815bf29f39a12ce
[ "MIT" ]
null
null
null
23.380132
548
0.519714
[ [ [ "# Strings", "_____no_output_____" ], [ " A string is a sequence of characters.\n\n Computers do not deal with characters, they deal with numbers (binary). Even though you may see characters on your screen, internally it is stored and manipulated as a combination of 0's and 1's.\n\n This conversion of character to a number is called encoding, and the reverse process is decoding. ASCII and Unicode are some of the popular encoding used.\n\n In Python, string is a sequence of Unicode character.", "_____no_output_____" ], [ "# How to create a string?", "_____no_output_____" ], [ "Strings can be created by enclosing characters inside a single quote or double quotes. \n\nEven triple quotes can be used in Python but generally used to represent multiline strings and docstrings.\n", "_____no_output_____" ] ], [ [ "myString = 'Hello'\n\nprint(myString)\n\n\nmyString = \"Hello\"\nprint(myString)\n\n\nmyString = '''Hello'''\nprint(myString)", "Hello\nHello\nHello\n" ] ], [ [ "# How to access characters in a string?", "_____no_output_____" ], [ "We can access individual characters using indexing and a range of characters using slicing.\n\nIndex starts from 0. \n\nTrying to access a character out of index range will raise an IndexError. \n\nThe index must be an integer. We can't use float or other types, this will result into TypeError.\n\nPython allows negative indexing for its sequences.", "_____no_output_____" ] ], [ [ "myString = \"Hello\"\n\n#print first Character\nprint(myString[0])\n\n#print last character using negative indexing\nprint(myString[-1])\n\n#slicing 2nd to 5th character\nprint(myString[2:5])", "H\no\nllo\n" ] ], [ [ "If we try to access index out of the range or use decimal number, we will get errors.", "_____no_output_____" ] ], [ [ "print(myString[15])", "_____no_output_____" ], [ "print(myString[1.5])", "_____no_output_____" ] ], [ [ "# How to change or delete a string ?", "_____no_output_____" ], [ "Strings are immutable. This means that elements of a string cannot be changed once it has been assigned. \n\nWe can simply reassign different strings to the same name.", "_____no_output_____" ] ], [ [ "myString = \"Hello\"\nmyString[4] = 's' # strings are immutable", "_____no_output_____" ] ], [ [ "We cannot delete or remove characters from a string. But deleting the string entirely is possible using the keyword del.", "_____no_output_____" ] ], [ [ "del myString # delete complete string", "_____no_output_____" ], [ "print(myString)", "_____no_output_____" ] ], [ [ "# String Operations", "_____no_output_____" ], [ "# Concatenation", "_____no_output_____" ], [ "Joining of two or more strings into a single one is called concatenation.\n\nThe + operator does this in Python. 
Simply writing two string literals together also concatenates them.\n\nThe * operator can be used to repeat the string for a given number of times.", "_____no_output_____" ] ], [ [ "s1 = \"Hello \"\ns2 = \"Satish\"\n\n#concatenation of 2 strings\nprint(s1 + s2)\n\n#repeat string n times\nprint(s1 * 3)", "Hello Satish\nHello Hello Hello \n" ] ], [ [ "# Iterating Through String", "_____no_output_____" ] ], [ [ "count = 0\nfor l in \"Hello World\":\n if l == 'o':\n count += 1\nprint(count, ' letters found')", "2 letters found\n" ] ], [ [ "# String Membership Test", "_____no_output_____" ] ], [ [ "print('l' in 'Hello World') #in operator to test membership", "True\n" ], [ "print('or' in 'Hello World')", "True\n" ] ], [ [ "# String Methods", "_____no_output_____" ], [ " Some of the commonly used methods are lower(), upper(), join(), split(), find(), replace() etc", "_____no_output_____" ] ], [ [ "\"Hello\".lower()", "_____no_output_____" ], [ "\"Hello\".upper()", "_____no_output_____" ], [ "\"This will split all words in a list\".split()", "_____no_output_____" ], [ "' '.join(['This', 'will', 'split', 'all', 'words', 'in', 'a', 'list'])", "_____no_output_____" ], [ "\"Good Morning\".find(\"Mo\")", "_____no_output_____" ], [ "s1 = \"Bad morning\"\n\ns2 = s1.replace(\"Bad\", \"Good\")\n\nprint(s1)\nprint(s2)", "Bad morning\nGood morning\n" ] ], [ [ "# Python Program to Check where a String is Palindrome or not ?", "_____no_output_____" ] ], [ [ "myStr = \"Madam\"\n\n#convert entire string to either lower or upper\nmyStr = myStr.lower()\n\n#reverse string\nrevStr = reversed(myStr)\n\n\n#check if the string is equal to its reverse\nif list(myStr) == list(revStr):\n print(\"Given String is palindrome\")\nelse:\n print(\"Given String is not palindrome\")\n", "Given String is palindrome\n" ] ], [ [ "# Python Program to Sort Words in Alphabetic Order?", "_____no_output_____" ] ], [ [ "myStr = \"python Program to Sort words in Alphabetic Order\"\n\n#breakdown the string into list of words\nwords = myStr.split()\n\n#sort the list\nwords.sort()\n\n#print Sorted words are\nfor word in words:\n print(word)", "Alphabetic\nOrder\nProgram\nSort\nin\npython\nto\nwords\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e71924fee1c132fb0fc61b3ccd6564a19a89615d
59,611
ipynb
Jupyter Notebook
07_Visualization/Titanic_Desaster/Exercises.ipynb
SaqibSayed/Pandas_Excercise
69c954df813d9a84b7a8ff818c5cc322b207bf8f
[ "BSD-3-Clause" ]
null
null
null
07_Visualization/Titanic_Desaster/Exercises.ipynb
SaqibSayed/Pandas_Excercise
69c954df813d9a84b7a8ff818c5cc322b207bf8f
[ "BSD-3-Clause" ]
null
null
null
07_Visualization/Titanic_Desaster/Exercises.ipynb
SaqibSayed/Pandas_Excercise
69c954df813d9a84b7a8ff818c5cc322b207bf8f
[ "BSD-3-Clause" ]
null
null
null
110.186691
33,548
0.812199
[ [ [ "# Visualizing the Titanic Disaster", "_____no_output_____" ], [ "### Introduction:\n\nThis exercise is based on the titanic Disaster dataset avaiable at [Kaggle](https://www.kaggle.com/c/titanic). \nTo know more about the variables check [here](https://www.kaggle.com/c/titanic/data)\n\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/07_Visualization/Titanic_Desaster/train.csv)", "_____no_output_____" ], [ "### Step 3. Assign it to a variable titanic ", "_____no_output_____" ] ], [ [ "titanic = pd.read_csv('https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/07_Visualization/Titanic_Desaster/train.csv')\ntitanic.head()", "_____no_output_____" ] ], [ [ "### Step 4. Set PassengerId as the index ", "_____no_output_____" ] ], [ [ "titanic.set_index('PassengerId').head()", "_____no_output_____" ] ], [ [ "### Step 5. Create a pie chart presenting the male/female proportion", "_____no_output_____" ] ], [ [ "males = (titanic['Sex']=='male').sum()\nfemales = (titanic['Sex']=='female').sum()\n\nproportion = [males,females]\n\nplt.pie(proportion, labels =['Males','Females'], shadow =False, colors = ['blue','red'], explode = (0.15 , 0), startangle = 90, autopct = '%1.1f%%')\n\nplt.axis('equal')\n\n# Set labels\nplt.title(\"Sex Proportion\")\n\n# View the plot\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "### Step 6. Create a scatterplot with the Fare payed and the Age, differ the plot color by gender", "_____no_output_____" ] ], [ [ "# creates the plot using\nlm = sns.lmplot(x = 'Age', y = 'Fare', data = titanic, hue = 'Sex', fit_reg=False)\n\n# set title\nlm.set(title = 'Fare x Age')\n\n# get the axes object and tweak it\naxes = lm.axes\naxes[0,0].set_ylim(-5,)\naxes[0,0].set_xlim(-5,85)", "_____no_output_____" ] ], [ [ "### Step 7. How many people survived?", "_____no_output_____" ] ], [ [ "titanic.Survived.sum()", "_____no_output_____" ] ], [ [ "### Step 8. Create a histogram with the Fare payed", "_____no_output_____" ], [ "### BONUS: Create your own question and answer it.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e71934c3439f48275489714464cc4ebd6cc737a8
45,906
ipynb
Jupyter Notebook
pastis/temporal_analysis/time_wfsc_rn.ipynb
spacetelescope/PASTIS
9b534ee9a5d0d2fdf8cc61cd4b450bd4a5559f16
[ "BSD-3-Clause" ]
3
2020-12-13T11:44:44.000Z
2021-09-15T12:02:28.000Z
pastis/temporal_analysis/time_wfsc_rn.ipynb
spacetelescope/PASTIS
9b534ee9a5d0d2fdf8cc61cd4b450bd4a5559f16
[ "BSD-3-Clause" ]
37
2019-09-03T14:24:48.000Z
2021-09-22T14:03:22.000Z
pastis/temporal_analysis/time_wfsc_rn.ipynb
spacetelescope/PASTIS
9b534ee9a5d0d2fdf8cc61cd4b450bd4a5559f16
[ "BSD-3-Clause" ]
3
2020-02-10T20:02:09.000Z
2020-09-21T15:33:14.000Z
39.472055
248
0.577049
[ [ [ "# Temporal analysis with PASTIS matrices\n\nNote how this notebook requires the additional python library `exoscene` which is currently not included in the `environment.yml` file for the standard `pastis` conda environment.", "_____no_output_____" ] ], [ [ "import os\nimport time\n\nimport numpy as np\nfrom astropy.io import fits\nimport astropy.units as u\nimport astropy.constants as c\nimport hcipy\n\nfrom pastis.config import CONFIG_PASTIS \nimport pastis.util as util \nfrom pastis.e2e_simulators.luvoir_imaging import LuvoirA_APLC\n\nimport exoscene.image\nimport exoscene.star\nimport exoscene.planet\nfrom exoscene.planet import Planet", "_____no_output_____" ], [ "print(' Loading basic parameters for this specific telescope \\n')\n\n### Parameters\ndesign = 'small'\n\n# System parameters\nroot_dir = CONFIG_PASTIS.get('local', 'local_data_path')\noverall_dir = util.create_data_path(root_dir, telescope='luvoir_'+design)\n# Moving parts parameters\nanalysis_name = 'LUVOIRA_APLC_' + design\nmax_LO = CONFIG_PASTIS.getint('dm_objects', 'number_of_low_order_modes')\nmax_MID = CONFIG_PASTIS.getint('dm_objects', 'number_of_mid_order_modes')\nmax_HI = CONFIG_PASTIS.getint('dm_objects', 'number_of_high_order_modes')\nnum_DM_act = CONFIG_PASTIS.getint('dm_objects', 'number_of_continuous_dm_actuators')\n# General telescope parameters\nnb_seg = CONFIG_PASTIS.getint('LUVOIR', 'nb_subapertures')\nwvln = CONFIG_PASTIS.getfloat('LUVOIR', 'lambda') * 1e-9 # m\ndiam = CONFIG_PASTIS.getfloat('LUVOIR', 'diameter') # m\nnm_aber = CONFIG_PASTIS.getfloat('LUVOIR', 'calibration_aberration') * 1e-9 # m\n# Image system parameters\nsampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling')", "_____no_output_____" ], [ "# Print some of the defined parameters\nprint('LUVOIR apodizer design: {}'.format(design))\nprint()\nprint('Wavelength: {} m'.format(wvln))\nprint('Telescope diameter: {} m'.format(diam))\nprint('Number of segments: {}'.format(nb_seg))\nprint()\nprint('Sampling: {} px per lambda/D'.format(sampling))", "_____no_output_____" ], [ "optics_input = os.path.join(util.find_repo_location(), CONFIG_PASTIS.get('LUVOIR', 'optics_path_in_repo'))\nluvoir = LuvoirA_APLC(optics_input, design, sampling)\nnpup = np.int(np.sqrt(luvoir.pupil_grid.x.shape[0]))\nnimg = np.int(np.sqrt(luvoir.focal_det.x.shape[0]))", "_____no_output_____" ], [ "# Load the matrices\nfilename_matrix = 'EFIELD_Re_matrix_num_LO_' + str(max_LO) +'.fits'\nG_LO_real = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 'EFIELD_Im_matrix_num_LO_' + str(max_LO) +'.fits'\nG_LO_imag = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 'EFIELD_Re_matrix_num_MID_' + str(max_MID) +'.fits'\nG_MID_real = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 'EFIELD_Im_matrix_num_MID_' + str(max_MID) +'.fits'\nG_MID_imag = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 'EFIELD_Re_matrix_num_HI_' + str(max_HI) +'.fits'\nG_HI_real = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 'EFIELD_Im_matrix_num_HI_' + str(max_HI) +'.fits'\nG_HI_imag = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 'EFIELD_LOWFS_Re_matrix_num_LO_' + str(max_LO) +'.fits'\nG_LOWFS_real = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 
'EFIELD_LOWFS_Im_matrix_num_LO_' + str(max_LO) +'.fits'\nG_LOWFS_imag = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 'EFIELD_OBWFS_Re_matrix_num_MID_' + str(max_MID) +'.fits'\nG_OBWFS_real = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))\nfilename_matrix = 'EFIELD_OBWFS_Im_matrix_num_MID_' + str(max_MID) +'.fits'\nG_OBWFS_imag = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))", "_____no_output_____" ], [ "luvoir.create_global_zernike_mirror(max_LO)\nluvoir.create_segmented_mirror(max_MID)\nluvoir.create_ripple_mirror(max_HI)\nluvoir.create_continuous_deformable_mirror(num_DM_act)\n\nn_LO = luvoir.zernike_mirror.num_actuators\nn_MID = luvoir.sm.num_actuators \nn_HI = luvoir.ripple_mirror.num_actuators\nn_DM = luvoir.dm.num_actuators", "_____no_output_____" ], [ "z_pup_downsample = CONFIG_PASTIS.getfloat('numerical', 'z_pup_downsample')\nN_pup_z = np.int(luvoir.pupil_grid.shape[0] / z_pup_downsample)\ngrid_zernike = hcipy.field.make_pupil_grid(N_pup_z, diameter=luvoir.diam)", "_____no_output_____" ], [ "LO_modes = np.zeros(n_LO)\nMID_modes = np.zeros(n_MID)\nHI_modes = np.zeros(n_HI)\nDM_modes = np.zeros(n_DM)\n\nluvoir.zernike_mirror.actuators = LO_modes\nluvoir.sm.actuators = MID_modes\nluvoir.ripple_mirror.actuators = HI_modes\nluvoir.dm.actuators = DM_modes\n\nunaberrated_coro_psf, ref = luvoir.calc_psf(ref=True, display_intermediate=False)\nnorm = np.max(ref)\ndh_intensity = (unaberrated_coro_psf / norm) * luvoir.dh_mask\ncontrast_floor = np.mean(dh_intensity[np.where(luvoir.dh_mask != 0)])\nprint(f'contrast floor: {contrast_floor}')\n\nnonaberrated_coro_psf, ref,inter_ref = luvoir.calc_psf(ref=True, display_intermediate=False, return_intermediate='efield')\nEfield_ref = nonaberrated_coro_psf.electric_field", "_____no_output_____" ], [ "# Make matrices\nmat_LO = np.zeros([n_LO-1, n_LO-1])\nfor i in range(1, n_LO):\n for j in range(1, n_LO):\n tmpI = (G_LO_real[i]+1j*G_LO_imag[i] - Efield_ref)\n tmpJ = (G_LO_real[j]+1j*G_LO_imag[j] - Efield_ref)\n test = np.real(tmpI*np.conj(tmpJ))\n dh_test = (test / norm) * luvoir.dh_mask\n contrast = np.mean(dh_test[np.where(luvoir.dh_mask != 0)])\n mat_LO[i-1, j-1] = contrast", "_____no_output_____" ], [ "mat_MID = np.zeros([n_MID, n_MID])\nfor i in range(0, n_MID):\n for j in range(0, n_MID):\n tmpI = G_MID_real[i]+1j*G_MID_imag[i] - Efield_ref\n tmpJ = G_MID_real[j]+1j*G_MID_imag[j] - Efield_ref\n test = np.real(tmpI*np.conj(tmpJ))\n dh_test = (test / norm) * luvoir.dh_mask\n contrast = np.mean(dh_test[np.where(luvoir.dh_mask != 0)])\n mat_MID[i, j] = contrast", "_____no_output_____" ], [ "mat_HI = np.zeros([n_HI, n_HI])\nfor i in range(0, n_HI):\n for j in range(0, n_HI):\n tmpI = G_HI_real[i]+1j*G_HI_imag[i] - Efield_ref\n tmpJ = G_HI_real[j]+1j*G_HI_imag[j] - Efield_ref\n test = np.real(tmpI*np.conj(tmpJ))\n dh_test = (test / norm) * luvoir.dh_mask\n contrast = np.mean(dh_test[np.where(luvoir.dh_mask != 0)])\n mat_HI[i, j] = contrast", "_____no_output_____" ], [ "# PASTIS eigenvalues for all scales\n\nevalsLO, evecsLO = np.linalg.eig(mat_LO)\nsorted_evalsLO = np.sort(evalsLO)\nsorted_indicesLO = np.argsort(evalsLO)\nsorted_evecsLO = evecsLO[:, sorted_indicesLO]\n\nevalsMID, evecsMID = np.linalg.eig(mat_MID)\nsorted_evalsMID = np.sort(evalsMID)\nsorted_indicesMID = np.argsort(evalsMID)\nsorted_evecsMID = evecsMID[:, sorted_indicesMID]\n\nevalsHI, evecsHI = np.linalg.eig(mat_HI)\nsorted_evalsHI = np.sort(evalsHI)\nsorted_indicesHI = 
np.argsort(evalsHI)\nsorted_evecsHI = evecsHI[:, sorted_indicesHI]", "_____no_output_____" ], [ "# Calculate the segment based constraints\nc_target_log = -11\nc_target = 10**(c_target_log)\nn_repeat = 20", "_____no_output_____" ], [ "mu_mapLO = np.sqrt(((c_target) / (n_LO-1)) / (np.diag(mat_LO)))\nmu_mapMID = np.sqrt(((c_target) / (n_MID)) / (np.diag(mat_MID)))\nmu_mapHI = np.sqrt(((c_target) / (n_HI)) / (np.diag(mat_HI)))", "_____no_output_____" ], [ "# Getting the flux together\nsptype = 'A0V' # Put this on config\nVmag = 0.0 # Put this in loop\nminlam = 500 * u.nanometer # Put this on config\nmaxlam = 600 * u.nanometer # Put this on config\nstar_flux = exoscene.star.bpgs_spectype_to_photonrate(spectype=sptype, Vmag=Vmag, minlam=minlam.value, maxlam=maxlam.value)\nNph = star_flux.value*15**2*np.sum(luvoir.apodizer**2) / npup**2\ndark_current = 0.0002\nCIC = 0.01\n# dark_current = 0.0000\n# CIC = 0.00", "_____no_output_____" ], [ "# Reference fluxes for the WF sensors\n# In particular we downsample everything\nLO_modes = np.zeros(n_LO)\nMID_modes = np.zeros(n_MID)\nHI_modes = np.zeros(n_HI)\nDM_modes = np.zeros(n_DM)\n\nluvoir.zernike_mirror.actuators = LO_modes\nluvoir.sm.actuators = MID_modes\nluvoir.ripple_mirror.actuators = HI_modes\nluvoir.dm.actuators = DM_modes", "_____no_output_____" ], [ "nonaberrated_coro_psf, refshit,inter_ref = luvoir.calc_psf(ref=True, display_intermediate=False, return_intermediate='efield')\nEfield_ref = nonaberrated_coro_psf.electric_field", "_____no_output_____" ], [ "zernike_ref = luvoir.calc_low_order_wfs()\nzernike_ref_sub_real = hcipy.field.subsample_field(zernike_ref.real, z_pup_downsample, grid_zernike, statistic='mean')\nzernike_ref_sub_imag = hcipy.field.subsample_field(zernike_ref.imag, z_pup_downsample, grid_zernike, statistic='mean')\nEfield_ref_LOWFS = (zernike_ref_sub_real + 1j*zernike_ref_sub_imag) * z_pup_downsample", "_____no_output_____" ], [ "zernike_ref = luvoir.calc_out_of_band_wfs()\nzernike_ref_sub_real = hcipy.field.subsample_field(zernike_ref.real, z_pup_downsample, grid_zernike, statistic='mean')\nzernike_ref_sub_imag = hcipy.field.subsample_field(zernike_ref.imag, z_pup_downsample, grid_zernike, statistic='mean')\nEfield_ref_OBWFS = (zernike_ref_sub_real + 1j*zernike_ref_sub_imag) * z_pup_downsample", "_____no_output_____" ], [ "nyquist_sampling = 2.\n\n# Actual grid for LUVOIR images\ngrid_test = hcipy.make_focal_grid(\n luvoir.sampling,\n luvoir.imlamD,\n pupil_diameter=luvoir.diam,\n focal_length=1,\n reference_wavelength=luvoir.wvln,\n )\n\n# Actual grid for LUVOIR images that are nyquist sampled\ngrid_det_subsample = hcipy.make_focal_grid(\n nyquist_sampling,\n np.floor(luvoir.imlamD),\n pupil_diameter=luvoir.diam,\n focal_length=1,\n reference_wavelength=luvoir.wvln,\n )\nn_nyquist = np.int(np.sqrt(grid_det_subsample.x.shape[0]))", "_____no_output_____" ], [ "### Dark hole mask\ndh_outer_nyquist = hcipy.circular_aperture(2 * luvoir.apod_dict[design]['owa'] * luvoir.lam_over_d)(grid_det_subsample)\ndh_inner_nyquist = hcipy.circular_aperture(2 * luvoir.apod_dict[design]['iwa'] * luvoir.lam_over_d)(grid_det_subsample)\ndh_mask_nyquist = (dh_outer_nyquist - dh_inner_nyquist).astype('bool')\n\ndh_size = len(np.where(luvoir.dh_mask != 0)[0])\ndh_size_nyquist = len(np.where(dh_mask_nyquist != 0)[0])\ndh_index = np.where(luvoir.dh_mask != 0)[0]\ndh_index_nyquist = np.where(dh_mask_nyquist != 0)[0]", "_____no_output_____" ], [ "### Rebinning everything to the right sampling\n#\nE0_LOWFS = 
np.zeros([N_pup_z*N_pup_z,1,2])\nE0_LOWFS[:,0,0] = Efield_ref_LOWFS.real\nE0_LOWFS[:,0,1] = Efield_ref_LOWFS.imag\nE0_OBWFS = np.zeros([N_pup_z*N_pup_z,1,2])\nE0_OBWFS[:,0,0] = Efield_ref_OBWFS.real\nE0_OBWFS[:,0,1] = Efield_ref_OBWFS.imag\nE0_coron = np.zeros([nimg*nimg,1,2])\nE0_coron[:,0,0] = Efield_ref.real\nE0_coron[:,0,1] = Efield_ref.imag\nE0_coron_nyquist = np.zeros([n_nyquist*n_nyquist,1,2])\ntmp0 = hcipy.interpolation.make_linear_interpolator_separated(Efield_ref, grid=grid_test)\nEfield_ref_nyquist = (luvoir.sampling/nyquist_sampling)**2*tmp0(grid_det_subsample)\nE0_coron_nyquist[:,0,0] = Efield_ref_nyquist.real\nE0_coron_nyquist[:,0,1] = Efield_ref_nyquist.imag\nE0_coron_DH = np.zeros([dh_size,1,2])\nE0_coron_DH[:,0,0] = Efield_ref.real[dh_index]\nE0_coron_DH[:,0,1] = Efield_ref.imag[dh_index]\nE0_coron_DH_nyquist = np.zeros([dh_size_nyquist,1,2])\nE0_coron_DH_nyquist[:,0,0] = Efield_ref_nyquist.real[dh_index_nyquist]\nE0_coron_DH_nyquist[:,0,1] = Efield_ref_nyquist.imag[dh_index_nyquist]", "_____no_output_____" ], [ "G_coron_LO_nyquist = np.zeros([n_nyquist*n_nyquist,2,n_LO-1])\nfor pp in range(1, n_LO):\n    tmp0 = G_LO_real[pp] + 1j*G_LO_imag[pp]\n    tmp1 = hcipy.interpolation.make_linear_interpolator_separated(tmp0, grid=grid_test)\n    tmp2 = (luvoir.sampling/nyquist_sampling)**2*tmp1(grid_det_subsample)\n    G_coron_LO_nyquist[:,0,pp-1] = tmp2.real - Efield_ref_nyquist.real\n    G_coron_LO_nyquist[:,1,pp-1] = tmp2.imag - Efield_ref_nyquist.imag", "_____no_output_____" ], [ "G_coron_MID_nyquist= np.zeros([n_nyquist*n_nyquist,2,n_MID])\nfor pp in range(0, n_MID):\n    tmp0 = G_MID_real[pp] + 1j*G_MID_imag[pp]\n    tmp1 = hcipy.interpolation.make_linear_interpolator_separated(tmp0, grid=grid_test)\n    tmp2 = (luvoir.sampling/nyquist_sampling)**2*tmp1(grid_det_subsample)\n    G_coron_MID_nyquist[:,0,pp] = tmp2.real - Efield_ref_nyquist.real\n    G_coron_MID_nyquist[:,1,pp] = tmp2.imag - Efield_ref_nyquist.imag\nG_coron_HI_nyquist= np.zeros([n_nyquist*n_nyquist,2,n_HI])\nfor pp in range(0, n_HI):\n    tmp0 = G_HI_real[pp] + 1j*G_HI_imag[pp]\n    tmp1 = hcipy.interpolation.make_linear_interpolator_separated(tmp0, grid=grid_test)\n    tmp2 = (luvoir.sampling/nyquist_sampling)**2*tmp1(grid_det_subsample)\n    G_coron_HI_nyquist[:,0,pp] = tmp2.real - Efield_ref_nyquist.real\n    G_coron_HI_nyquist[:,1,pp] = tmp2.imag - Efield_ref_nyquist.imag\n\nG_coron_LO_DH = np.zeros([dh_size,2,n_LO-1])\nfor pp in range(1, n_LO):\n    G_coron_LO_DH[:,0,pp-1] = G_LO_real[pp,dh_index] - Efield_ref.real[dh_index]\n    G_coron_LO_DH[:,1,pp-1] = G_LO_imag[pp,dh_index] - Efield_ref.imag[dh_index]\nG_coron_MID_DH= np.zeros([dh_size,2,n_MID])\nfor pp in range(0, n_MID):\n    G_coron_MID_DH[:,0,pp] = G_MID_real[pp,dh_index] - Efield_ref.real[dh_index]\n    G_coron_MID_DH[:,1,pp] = G_MID_imag[pp,dh_index] - Efield_ref.imag[dh_index]\nG_coron_HI_DH= np.zeros([dh_size,2,n_HI])\nfor pp in range(0, n_HI):\n    G_coron_HI_DH[:,0,pp] = G_HI_real[pp,dh_index] - Efield_ref.real[dh_index]\n    G_coron_HI_DH[:,1,pp] = G_HI_imag[pp,dh_index] - Efield_ref.imag[dh_index]", "_____no_output_____" ], [ "G_coron_LO_DH_nyquist = np.zeros([dh_size_nyquist,2,n_LO-1])\nfor pp in range(1, n_LO):\n    tmp0 = G_LO_real[pp] + 1j*G_LO_imag[pp]\n    tmp1 = hcipy.interpolation.make_linear_interpolator_separated(tmp0, grid=grid_test)\n    tmp2 = (luvoir.sampling/nyquist_sampling)**2*tmp1(grid_det_subsample)\n    G_coron_LO_DH_nyquist[:,0,pp-1] = tmp2.real[dh_index_nyquist] - Efield_ref_nyquist.real[dh_index_nyquist]\n    G_coron_LO_DH_nyquist[:,1,pp-1] = tmp2.imag[dh_index_nyquist] - Efield_ref_nyquist.imag[dh_index_nyquist]\nG_coron_MID_DH_nyquist= np.zeros([dh_size_nyquist,2,n_MID])\nfor pp in range(0, n_MID):\n    tmp0 = G_MID_real[pp] + 1j*G_MID_imag[pp]\n    tmp1 = hcipy.interpolation.make_linear_interpolator_separated(tmp0, grid=grid_test)\n    tmp2 = (luvoir.sampling/nyquist_sampling)**2*tmp1(grid_det_subsample)\n    G_coron_MID_DH_nyquist[:,0,pp] = tmp2.real[dh_index_nyquist] - Efield_ref_nyquist.real[dh_index_nyquist]\n    G_coron_MID_DH_nyquist[:,1,pp] = tmp2.imag[dh_index_nyquist] - Efield_ref_nyquist.imag[dh_index_nyquist]\nG_coron_HI_DH_nyquist= np.zeros([dh_size_nyquist,2,n_HI])\nfor pp in range(0, n_HI):\n    tmp0 = G_HI_real[pp] + 1j*G_HI_imag[pp]\n    tmp1 = hcipy.interpolation.make_linear_interpolator_separated(tmp0, grid=grid_test)\n    tmp2 = (luvoir.sampling/nyquist_sampling)**2*tmp1(grid_det_subsample)\n    G_coron_HI_DH_nyquist[:,0,pp] = tmp2.real[dh_index_nyquist] - Efield_ref_nyquist.real[dh_index_nyquist]\n    G_coron_HI_DH_nyquist[:,1,pp] = tmp2.imag[dh_index_nyquist] - Efield_ref_nyquist.imag[dh_index_nyquist]", "_____no_output_____" ], [ "G_coron_LO = np.zeros([nimg*nimg,2,n_LO-1])\nfor pp in range(1, n_LO):\n    G_coron_LO[:,0,pp-1] = G_LO_real[pp] - Efield_ref.real\n    G_coron_LO[:,1,pp-1] = G_LO_imag[pp] - Efield_ref.imag\nG_coron_MID= np.zeros([nimg*nimg,2,n_MID])\nfor pp in range(0, n_MID):\n    G_coron_MID[:,0,pp] = G_MID_real[pp] - Efield_ref.real\n    G_coron_MID[:,1,pp] = G_MID_imag[pp] - Efield_ref.imag\nG_coron_HI= np.zeros([nimg*nimg,2,n_HI])\nfor pp in range(0, n_HI):\n    G_coron_HI[:,0,pp] = G_HI_real[pp] - Efield_ref.real\n    G_coron_HI[:,1,pp] = G_HI_imag[pp] - Efield_ref.imag", "_____no_output_____" ], [ "G_LOWFS = np.zeros([N_pup_z*N_pup_z,2,n_LO-1])\nfor pp in range(1, n_LO):\n    G_LOWFS[:,0,pp-1] = G_LOWFS_real[pp]*z_pup_downsample - Efield_ref_LOWFS.real\n    G_LOWFS[:,1,pp-1] = G_LOWFS_imag[pp]*z_pup_downsample - Efield_ref_LOWFS.imag\nG_OBWFS= np.zeros([N_pup_z*N_pup_z,2,n_MID])\nfor pp in range(0, n_MID):\n    G_OBWFS[:,0,pp] = G_OBWFS_real[pp]*z_pup_downsample - Efield_ref_OBWFS.real\n    G_OBWFS[:,1,pp] = G_OBWFS_imag[pp]*z_pup_downsample - Efield_ref_OBWFS.imag", "_____no_output_____" ], [ "def req_closedloop_calc_recursive(Gcoro, Gsensor, E0coro, E0sensor, Dcoro, Dsensor, t_exp, flux, Q, Niter, dh_mask,\n                                  norm):\n    P = np.zeros(Q.shape) # WFE modes covariance estimate\n    r = Gsensor.shape[2]\n    N = Gsensor.shape[0]\n    N_img = Gcoro.shape[0]\n    c = 1\n    # Iterations of ALGORITHM 1\n    contrast_hist = np.zeros(Niter)\n    intensity_WFS_hist = np.zeros(Niter)\n    cal_I_hist = np.zeros(Niter)\n    eps_hist = np.zeros([Niter, r])\n    averaged_hist = np.zeros(Niter)\n    contrasts = []\n    for pp in range(Niter):\n        eps = np.random.multivariate_normal(np.zeros(r), P + Q * t_exp).reshape((1, 1, r)) # random modes\n        G_eps = np.sum(Gsensor * eps, axis=2).reshape((N, 1, 2 * c)) + E0sensor # electric field\n        G_eps_squared = np.sum(G_eps * G_eps, axis=2, keepdims=True)\n        G_eps_G = np.matmul(G_eps, Gsensor)\n        G_eps_G_scaled = G_eps_G / np.sqrt(G_eps_squared + Dsensor / flux / t_exp) # trick to save RAM\n        cal_I = 4 * flux * t_exp * np.einsum(\"ijk,ijl->kl\", G_eps_G_scaled, G_eps_G_scaled) # information matrix\n        P = np.linalg.inv(np.linalg.inv(P + Q * t_exp / 2) + cal_I)\n        # P = np.linalg.inv(cal_I)\n\n        # Coronagraph\n        G_eps_coron = np.sum(Gcoro * eps, axis=2).reshape((N_img, 1, 2 * c)) + E0coro\n        G_eps_coron_squared = np.sum(G_eps_coron * G_eps_coron, axis=2, keepdims=True)\n        intensity = G_eps_coron_squared * flux * t_exp + Dcoro\n\n        # Wavefront sensor\n        intensity_WFS = G_eps_squared * flux * 
t_exp + Dsensor\n\n # Archive\n test_DH0 = intensity[:, 0, 0] * luvoir.dh_mask\n test_DH = np.mean(test_DH0[np.where(test_DH0 != 0)])\n contrasts.append(test_DH / flux / t_exp / norm)\n intensity_WFS_hist[pp] = np.sum(intensity_WFS) / flux\n cal_I_hist[pp] = np.mean(cal_I) / flux\n eps_hist[pp] = eps\n averaged_hist[pp] = np.mean(contrasts)\n # print(\"est. contrast\", np.mean(contrasts))\n\n outputs = {'intensity_WFS_hist': intensity_WFS_hist,\n 'cal_I_hist': cal_I_hist,\n 'eps_hist': eps_hist,\n 'averaged_hist': averaged_hist,\n 'contrasts': contrasts}\n return outputs", "_____no_output_____" ], [ "def req_closedloop_calc_batch(Gcoro, Gsensor, E0coro, E0sensor, Dcoro, Dsensor, t_exp, flux, Q, Niter, dh_mask, norm):\n P = np.zeros(Q.shape) # WFE modes covariance estimate\n r = Gsensor.shape[2]\n N = Gsensor.shape[0]\n N_img = Gcoro.shape[0]\n c = 1\n # Iterations of ALGORITHM 1\n contrast_hist = np.zeros(Niter)\n intensity_WFS_hist = np.zeros(Niter)\n cal_I_hist = np.zeros(Niter)\n eps_hist = np.zeros([Niter, r])\n averaged_hist = np.zeros(Niter)\n contrasts = []\n for pp in range(Niter):\n eps = np.random.multivariate_normal(np.zeros(r), P + Q * t_exp).reshape((1, 1, r)) # random modes\n G_eps = np.sum(Gsensor * eps, axis=2).reshape((N, 1, 2 * c)) + E0sensor # electric field\n G_eps_squared = np.sum(G_eps * G_eps, axis=2, keepdims=True)\n G_eps_G = np.matmul(G_eps, Gsensor)\n G_eps_G_scaled = G_eps_G / np.sqrt(G_eps_squared + Dsensor / flux / t_exp) # trick to save RAM\n cal_I = 4 * flux * t_exp * np.einsum(\"ijk,ijl->kl\", G_eps_G_scaled, G_eps_G_scaled) # information matrix\n # P = np.linalg.inv(np.linalg.inv(P+Q*t_exp/2) + cal_I)\n P = np.linalg.pinv(cal_I)\n\n # Coronagraph\n G_eps_coron = np.sum(Gcoro * eps, axis=2).reshape((N_img, 1, 2 * c)) + E0coro\n G_eps_coron_squared = np.sum(G_eps_coron * G_eps_coron, axis=2, keepdims=True)\n intensity = G_eps_coron_squared * flux * t_exp + Dcoro\n\n # Wavefront sensor\n intensity_WFS = G_eps_squared * flux * t_exp + Dsensor\n\n # Archive\n test_DH0 = intensity[:, 0, 0] * luvoir.dh_mask\n test_DH = np.mean(test_DH0[np.where(test_DH0 != 0)])\n contrasts.append(test_DH / flux / t_exp / norm)\n intensity_WFS_hist[pp] = np.sum(intensity_WFS) / flux\n cal_I_hist[pp] = np.mean(cal_I) / flux\n eps_hist[pp] = eps\n averaged_hist[pp] = np.mean(contrasts)\n # print(\"est. contrast\", np.mean(contrasts))\n # print(\"est. 
contrast\", np.mean(contrasts))\n\n outputs = {'intensity_WFS_hist': intensity_WFS_hist,\n 'cal_I_hist': cal_I_hist,\n 'eps_hist': eps_hist,\n 'averaged_hist': averaged_hist,\n 'contrasts': contrasts}\n\n return outputs", "_____no_output_____" ], [ "flux = Nph\nQLO = np.diag(np.asarray(mu_mapLO**2))\nQMID = np.diag(np.asarray(mu_mapMID**2))\nQHI = np.diag(np.asarray(mu_mapHI**2))\n\n# Running a bunch of tests for time series\n\nNtimes = 20\nTimeMinus = -2\nTimePlus = 3.5\nNwavescale = 8\nWaveScaleMinus = -2\nWaveScalePlus = 1\nNflux = 3\nfluxPlus = 10\nfluxMinus = 0\n\ntimeVec = np.logspace(TimeMinus,TimePlus,Ntimes)\nWaveVec = np.logspace(WaveScaleMinus,WaveScalePlus,Nwavescale)\nfluxVec = np.linspace(fluxMinus,fluxPlus,Nflux)\nwavescaleVec = np.logspace(WaveScaleMinus,WaveScalePlus,Nwavescale)", "_____no_output_____" ], [ "niter = 2\n\nprint('LO modes with batch LOWFS and noise')\n\ntimer1 = time.time()\n\n\nres = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n qq = 0\n print(tscale)\n for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n rr = 0\n for StarMag in np.linspace(fluxMinus, fluxPlus, Nflux):\n Starfactor = 10**(-StarMag/2.5)\n tmp0 = req_closedloop_calc_batch(G_coron_LO, G_LOWFS, E0_coron, E0_LOWFS, dark_current+CIC/tscale,\n dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*QLO,\n niter, luvoir.dh_mask, norm)\n tmp1 = tmp0['averaged_hist']\n n_tmp1 = len(tmp1)\n res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n rr = rr + 1\n qq = qq + 1\n pp = pp + 1", "_____no_output_____" ], [ "res_line = np.reshape(res, [Ntimes*Nwavescale*Nflux])\ntext_files_name = overall_dir + '/LO_LOWFS_Batch_dark_' + np.str(dark_current) + '_CIC_' + np.str(CIC) + '.csv'\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\n\nprint('LO modes with recursive LOWFS and noise')\n\ntimer1 = time.time()\n\n\nres = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n qq = 0\n print(tscale)\n for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n rr = 0\n for StarMag in np.linspace(fluxMinus, fluxPlus, Nflux):\n Starfactor = 10**(-StarMag/2.5)\n tmp0 = req_closedloop_calc_recursive(G_coron_LO, G_LOWFS, E0_coron, E0_LOWFS, dark_current+CIC/tscale,\n dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*QLO,\n niter, luvoir.dh_mask,norm)\n tmp1 = tmp0['averaged_hist']\n n_tmp1 = len(tmp1)\n res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n rr = rr + 1\n qq = qq + 1\n pp = pp + 1", "_____no_output_____" ], [ "res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'LO_LOWFS_Recursive_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\n\nprint('LO modes with batch Nyquist DH and noise')", "_____no_output_____" ], [ "timer1 = time.time()\n\n\nres = np.zeros([Ntimes,Nwavescale,Nflux,1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n qq = 0\n print(tscale)\n for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n rr = 0\n for StarMag in np.linspace(fluxMinus,fluxPlus,Nflux):\n Starfactor = 10**(-StarMag/2.5)\n tmp0 = req_closedloop_calc_batch(G_coron_LO, G_coron_LO_DH_nyquist, E0_coron, E0_coron_DH_nyquist,\n dark_current+CIC/tscale, dark_current+CIC/tscale, tscale,\n 
flux*Starfactor, wavescale**2*QLO, niter, luvoir.dh_mask, norm)\n tmp1 = tmp0['averaged_hist']\n n_tmp1 = len(tmp1)\n res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n rr = rr + 1\n qq = qq + 1\n pp = pp + 1", "_____no_output_____" ], [ "res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'LO_DH_Batch_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\nprint('LO modes with recursive Nyquist DH and noise')\n\ntimer1 = time.time()", "_____no_output_____" ], [ "res = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n qq = 0\n print(tscale)\n for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n rr = 0\n for StarMag in np.linspace(fluxMinus,fluxPlus,Nflux):\n Starfactor = 10**(-StarMag/2.5)\n tmp0 = req_closedloop_calc_recursive(G_coron_LO, G_coron_LO_DH_nyquist, E0_coron, E0_coron_DH_nyquist,\n dark_current+CIC/tscale, dark_current+CIC/tscale, tscale,\n flux*Starfactor, wavescale**2*QLO, niter, luvoir.dh_mask, norm)\n tmp1 = tmp0['averaged_hist']\n n_tmp1 = len(tmp1)\n res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n rr = rr + 1\n qq = qq + 1\n pp = pp + 1", "_____no_output_____" ] ], [ [ "# takes a long time to run", "_____no_output_____" ] ], [ [ "res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'LO_DH_Recursive_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\nprint('MID modes with batch OBWFS and noise')\n\ntimer1 = time.time()\n\n\nres = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n qq = 0\n print(tscale)\n for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n rr = 0\n for StarMag in np.linspace(fluxMinus, fluxPlus, Nflux):\n Starfactor = 10**(-StarMag/2.5)\n tmp0 = req_closedloop_calc_batch(G_coron_MID, G_OBWFS, E0_coron, E0_OBWFS, dark_current+CIC/tscale,\n dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*QMID,\n niter, luvoir.dh_mask, norm)\n tmp1 = tmp0['averaged_hist']\n n_tmp1 = len(tmp1)\n res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n rr = rr + 1\n qq = qq + 1\n pp = pp + 1", "_____no_output_____" ], [ "res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'MID_OBWFS_Batch_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\n\nprint('MID modes with recursive OBWFS and noise')\n\ntimer1 = time.time()\n\n\nres = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n qq = 0\n print(tscale)\n for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n rr = 0\n for StarMag in np.linspace(fluxMinus,fluxPlus,Nflux):\n Starfactor = 10**(-StarMag/2.5)\n tmp0 = req_closedloop_calc_recursive(G_coron_MID, G_OBWFS, E0_coron, E0_OBWFS, dark_current+CIC/tscale,\n dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*QMID,\n niter, luvoir.dh_mask, norm)\n tmp1 = tmp0['averaged_hist']\n n_tmp1 = len(tmp1)\n res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n rr = rr + 1\n qq = qq + 1\n pp = pp + 1", "_____no_output_____" ], [ "res_line = 
np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'MID_OBWFS_Recursive_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\n\nprint('MID modes with batch Nyquist DH and noise')\n\ntimer1 = time.time()\n\n\nres = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n    qq = 0\n    print(tscale)\n    for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n        rr = 0\n        for StarMag in np.linspace(fluxMinus, fluxPlus, Nflux):\n            Starfactor = 10**(-StarMag/2.5)\n            tmp0 = req_closedloop_calc_batch(G_coron_MID, G_coron_MID_DH_nyquist, E0_coron, E0_coron_DH_nyquist, dark_current+CIC/tscale, dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*QMID, niter, luvoir.dh_mask, norm)\n            tmp1 = tmp0['averaged_hist']\n            n_tmp1 = len(tmp1)\n            res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n            rr = rr + 1\n        qq = qq + 1\n    pp = pp + 1\n", "_____no_output_____" ], [ "res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'MID_DH_Batch_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\nprint('MID modes with recursive Nyquist DH and noise')\n\ntimer1 = time.time()\n\n\nres = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n    qq = 0\n    print(tscale)\n    for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n        rr = 0\n        for StarMag in np.linspace(fluxMinus, fluxPlus, Nflux):\n            Starfactor = 10**(-StarMag/2.5)\n            tmp0 = req_closedloop_calc_recursive(G_coron_MID, G_coron_MID_DH_nyquist, E0_coron, E0_coron_DH_nyquist, dark_current+CIC/tscale, dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*QMID, niter, luvoir.dh_mask, norm)\n            tmp1 = tmp0['averaged_hist']\n            n_tmp1 = len(tmp1)\n            res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n            rr = rr + 1\n        qq = qq + 1\n    pp = pp + 1\n", "_____no_output_____" ], [ "res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'MID_DH_Recursive_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\nprint('HI modes with batch Nyquist DH and noise')\n\ntimer1 = time.time()\n\n\nres = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n    qq = 0\n    print(tscale)\n    for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n        rr = 0\n        for StarMag in np.linspace(fluxMinus, fluxPlus, Nflux):\n            Starfactor = 10**(-StarMag/2.5)\n            tmp0 = req_closedloop_calc_batch(G_coron_HI, G_coron_HI_DH_nyquist, E0_coron, E0_coron_DH_nyquist, dark_current+CIC/tscale, dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*QHI, niter, luvoir.dh_mask, norm)\n            tmp1 = tmp0['averaged_hist']\n            n_tmp1 = len(tmp1)\n            res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n            rr = rr + 1\n        qq = qq + 1\n    pp = pp + 1\n", "_____no_output_____" ], [ "res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'HI_DH_Batch_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)\n\nprint('HI modes with recursive Nyquist DH and noise')\n\ntimer1 = 
time.time()\n\n\nres = np.zeros([Ntimes, Nwavescale, Nflux, 1])\npp = 0\nfor tscale in np.logspace(TimeMinus, TimePlus, Ntimes):\n qq = 0\n print(tscale)\n for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):\n rr = 0\n for StarMag in np.linspace(fluxMinus,fluxPlus,Nflux):\n Starfactor = 10**(-StarMag/2.5)\n tmp0 = req_closedloop_calc_recursive(G_coron_HI, G_coron_HI_DH_nyquist, E0_coron, E0_coron_DH_nyquist,\n dark_current+CIC/tscale, dark_current+CIC/tscale, tscale,\n flux*Starfactor, wavescale**2*QHI, niter, luvoir.dh_mask, norm)\n tmp1 = tmp0['averaged_hist']\n n_tmp1 = len(tmp1)\n res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor\n rr = rr + 1\n qq = qq + 1\n pp = pp + 1", "_____no_output_____" ], [ "res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])\ntext_files_name = os.path.join(overall_dir, f'HI_DH_Recursive_dark_{dark_current}_CIC_{CIC}.csv')\nnp.savetxt(text_files_name, res_line, delimiter=\",\")\n\ntimer2 = time.time()\nprint(timer2 - timer1)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e719375b78fc7539dce8863d2550185b6ec7f3da
26,171
ipynb
Jupyter Notebook
guided_project_large_data_handling/Using_Sqlite_Pandas_on_Large_Data.ipynb
Amyylam/sentimentscale
c736fd0a2fcfeb84da576bf4bc5b0e6aedc6d6eb
[ "MIT" ]
2
2021-02-24T00:17:01.000Z
2021-04-02T03:23:10.000Z
guided_project_large_data_handling/Using_Sqlite_Pandas_on_Large_Data.ipynb
kumar-abhishek/sentimentscale
c736fd0a2fcfeb84da576bf4bc5b0e6aedc6d6eb
[ "MIT" ]
null
null
null
guided_project_large_data_handling/Using_Sqlite_Pandas_on_Large_Data.ipynb
kumar-abhishek/sentimentscale
c736fd0a2fcfeb84da576bf4bc5b0e6aedc6d6eb
[ "MIT" ]
2
2021-04-02T03:26:35.000Z
2021-06-01T05:25:57.000Z
26.302513
416
0.460777
[ [ [ "## Guided project under course Using Sqlite and Pandas on Large Data\n\n- Analyzing Startup Fundraising Deals from Crunchbase \n- dataquest.com Course Mission 167", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "fundraising_iter = pd.read_csv('crunchbase-investments.csv',chunksize=5000,encoding='latin-1')\n\ntotal_mem_fp = 0\nmissing_dicts = {}\nfor chunk in fundraising_iter:\n #print(chunk.columns)\n #missing value counts of each column\n #print(chunk.isnull().sum().to_dict())\n missing_dicts.update(chunk.isnull().sum().to_dict())\n #mem footprint\n print(chunk.memory_usage(deep=True).sum())\n total_mem_fp += chunk.memory_usage(deep=True).sum()\n # each chunk consumes about 6mb in memory\n ", "5850258\n5796772\n5803921\n5796739\n5792696\n5823207\n5800132\n5777296\n5658251\n4864866\n2793107\n" ], [ "missing_dicts.keys()", "_____no_output_____" ], [ "no_missing_cols =[k for k,v in missing_dicts.items() if v == 0]\nno_missing_cols", "_____no_output_____" ], [ "total_mem_fp", "_____no_output_____" ], [ "#which columns to drop?\nchunk.describe()", "_____no_output_____" ], [ "test_df = pd.read_csv('crunchbase-investments.csv',nrows=10,encoding='latin-1')", "_____no_output_____" ], [ "test_df.isnull().sum()", "_____no_output_____" ], [ "test_df['investor_category_code'].value_counts() #only 1 value -- finance", "_____no_output_____" ], [ "test_df['investor_country_code'].value_counts()", "_____no_output_____" ], [ "##based on test_df, these 3 columns should be dropped\nfor col in test_df.columns:\n if test_df[col].nunique() == 1:\n print(col)", "company_country_code\ninvestor_category_code\ninvestor_country_code\n" ], [ "drop_cols = ['company_country_code',\n'investor_category_code','investor_country_code']", "_____no_output_____" ], [ "keep_cols = test_df.columns.drop(drop_cols)", "_____no_output_____" ], [ "keep_cols", "_____no_output_____" ], [ "#check column dtypes\ntest_df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10 entries, 0 to 9\nData columns (total 20 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 company_permalink 10 non-null object\n 1 company_name 10 non-null object\n 2 company_category_code 10 non-null object\n 3 company_country_code 10 non-null object\n 4 company_state_code 9 non-null object\n 5 company_region 10 non-null object\n 6 company_city 9 non-null object\n 7 investor_permalink 10 non-null object\n 8 investor_name 10 non-null object\n 9 investor_category_code 7 non-null object\n 10 investor_country_code 8 non-null object\n 11 investor_state_code 8 non-null object\n 12 investor_region 10 non-null object\n 13 investor_city 8 non-null object\n 14 funding_round_type 10 non-null object\n 15 funded_at 10 non-null object\n 16 funded_month 10 non-null object\n 17 funded_quarter 10 non-null object\n 18 funded_year 10 non-null int64 \n 19 raised_amount_usd 10 non-null int64 \ndtypes: int64(2), object(18)\nmemory usage: 1.7+ KB\n" ], [ "#Identify the numeric columns we can represent using more space efficient types.\n\ntest_df.select_dtypes(include=['float','integer'])", "_____no_output_____" ], [ "int_cols = test_df.select_dtypes(include=['float','integer']).columns", "_____no_output_____" ], [ "int_cols", "_____no_output_____" ], [ "# downcast from int64 to int16\n#test_df['funded_year'] = pd.to_numeric(test_df['funded_year'],downcast='integer')", "_____no_output_____" ], [ "# downcast from int64 to int32 possible, maybe not across all chunks\ntest_df['raised_amount_usd'] = 
pd.to_numeric(test_df['raised_amount_usd'],downcast='integer')", "_____no_output_____" ], [ "test_df['raised_amount_usd']", "_____no_output_____" ], [ "text_cols = test_df.select_dtypes(include=['object']).columns\ntext_cols", "_____no_output_____" ], [ "# For text columns:\n# Analyze the unique value counts across all of the chunks to see if we can convert them to a numeric type.\n\nnuniques_dict = test_df.nunique().to_dict()\n#turn some into category dtype\nfor k,v in nuniques_dict.items():\n if v/len(test_df[k]) < 0.5:\n test_df[k] = test_df[k].astype('category')\n\n", "_____no_output_____" ], [ "#11 objects columns turned into space-efficient dtype 'category'\ncat_columns = test_df.select_dtypes(include=['category']).columns", "_____no_output_____" ], [ "# for col in text_cols:\n# print(test_df[col].value_counts())", "_____no_output_____" ], [ "no_missing_cols", "_____no_output_____" ], [ "dtype_dict = {t:'category' for t in cat_columns if t in no_missing_cols}\n\ndtype_dict", "_____no_output_____" ], [ "# Make changes to the code from loading csv so that the overall memory the data consumes stays under 10 megabytes.\n", "_____no_output_____" ], [ "test_df.columns[18] # col19 'raised_amount_usd' cannot be \"int32\" as it has NA\n# col18 'raised_year' cannot be \"int16\" as it has NA", "_____no_output_____" ], [ "#use only keep_cols\nchunk_iter = pd.read_csv('crunchbase-investments.csv',encoding='latin-1',dtype=dtype_dict,chunksize=5000,usecols=keep_cols)\n\nfor chunk in chunk_iter:\n print(chunk.memory_usage(deep=True).sum()/(2**20)) \n#each chunk now consumes around 4.3 mb in memory-- can double up chunksize", "3.773927688598633\n3.643202781677246\n3.647123336791992\n3.611940383911133\n3.6208715438842773\n3.6335649490356445\n3.646841049194336\n3.633734703063965\n3.658021926879883\n3.45751953125\n1.971994400024414\n" ], [ "## best chunksize is 13000 -- so that the overall memory the data consumes stays under 10 megabytes.\nchunk_iter = pd.read_csv('crunchbase-investments.csv',encoding='latin-1',dtype=dtype_dict,chunksize=13000,usecols=keep_cols)\n\nfor chunk in chunk_iter:\n print(chunk.memory_usage(deep=True).sum()/(2**20)) ", "9.607172966003418\n9.379837989807129\n9.41344165802002\n9.562762260437012\n0.6198701858520508\n" ], [ "# next step is to load each chunk into a table in a SQLite database so we can query the full data set.", "_____no_output_____" ], [ "import sqlite3\nconn = sqlite3.connect('fundraising.db')", "_____no_output_____" ], [ "chunk_iter = pd.read_csv('crunchbase-investments.csv',encoding='latin-1',dtype=dtype_dict,chunksize=13000)\n\nfor chunk in chunk_iter:\n #print(chunk.memory_usage(deep=True).sum()/(2**20)) \n chunk.to_sql('fundraising',conn,if_exists='append',index=False)\n \nq = 'PRAGMA table_info(fundraising)' # Query the table and make sure the data types match up to what you had in mind for each column.\npd.read_sql(q,conn)", "_____no_output_____" ], [ "q = 'SELECT * FROM fundraising'\nsql_iter = pd.read_sql(q,conn,chunksize=500)", "_____no_output_____" ], [ "for chunk in sql_iter:\n print(chunk.head(5))\n break", "_____no_output_____" ], [ "next(sql_iter)", "_____no_output_____" ], [ "!wc -l 'fundraising.db' \n#no. of lines", "_____no_output_____" ], [ "#!wc --help\n!wc -c 'fundraising.db' \n# byte counts", "_____no_output_____" ], [ "10878976/(2**20) # file size of the database is 10.4mb", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e71938005fda505557b336768f2993cd651bd6d8
29,433
ipynb
Jupyter Notebook
CS246_Colab_0_(Spark_Tutorial).ipynb
Sundragon1993/Mining-Massive-Data-Sets-CS246
66c7a4007c22866a0302ec1ef20caaf2c9c9b1fe
[ "MIT" ]
null
null
null
CS246_Colab_0_(Spark_Tutorial).ipynb
Sundragon1993/Mining-Massive-Data-Sets-CS246
66c7a4007c22866a0302ec1ef20caaf2c9c9b1fe
[ "MIT" ]
null
null
null
CS246_Colab_0_(Spark_Tutorial).ipynb
Sundragon1993/Mining-Massive-Data-Sets-CS246
66c7a4007c22866a0302ec1ef20caaf2c9c9b1fe
[ "MIT" ]
null
null
null
27.898578
396
0.507186
[ [ [ "<a href=\"https://colab.research.google.com/github/Sundragon1993/Mining-Massive-Data-Sets-CS246/blob/main/CS246_Colab_0_(Spark_Tutorial).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# CS246 - Colab 0\n## Spark Tutorial\n\nIn this tutorial you will learn how to use [Apache Spark](https://spark.apache.org) in local mode on a Colab enviroment.\n\nCredits to [Tiziano Piccardi](http://piccardi.me/) for his Spark Tutorial used in the Applied Data Analysis class at EPFL.", "_____no_output_____" ], [ "### Setup", "_____no_output_____" ], [ "Let's setup Spark on your Colab environment. Run the cell below!", "_____no_output_____" ] ], [ [ "!pip install pyspark\n!pip install -U -q PyDrive\n!apt install openjdk-8-jdk-headless -qq\nimport os\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"", "_____no_output_____" ] ], [ [ "Now we authenticate a Google Drive client to download the file we will be processing in our Spark job.\n\n**Make sure to follow the interactive instructions.**", "_____no_output_____" ] ], [ [ "from pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\n\n# Authenticate and create the PyDrive client\nauth.authenticate_user()\ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)", "_____no_output_____" ], [ "id='1L6pCQkldvdBoaEhRFzL0VnrggEFvqON4'\ndownloaded = drive.CreateFile({'id': id}) \ndownloaded.GetContentFile('Bombing_Operations.json.gz')\n\nid='14dyBmcTBA32uXPxDbqr0bFDIzGxMTWwl'\ndownloaded = drive.CreateFile({'id': id}) \ndownloaded.GetContentFile('Aircraft_Glossary.json.gz') ", "_____no_output_____" ] ], [ [ "If you executed the cells above, you should be able to see the files *Bombing_Operations.json.gz* and *Aircraft_Glossary.json.gz* under the \"Files\" tab on the left panel.", "_____no_output_____" ] ], [ [ "# Let's import the libraries we will need\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport pyspark\nfrom pyspark.sql import *\nfrom pyspark.sql.functions import *\nfrom pyspark import SparkContext, SparkConf", "_____no_output_____" ] ], [ [ "Let's initialize the Spark context.\n", "_____no_output_____" ] ], [ [ "# create the session\nconf = SparkConf().set(\"spark.ui.port\", \"4050\")\n\n# create the context\nsc = pyspark.SparkContext(conf=conf)\nspark = SparkSession.builder.getOrCreate()", "_____no_output_____" ] ], [ [ "You can easily check the current version and get the link of the web interface. In the Spark UI, you can monitor the progress of your job and debug the performance bottlenecks (if your Colab is running with a **local runtime**).", "_____no_output_____" ] ], [ [ "spark", "_____no_output_____" ] ], [ [ "If you are running this Colab on the Google hosted runtime, the cell below will create a *ngrok* tunnel which will allow you to still check the Spark UI.", "_____no_output_____" ] ], [ [ "!wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip\n!unzip ngrok-stable-linux-amd64.zip\nget_ipython().system_raw('./ngrok http 4050 &')\n!curl -s http://localhost:4040/api/tunnels | python3 -c \\\n \"import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])\"", "_____no_output_____" ] ], [ [ "# Vietnam War\n\n**Pres. Johnson**: _What do you think about this Vietnam thing? 
I’d like to hear you talk a little bit._\n\n**Sen. Russell**: _Well, frankly, Mr. President, it’s the damn worse mess that I ever saw, and I don’t like to brag and I never have been right many times in my life, but I knew that we were going to get into this sort of mess when we went in there._\n\nMay 27, 1964\n\n![banner](https://raw.githubusercontent.com/epfl-ada/2019/c17af0d3c73f11cb083717b7408fedd86245dc4d/Tutorials/04%20-%20Scaling%20Up/img/banner.jpg)", "_____no_output_____" ], [ "----\n\nThe Vietnam War, also known as the Second Indochina War, and in Vietnam as the Resistance War Against America or simply the American War, was a conflict that occurred in Vietnam, Laos, and Cambodia from 1 November 1955 to the fall of Saigon on 30 April 1975. It was the second of the Indochina Wars and was officially fought between North Vietnam and the government of South Vietnam.\n\n**The dataset describes all the air force operation in during the Vietnam War.**\n\n**Bombing_Operations** [Get the dataset here](https://drive.google.com/a/epfl.ch/file/d/1L6pCQkldvdBoaEhRFzL0VnrggEFvqON4/view?usp=sharing)\n\n- AirCraft: _Aircraft model (example: EC-47)_\n- ContryFlyingMission: _Country_\n- MissionDate: _Date of the mission_\n- OperationSupported: _Supported War operation_ (example: [Operation Rolling Thunder](https://en.wikipedia.org/wiki/Operation_Rolling_Thunder))\n- PeriodOfDay: _Day or night_\n- TakeoffLocation: _Take off airport_\n- TimeOnTarget\n- WeaponType\n- WeaponsLoadedWeight\n\n**Aircraft_Glossary** [Get the dataset here](https://drive.google.com/a/epfl.ch/file/d/14dyBmcTBA32uXPxDbqr0bFDIzGxMTWwl/view?usp=sharing)\n\n- AirCraft: _Aircraft model (example: EC-47)_\n- AirCraftName\n- AirCraftType\n\n**Dataset Information:**\n\nTHOR is a painstakingly cultivated database of historic aerial bombings from World War I through Vietnam. THOR has already proven useful in finding unexploded ordnance in Southeast Asia and improving Air Force combat tactics:\nhttps://www.kaggle.com/usaf/vietnam-war-bombing-operations", "_____no_output_____" ], [ "Load the datasets:", "_____no_output_____" ] ], [ [ "Bombing_Operations = spark.read.json(\"Bombing_Operations.json.gz\")\nAircraft_Glossary = spark.read.json(\"Aircraft_Glossary.json.gz\")", "_____no_output_____" ] ], [ [ "Check the schema:", "_____no_output_____" ] ], [ [ "Bombing_Operations.printSchema()", "_____no_output_____" ], [ "Aircraft_Glossary.printSchema()", "_____no_output_____" ] ], [ [ "Get a sample with `take()`:", "_____no_output_____" ] ], [ [ "Bombing_Operations.take(3)", "_____no_output_____" ] ], [ [ "Get a formatted sample with `show()`:", "_____no_output_____" ] ], [ [ "Aircraft_Glossary.show()", "_____no_output_____" ], [ "print(\"In total there are {0} operations\".format(Bombing_Operations.count()))", "_____no_output_____" ] ], [ [ "## Question 1: Which countries are involved and in how many missions? 
\n\nKeywords: `Dataframe API`, `SQL`, `group by`, `sort`", "_____no_output_____" ], [ "Let's group the missions by `ContryFlyingMission` and count how many records exist:", "_____no_output_____" ] ], [ [ "missions_counts = Bombing_Operations.groupBy(\"ContryFlyingMission\")\\\n .agg(count(\"*\").alias(\"MissionsCount\"))\\\n .sort(desc(\"MissionsCount\"))\nmissions_counts.show()", "_____no_output_____" ] ], [ [ "In this case we used the DataFrame API, but we could rewite the `groupBy` using pure SQL:", "_____no_output_____" ] ], [ [ "Bombing_Operations.registerTempTable(\"Bombing_Operations\")\n\nquery = \"\"\"\nSELECT ContryFlyingMission, count(*) as MissionsCount\nFROM Bombing_Operations\nGROUP BY ContryFlyingMission\nORDER BY MissionsCount DESC\n\"\"\"\n\nmissions_counts = spark.sql(query)\nmissions_counts.show()", "_____no_output_____" ] ], [ [ "The Dataframe is small enough to be moved to Pandas:", "_____no_output_____" ] ], [ [ "missions_count_pd = missions_counts.toPandas()\nmissions_count_pd.head()", "_____no_output_____" ] ], [ [ "Let's plot a barchart with the number of missions by country:", "_____no_output_____" ] ], [ [ "pl = missions_count_pd.plot(kind=\"bar\", \n x=\"ContryFlyingMission\", y=\"MissionsCount\", \n figsize=(10, 7), log=True, alpha=0.5, color=\"olive\")\npl.set_xlabel(\"Country\")\npl.set_ylabel(\"Number of Missions (Log scale)\")\npl.set_title(\"Number of missions by Country\")", "_____no_output_____" ] ], [ [ "----", "_____no_output_____" ], [ "## Questions 2: Show the number of missions in time for each of the countries involved.\n\nKeywords: `group by`, `parse date`, `plot`\n\nLet's select the relevant columns:", "_____no_output_____" ] ], [ [ "missions_countries = Bombing_Operations.selectExpr([\"to_date(MissionDate) as MissionDate\", \"ContryFlyingMission\"])\nmissions_countries", "_____no_output_____" ] ], [ [ "The filed MissionDate is converted to a Python `date` object.\n\nNow we can group by `MissionDate` and `ContryFlyingMission` to get the count:", "_____no_output_____" ] ], [ [ "missions_by_date = missions_countries\\\n .groupBy([\"MissionDate\", \"ContryFlyingMission\"])\\\n .agg(count(\"*\").alias(\"MissionsCount\"))\\\n .sort(asc(\"MissionDate\")).toPandas()\nmissions_by_date.head()", "_____no_output_____" ] ], [ [ "Now we can plot the content with a different series for each country:", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(10, 6))\n\n# iterate the different groups to create a different series\nfor country, missions in missions_by_date.groupby(\"ContryFlyingMission\"): \n plt.plot(missions[\"MissionDate\"], missions[\"MissionsCount\"], label=country)\n\nplt.legend(loc='best')", "_____no_output_____" ] ], [ [ "We can observe how South Vietnam increased its missions starting from 1970. The drop in 1973 is motivated by the [Paris Peace Accords](https://en.wikipedia.org/wiki/Paris_Peace_Accords) that took place on January 27th, 1973, to establish peace in Vietnam and end the war.", "_____no_output_____" ], [ "----", "_____no_output_____" ], [ "## Question 3: Who bombed this location?\n\nKeywords: `RDD map reduce` `cache` `save results`\n\n<img style=\"float: right;\" src=\"https://raw.githubusercontent.com/epfl-ada/2019/c17af0d3c73f11cb083717b7408fedd86245dc4d/Tutorials/04%20-%20Scaling%20Up/img/Hanoi_POL1966.jpg\">\n\nThis picture is the Hanoi POL facility (North Vietnam) burning after it was attacked by the U.S. Air Force on 29 June 1966 in the context of the Rolling Thunder operation. 
\n\nWe are interested in discovering what was the most common take-off location during that day.", "_____no_output_____" ] ], [ [ "jun_29_operations = Bombing_Operations.where(\"MissionDate = '1966-06-29' AND TargetCountry='NORTH VIETNAM'\")", "_____no_output_____" ] ], [ [ "Which coutries scheduled missions that day?", "_____no_output_____" ] ], [ [ "jun_29_operations.groupBy(\"ContryFlyingMission\").agg(count(\"*\").alias(\"MissionsCount\")).toPandas()", "_____no_output_____" ] ], [ [ "Most of the operation that day were performed by USA airplanes.", "_____no_output_____" ] ], [ [ "jun_29_operations.take(1)", "_____no_output_____" ] ], [ [ "You can specify to cache the content in memory:", "_____no_output_____" ] ], [ [ "jun_29_operations.cache()", "_____no_output_____" ] ], [ [ "Now you can count the number of rows and move the content to the cache:", "_____no_output_____" ] ], [ [ "%time jun_29_operations.count()", "_____no_output_____" ] ], [ [ "The second time the content is cached and the operation is much faster:", "_____no_output_____" ] ], [ [ "%time jun_29_operations.count()", "_____no_output_____" ] ], [ [ "You can also save the results on a file...", "_____no_output_____" ] ], [ [ "jun_29_operations.write.mode('overwrite').json(\"jun_29_operations.json\")", "_____no_output_____" ] ], [ [ "... and read from the file:", "_____no_output_____" ] ], [ [ "jun_29_operations = spark.read.json(\"jun_29_operations.json\")", "_____no_output_____" ] ], [ [ "We can use the simple DataFrame API...", "_____no_output_____" ] ], [ [ "TakeoffLocationCounts = jun_29_operations\\\n .groupBy(\"TakeoffLocation\").agg(count(\"*\").alias(\"MissionsCount\"))\\\n .sort(desc(\"MissionsCount\"))\nTakeoffLocationCounts.show()", "_____no_output_____" ] ], [ [ "... or the explicit Map/Reduce format with RDDs.\n\nFirst we emit a pair in the format (Location, 1):", "_____no_output_____" ] ], [ [ "all_locations = jun_29_operations.rdd.map(lambda row: (row.TakeoffLocation, 1))\nall_locations.take(3)", "_____no_output_____" ] ], [ [ "Then, we sum counters in the reduce step, and we sort by count:", "_____no_output_____" ] ], [ [ "locations_counts_rdd = all_locations.reduceByKey(lambda a, b: a+b).sortBy(lambda r: -r[1])\nlocations_counts_rdd.take(3)", "_____no_output_____" ] ], [ [ "Now we can convert the RDD in dataframe by mapping the pairs to objects of type `Row`", "_____no_output_____" ] ], [ [ "locations_counts_with_schema = locations_counts_rdd.map(lambda r: Row(TakeoffLocation=r[0], MissionsCount=r[1]))\nlocations_counts = spark.createDataFrame(locations_counts_with_schema)\nlocations_counts.show()", "_____no_output_____" ] ], [ [ "<img style=\"float: right;\" src=\"https://raw.githubusercontent.com/epfl-ada/2019/c17af0d3c73f11cb083717b7408fedd86245dc4d/Tutorials/04%20-%20Scaling%20Up/img/USS_Constellation.jpg\">\n\n\nThat day the most common take-off location was the ship USS Constellation (CV-64). We cannot univocally identify one take off location, but we can reduce the possible candidates. Next steps: explore TimeOnTarget feature.\n\n_USS Constellation (CV-64), a Kitty Hawk-class supercarrier, was the third ship of the United States Navy to be named in honor of the \"new constellation of stars\" on the flag of the United States. 
One of the fastest ships in the Navy, as proven by her victory during a battlegroup race held in 1985, she was nicknamed \"Connie\" by her crew and officially as \"America's Flagship\"._", "_____no_output_____" ], [ "----", "_____no_output_____" ], [ "## Questions 4: What is the most used aircraft type during the Vietnam war (number of missions)?\n\nKeywords: `join` `group by`", "_____no_output_____" ], [ "Let's check the content of `Aircraft_Glossary`:", "_____no_output_____" ] ], [ [ "Aircraft_Glossary.show(5)", "_____no_output_____" ] ], [ [ "We are interested in the filed `AirCraftType`.", "_____no_output_____" ] ], [ [ "Bombing_Operations.select(\"AirCraft\").show(5)", "_____no_output_____" ] ], [ [ "We can join on the column `AirCraft` of both dataframes.", "_____no_output_____" ], [ "With Dataframe API:", "_____no_output_____" ] ], [ [ "missions_joined = Bombing_Operations.join(Aircraft_Glossary, \n Bombing_Operations.AirCraft == Aircraft_Glossary.AirCraft)\nmissions_joined", "_____no_output_____" ] ], [ [ "We can select only the field we are interested in:", "_____no_output_____" ] ], [ [ "missions_aircrafts = missions_joined.select(\"AirCraftType\")\nmissions_aircrafts.show(5)", "_____no_output_____" ] ], [ [ "And finally we can group by `AirCraftType` and count:", "_____no_output_____" ] ], [ [ "missions_aircrafts.groupBy(\"AirCraftType\").agg(count(\"*\").alias(\"MissionsCount\"))\\\n .sort(desc(\"MissionsCount\"))\\\n .show()", "_____no_output_____" ] ], [ [ "In alternative we can rewrite this in pure SQL:", "_____no_output_____" ] ], [ [ "Bombing_Operations.registerTempTable(\"Bombing_Operations\")\nAircraft_Glossary.registerTempTable(\"Aircraft_Glossary\")\n\nquery = \"\"\"\nSELECT AirCraftType, count(*) MissionsCount\nFROM Bombing_Operations bo\nJOIN Aircraft_Glossary ag\nON bo.AirCraft = ag.AirCraft\nGROUP BY AirCraftType\nORDER BY MissionsCount DESC\n\"\"\"\n\nspark.sql(query).show()", "_____no_output_____" ] ], [ [ "The aircrafts of type `Fighter Jet Bomber` participated in most of the missions in the Vietnam war.\n\nNote: This dataset would require further cleaning and normalization. See `Fighter Jet Bomber`, `Jet Fighter Bomber`, `Fighter bomber jet`", "_____no_output_____" ] ] ]
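[ [ "As a follow-up to the note above, here is a minimal, hypothetical sketch of one way to merge spelling variants such as `Fighter Jet Bomber` and `Jet Fighter Bomber`: normalize each type string to a lowercase, alphabetically sorted bag of words before grouping. This is just one possible cleaning rule shown for illustration, not part of the original tutorial; it assumes the `missions_joined` DataFrame and the wildcard `pyspark.sql.functions` imports from the cells above.", "_____no_output_____" ] ], [ [ "from pyspark.sql.functions import udf, col\nfrom pyspark.sql.types import StringType\n\n# Hypothetical normalization: lowercase, split into words, sort, and rejoin,\n# so word-order variants of the same aircraft type collapse onto one key.\nnormalize_type = udf(lambda s: \" \".join(sorted(s.lower().split())) if s else s, StringType())\n\nmissions_joined.select(\"AirCraftType\")\\\n    .withColumn(\"NormalizedType\", normalize_type(col(\"AirCraftType\")))\\\n    .groupBy(\"NormalizedType\").agg(count(\"*\").alias(\"MissionsCount\"))\\\n    .sort(desc(\"MissionsCount\"))\\\n    .show()", "_____no_output_____" ] ] ]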
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7193ff69583c320ed48952405b45b462542fda9
26,716
ipynb
Jupyter Notebook
class_materials/File_Writing_and_Dictionaries/2019/lab_exercise/lab5_exercises.ipynb
sklasfeld/python_bootcamp
3b48a70f1bfd620788f46d869485b55e5622222f
[ "MIT" ]
null
null
null
class_materials/File_Writing_and_Dictionaries/2019/lab_exercise/lab5_exercises.ipynb
sklasfeld/python_bootcamp
3b48a70f1bfd620788f46d869485b55e5622222f
[ "MIT" ]
8
2017-09-19T13:31:32.000Z
2020-09-20T15:59:46.000Z
class_materials/File_Writing_and_Dictionaries/2019/lab_exercise/lab5_exercises.ipynb
sklasfeld/python_bootcamp
3b48a70f1bfd620788f46d869485b55e5622222f
[ "MIT" ]
10
2017-09-18T17:58:44.000Z
2020-07-11T04:10:28.000Z
26.583085
456
0.559665
[ [ [ "# Programming Bootcamp 2018\n# Lesson 5 Exercises\n---", "_____no_output_____" ], [ "**Earning points (optional)**\n\nIf you would like to get points/feedback for your work, please **submit your notebook to Piazza**. To do this, follow these steps:\n\n1. Click \"New Post\"\n2. For \"Post Type\" select `Question`\n3. For \"Post to\" select `Individual Student(s)/Instructors(s)`\n4. Where is says \"Enter one or more names...\" type `Instructors`\n5. For \"Select Folder(s)\" select `lab5`\n6. In \"Summary\" type in \"[Your Full Name] Lab5 Submission\"\n7. In the \"Details\" click \"Insert\" > \"Insert File\" and then insert your python notebook.\n8. You can then write whatever else you want in the Summary.\n9. Then click \"Post My Question to PROGRAMMING BOOTCAMP!\":\n\nPlease also **write your name below**. You do not need to complete all the problems to get points. Points will be assigned, but completion is all that actually matters. Those who consistenly participate throughout bootcamp will get a ~prize~.\n\n**Due Date:** Sunday 7/28", "_____no_output_____" ], [ "**Name**: ", "_____no_output_____" ], [ "---\n## 1. Guess the output: dictionary practice (1pt)\n\nFor the following blocks of code, first try to **guess what the output will be, and then run the code yourself**. Points will be given for filling in the guesses; guessing wrong won't be penalized. **If you expect an error, please explain why you expect that.**", "_____no_output_____" ] ], [ [ "# run this cell first!\nfruits = {\"apple\":\"red\", \"banana\":\"yellow\", \"grape\":\"purple\"}", "_____no_output_____" ], [ "print (fruits[\"banana\"])", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "query = \"apple\"\nprint (fruits[query])", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "print (fruits[0])", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "print (list(fruits.keys()))", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "print (list(fruits.values()))", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ] ], [ [ "print (list(fruits.items()))", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "for key in fruits:\n print (fruits[key])", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "del fruits[\"banana\"]\nprint (fruits)", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "print (fruits[\"pear\"]) ", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "fruits[\"pear\"] = \"green\"\nprint (fruits[\"pear\"])", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ], [ [ "fruits[\"apple\"] = fruits[\"apple\"] + \" or green\"\nprint (fruits[\"apple\"])", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ "> Since fruits[\"apple\"] contains a string, we can concatenate to that string just like we would normally. And if instead the value was a number, we could add to it. Basically, you can treat the \"dict[key]\" as a variable that holds whatever the value is. ", "_____no_output_____" ], [ "---\n## 2. 
On your own: using dictionaries (6pts)\n\nUsing the info in the table below, write code to accomplish the following tasks.\n\n| Name | Favorite Food |\n|:---------:|:-------------:|\n| Wilfred | Steak |\n| Manfred | French fries |\n| Wadsworth | Spaghetti |\n| Jeeves | Ice cream |\n", "_____no_output_____" ], [ "**(A)** Create a dictionary based on the data above, where each person's name is a key, and their favorite foods are the values.", "_____no_output_____" ], [ "**(B)** Using a `for` loop, go through the dictionary you created above and print each name and food combination in the format:\n\n <NAME>'s favorite food is <FOOD>", "_____no_output_____" ], [ "**(C)** (1) Change the dictionary so that Wilfred's favorite food is Pizza. (2) Add a new entry for Mitsworth, whose favorite food is Tuna. \n\nDo not recreate the whole dictionary while doing these things. Edit the dictionary you created in (A) using the syntax described in the lecture.", "_____no_output_____" ], [ "**(D)** Prompt the user to input a name. Check if the name they entered is a valid key in the dictionary using an `if` statement. If the name is in the dictionary, print out the corresponding favorite food. If not, print a message saying \"That name is not in our database\".", "_____no_output_____" ], [ "**(E)** Print just the names in the dictionary in alphabetical order. Use the sorting example from the slides.", "_____no_output_____" ], [ "**(F)** Print just the names in sorted order based on their favorite food. Use the value-sorting example from the slides.", "_____no_output_____" ], [ "---\n## 3. File writing (3pts)", "_____no_output_____" ], [ "**(A)** Write code that prints \"Hello, world\" to a file called `hello.txt`", "_____no_output_____" ], [ "**(B)** Write code that prints the following text to a file called `meow.txt`. It must be formatted exactly as it here (you will need to use \\n and \\t):\n```\nDear Mitsworth,\n \n Meow, meow meow meow.\n \nSincerely,\nA friend\n```", "_____no_output_____" ], [ "**(C)** Write code that reads in the gene IDs from `genes.txt` and prints the **unique** gene IDs to a **new file** called `genes_unique.txt`. (You can re-use your code or the answer sheet from lab4 for getting the unique IDs.)", "_____no_output_____" ], [ "---\n## 4. The \"many counters\" problem (4pts)", "_____no_output_____" ], [ "**(A)** Write code that reads a file of sequences and tallies how many sequences there are of each length. Use `sequences3.txt` as input. \n\n*Hint: you can use a dictionary to keep track of all the tallies. For example:*", "_____no_output_____" ] ], [ [ "# hint code:\n\ntallyDict = {}\nseq = \"ATGCTGATCGATATA\"\nlength = len(seq)\n\nif length not in tallyDict:\n tallyDict[length] = 1 #initialize to 1 if this is the first occurrence of the length...\nelse:\n tallyDict[length] = tallyDict[length] + 1 #...otherwise just increment the count.", "_____no_output_____" ] ], [ [ "**(B)** Using the tally dictionary you created above, figure out which sequence length was the most common, and print it to the screen.", "_____no_output_____" ], [ "---\n## 5. Codon table (6pts)\n\nFor this question, use `codon_table.txt`, which contains a list of all possible codons and their corresponding amino acids. We will be using this info to create a dictionary, which will allow us to translate a nucleotide sequence into amino acids. 
Each part of this question builds off the previous parts.", "_____no_output_____" ], [ "**(A)** Thinkin' question (short answer, not code): If we want to create a codon dictionary and use it to translate nucleotide sequences, would it be better to use the codons or amino acids as keys? ", "_____no_output_____" ], [ "Your answer:", "_____no_output_____" ], [ "**(B)** Read in `codon_table.txt` (note that it has a header line) and use it to create a codon dictionary. Then use `input()` to prompt the user to enter a single codon (e.g. ATG) and print the amino acid corresponding to that codon to the screen.", "_____no_output_____" ], [ "**(C)** Now we will adapt the code in (B) to translate a longer sequence. Instead of prompting the user for a single codon, allow them to enter a longer sequence. First, check that the sequence they entered has a length that is a multiple of 3 (Hint: use the mod operator, %), and print an error message if it is not. If it is valid, then go on to translate every three nucleotides to an amino acid. Print the final amino acid sequence to the screen.", "_____no_output_____" ], [ "**(D)** Now, instead of taking user input, you will apply your translator to a set of sequences stored in a file. Read in the sequences from `sequences3.txt` (assume each line is a separate sequence), translate it to amino acids, and print it to a new file called `proteins.txt`.", "_____no_output_____" ], [ "---\n## 6A. Data structure woes (2pt)\n\n**(A) Passing a data structure to a function.** Guess the output of the following lines of code if you were to run them immediately following the code block below. Then run the code yourself to see if you're right.", "_____no_output_____" ] ], [ [ "# run this first!\n\ndef getMax(someList):\n someList.sort()\n x = someList[-1]\n return x\n\nscores = [9, 5, 7, 1, 8]\nmaxScore = getMax(scores)", "_____no_output_____" ], [ "print(maxScore)", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ] ], [ [ "print(someList)", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ] ], [ [ "print(scores)", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ "> Why does scores get sorted? \n\n> When you pass a data structure as a parameter to a function, it's not a **copy** of the data structure that gets passed (as what happens with regular variables). What gets passed is a **direct reference** to the data structure itself. \n\n> The reason this is done is because data structures are typically expected to be fairly large, and copying/re-assigning the whole thing can be both time- and memory-consuming. So doing things this way is more efficient. It can also surprise you, though, if you're not aware it's happening. If you would like to learn more about this, look up \"Pass by reference vs pass by value\".", "_____no_output_____" ], [ "**(B) Copying data structures.** Guess the output of the following code if you were to run them immediately following the code block below. Then run the code yourself to see if you're right.", "_____no_output_____" ] ], [ [ "# run this first!\nlist1 = [1, 2, 3, 4]\nlist2 = list1\nlist2[0] = \"HELLO\"", "_____no_output_____" ], [ "print(list2)", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ] ], [ [ "print(list1)", "_____no_output_____" ] ], [ [ "Your guess: ", "_____no_output_____" ], [ "> Yes, that's right--even when you try to make a new copy of a list, it's actually just a reference to the same list! This is called aliasing. 
The same thing will happen with a dictionary. This can really trip you up if you don't know it's happening.", "_____no_output_____" ], [ "So what if we want to make a truly separate copy? Here's a way for lists:", "_____no_output_____" ] ], [ [ "# for lists\nlist1 = [1, 2, 3, 4]\nlist2 = list(list1) #make a true copy of the list\nlist2[0] = \"HELLO\"\n\nprint(list2)\nprint(list1)", "_____no_output_____" ] ], [ [ "And here's a way for dictionaries:", "_____no_output_____" ] ], [ [ "# for dictionaries\ndict1 = {'A':1, 'B':2, 'C':3}\ndict2 = dict1.copy() #make a true copy of the dict\ndict2['A'] = 99\n\nprint(dict2)\nprint(dict1)", "_____no_output_____" ] ], [ [ "---\n## 7. Writing and Using Custom Function with Dictionaries (2 pt)", "_____no_output_____" ], [ "**(A)** (1pt) Create a function called \"reverse_compl\" that takes a single sequence as a parameter and returns the reverse complement. Note that we created this function previously in lab3, but now we can use a dictionary to shorten our code.", "_____no_output_____" ], [ "**(B)** (1pt) Create a function called \"read_fasta\" that takes a file name as a parameter (which is assumed to be in fasta format), puts each fasta entry into a dictionary (using the header line as a key and the sequence as a value), and then returns the dictionary.", "_____no_output_____" ], [ "**(C)** (1pt) Run the code below to show that your functions work. Try to fix any that have problems.", "_____no_output_____" ] ], [ [ "##### testing reverse_compl\nrevCompl = reverse_compl(\"GGGGTCGATGCAAATTCAAA\")\n\nif type(revCompl) != str:\n print (\">> Problem with reverse_compl: answer is not a string, it is a %s.\" % type(revCompl)) \nelif revCompl != \"TTTGAATTTGCATCGACCCC\":\n print (\">> Problem with reverse_compl: answer (%s) does not match expected (%s)\" % (revCompl, \"TTTGAATTTGCATCGACCCC\")) \nelse:\n print (\"reverse_compl: Passed.\")\n \n\n##### testing read_fasta\ntry:\n ins = open(\"horrible.fasta\", 'r')\nexcept IOError:\n print (\">> Can not test read_fasta because horrible.fasta is missing. Please add it to the directory with this notebook.\")\nelse:\n seqDict = read_fasta(\"horrible.fasta\")\n \n if type(seqDict) != dict:\n print (\">> Problem with read_fasta: answer is not a dictionary, it is a %s.\" % type(seqDict))\n elif len(seqDict) != 22:\n print (\">> Problem with read_fasta: # of keys in dictionary (%s) does not match expected (%s)\" % (len(seqDict), 22))\n else:\n print (\"read_fasta: Passed.\")", "_____no_output_____" ] ], [ [ "**(D)** (1pt) Read in `horrible.fasta` into a dictionary. Use **functions you created above** to print the reverse complement of each sequence to the screen.", "_____no_output_____" ], [ "**(E)** (3pts) Read in horrible.fasta into a dictionary. For each sequence, find the length and the gc content. GC content is the number of G's and C's in a DNA sequence divided by the total sequence length. Print the results to the screen in the following format:\n```\nSeqID Len GC\n... ... ...\n```\nThat is, print the header shown above (separating each column's title by a tab (`\\t`)), followed by the corresponding info about each sequence on a separate line. The \"columns\" should be separated by tabs. Remember that you can do this printing as you loop through the dictionary... that way you don't have to store the length and gc content.\n\n(In general, this is the sort of formatting you should use when printing data files!)\n\nFor this problem, you may want to use your `gc` function from Lab3. 
If you didn't finish those problems, feel free to use the code from the answer sheet, just make sure you understand why the code works! ", "_____no_output_____" ], [ "---\n\n---\n## Bonus question: Parsing fasta files (+2 bonus pts)\n\nThis question is optional, but if you complete it, I'll give you two bonus points. You won't lose points if you skip it.", "_____no_output_____" ], [ "Write code that reads sequences from a fasta file and stores them in a dictionary according to their header (i.e. use the header line as the key and sequence as the value). You will use `horrible.fasta` to test your code. \n\nIf you are not familiar with fasta files, they have the following general format:\n\n```\n>geneName1\nATCGCTAGTCGATCGATGGTTTCGCGTAGCGTTGCTAGCGTAGCTGATG\nTCGATCGATGGTTTCGCGTAGCGTTGCTAGCGTAGCTGATGATGCTCAA\nGCTGGATGGCTAGCTGATGCTAG\n>geneName2\nATCGATGGGCTGGATCGATGCGGCTCGGCGATCGA\n...\n```\n\nThere are many slight variations; for example the header often contains different information, depending where you got the file from, and the sequence for a given entry may span any number of lines. To write a good fasta parser, you must make as few assumptions about the formatting as possible. This will make your code more \"robust\". \n\nFor fasta files, pretty much the only things you can safely assume are that a new entry will be marked by the `>` sign, which is immediately followed by a (usually) unique header, and all sequence belonging to that entry will be located immediately below. However, you can't assume how many lines the sequence will take up.\n\nWith this in mind, write a robust fasta parser that reads in `horrible.fasta` and stores each sequence in a dictionary according to its header line. Call the dictionary `seqDict`. Remove any newline characters. Don't include the `>` sign in the header. *Hint: use string slicing or `.lstrip()`*", "_____no_output_____" ], [ "After you've written your code above and you think it works, run it and then run the following unit tests to to spot-check whether you did everything correctly. If you didn't name your dictionary `seqDict`, you'll need to change it below to whatever you named your dictionary. ", "_____no_output_____" ] ], [ [ "error = False\nif \">varlen2_uc001pmn.3_3476\" in seqDict:\n print (\"Remove > chars from headers!\")\n error = True\nelif \"varlen2_uc001pmn.3_3476\" not in seqDict:\n print (\"Something's wrong with your dictionary: missing keys\")\n error = True\nif \"varlen2_uc021qfk.1>2_1472\" not in seqDict:\n print (\"Only remove the > chars from the beginning of the header!\")\n error = True\nif len(seqDict[\"varlen2_uc009wph.3_423\"]) > 85:\n if \"\\n\" in seqDict[\"varlen2_uc009wph.3_423\"]:\n print (\"Remove newline chars from sequences\")\n error = True\n else:\n print (\"Length of sequences longer than expected for some reason\")\n error = True\nelif len(seqDict[\"varlen2_uc009wph.3_423\"]) < 85:\n print (\"Length of sequences shorter than expected for some reason\")\n error = True\n\nif error == False:\n print (\"Congrats, you passed all my tests!\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
e7194f8f99fa9d3f1b06687149da2626027624fe
56,199
ipynb
Jupyter Notebook
trash/smile-detection v2.ipynb
akshaypatel1811/smile-detection
268db6e963a10e2ee07963f2a55bc3555a45e0c9
[ "MIT" ]
null
null
null
trash/smile-detection v2.ipynb
akshaypatel1811/smile-detection
268db6e963a10e2ee07963f2a55bc3555a45e0c9
[ "MIT" ]
null
null
null
trash/smile-detection v2.ipynb
akshaypatel1811/smile-detection
268db6e963a10e2ee07963f2a55bc3555a45e0c9
[ "MIT" ]
null
null
null
71.318528
20,392
0.652609
[ [ [ "# Transfer Learning", "_____no_output_____" ] ], [ [ "from keras.applications import VGG16", "_____no_output_____" ], [ "# Load the convolutional base from imagenet\nconv_base = VGG16(weights='imagenet',\n include_top=False,\n input_shape=(64,64,3))", "_____no_output_____" ], [ "#conv_base.summary()", "_____no_output_____" ], [ "import os\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator", "_____no_output_____" ], [ "# train_dir = 'images/training'\n# validation_dir = 'images/testing'\n\ntrain_dir = 'SMILE_Dataset/train/'\nvalidation_dir = 'SMILE_Dataset/test/'", "_____no_output_____" ], [ "datagen = ImageDataGenerator(rescale=1./255) # Rescale images to 0-1\nbatch_size = 32 # Number of batches to read from default 32", "_____no_output_____" ], [ "def extract_features(directory, sample_count):\n \"\"\"\n extract the features when processing images through the convolutional base. The features will be fed to \n classification neural network\n \"\"\"\n # Create emtpy array\n features = np.zeros(shape=(sample_count, 2, 2, 512))\n # Here I added the shape \n labels = np.zeros(shape=(sample_count,2))\n # define the generator that is going to grab 20 images at a time and loop infinitly.\n generator = datagen.flow_from_directory(\n directory,\n target_size=(64,64),\n batch_size=batch_size,\n class_mode='categorical'\n )\n # Usefull for mapping label names\n label_map = (generator.class_indices)\n i=0\n \n # Extract features\n for inputs_batch, labels_batch in generator:\n features_batch = conv_base.predict(inputs_batch)\n features[i * batch_size : (i + 1) * batch_size] = features_batch\n labels[i * batch_size : (i + 1) * batch_size] = labels_batch\n i += 1\n # Break because the generator will loop infinitly\n if i * batch_size >= sample_count:\n break\n return features, labels, label_map", "_____no_output_____" ], [ "total_training = 0\nfor folder in os.listdir(\"images/testing\"):\n total_training += len(os.listdir( os.path.join('images/testing',folder)))\n print(len(os.listdir( os.path.join('images/testing',folder))))\nprint(total_training)", "100\n100\n200\n" ], [ "train_features, train_labels, label_map = extract_features(train_dir, 320)\nvalidation_features, validation_labels, label_map = extract_features(validation_dir, 80)", "Found 320 images belonging to 2 classes.\nFound 80 images belonging to 2 classes.\n" ], [ "train_features.shape", "_____no_output_____" ], [ "train_features = np.reshape(train_features, (320, 2 * 2 * 512))\nvalidation_features = np.reshape(validation_features, (80, 2 * 2 * 512))", "_____no_output_____" ], [ "train_features.shape", "_____no_output_____" ], [ "label_map2 = label_map", "_____no_output_____" ] ], [ [ "WE have transferer learned, now lets take this and feed it as input to our classification model.\n", "_____no_output_____" ] ], [ [ "from keras import models\nfrom keras import layers\nfrom keras import optimizers", "_____no_output_____" ], [ "model = models.Sequential()\n\nmodel.add(layers.Dense(256, activation='relu', input_dim= 2 * 2 * 512))\nmodel.add(layers.Dropout(0.2))\nmodel.add(layers.Dense(128, activation='relu'))\nmodel.add(layers.Dropout(0.2))\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(2, activation='softmax'))\n\n\nmodel.compile(optimizer=optimizers.RMSprop(lr=2e-5),\n loss='binary_crossentropy',\n metrics=['acc'])\n\nhistory = model.fit(train_features, train_labels,\n epochs = 150,\n batch_size = 32,\n validation_data=(validation_features, 
validation_labels))", "WARNING:tensorflow:From c:\\users\\justin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\tensorflow\\python\\ops\\math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nTrain on 320 samples, validate on 80 samples\nEpoch 1/150\n320/320 [==============================] - 0s 2ms/step - loss: 0.9090 - acc: 0.5312 - val_loss: 0.6782 - val_acc: 0.5000\nEpoch 2/150\n320/320 [==============================] - 0s 474us/step - loss: 0.8665 - acc: 0.5437 - val_loss: 0.6492 - val_acc: 0.5000\nEpoch 3/150\n320/320 [==============================] - 0s 439us/step - loss: 0.8548 - acc: 0.5312 - val_loss: 0.6285 - val_acc: 0.5375\nEpoch 4/150\n320/320 [==============================] - 0s 496us/step - loss: 0.7510 - acc: 0.5469 - val_loss: 0.6100 - val_acc: 0.6375\nEpoch 5/150\n320/320 [==============================] - 0s 411us/step - loss: 0.7656 - acc: 0.5719 - val_loss: 0.5955 - val_acc: 0.6500\nEpoch 6/150\n320/320 [==============================] - 0s 471us/step - loss: 0.7537 - acc: 0.6156 - val_loss: 0.5786 - val_acc: 0.7750\nEpoch 7/150\n320/320 [==============================] - 0s 446us/step - loss: 0.7273 - acc: 0.5750 - val_loss: 0.5622 - val_acc: 0.8125\nEpoch 8/150\n320/320 [==============================] - 0s 443us/step - loss: 0.7161 - acc: 0.6000 - val_loss: 0.5472 - val_acc: 0.8125\nEpoch 9/150\n320/320 [==============================] - 0s 452us/step - loss: 0.6653 - acc: 0.6219 - val_loss: 0.5329 - val_acc: 0.8000\nEpoch 10/150\n320/320 [==============================] - 0s 511us/step - loss: 0.6223 - acc: 0.6469 - val_loss: 0.5206 - val_acc: 0.8125\nEpoch 11/150\n320/320 [==============================] - 0s 446us/step - loss: 0.5877 - acc: 0.7156 - val_loss: 0.5092 - val_acc: 0.8125\nEpoch 12/150\n320/320 [==============================] - 0s 396us/step - loss: 0.5891 - acc: 0.6594 - val_loss: 0.4956 - val_acc: 0.8125\nEpoch 13/150\n320/320 [==============================] - 0s 408us/step - loss: 0.5918 - acc: 0.6781 - val_loss: 0.4826 - val_acc: 0.8125\nEpoch 14/150\n320/320 [==============================] - 0s 511us/step - loss: 0.5483 - acc: 0.7125 - val_loss: 0.4713 - val_acc: 0.8125\nEpoch 15/150\n320/320 [==============================] - 0s 424us/step - loss: 0.5508 - acc: 0.7094 - val_loss: 0.4622 - val_acc: 0.8125\nEpoch 16/150\n320/320 [==============================] - 0s 480us/step - loss: 0.5251 - acc: 0.7406 - val_loss: 0.4512 - val_acc: 0.8250\nEpoch 17/150\n320/320 [==============================] - 0s 499us/step - loss: 0.5083 - acc: 0.7656 - val_loss: 0.4431 - val_acc: 0.8125\nEpoch 18/150\n320/320 [==============================] - 0s 424us/step - loss: 0.4778 - acc: 0.7969 - val_loss: 0.4329 - val_acc: 0.8250\nEpoch 19/150\n320/320 [==============================] - 0s 427us/step - loss: 0.5008 - acc: 0.7531 - val_loss: 0.4252 - val_acc: 0.8250\nEpoch 20/150\n320/320 [==============================] - 0s 492us/step - loss: 0.5029 - acc: 0.7688 - val_loss: 0.4189 - val_acc: 0.8250\nEpoch 21/150\n320/320 [==============================] - 0s 467us/step - loss: 0.5012 - acc: 0.7594 - val_loss: 0.4133 - val_acc: 0.8250\nEpoch 22/150\n320/320 [==============================] - 0s 415us/step - loss: 0.4813 - acc: 0.7750 - val_loss: 0.4064 - val_acc: 0.8125\nEpoch 23/150\n320/320 [==============================] - 0s 436us/step - loss: 
0.4656 - acc: 0.7656 - val_loss: 0.4006 - val_acc: 0.8375\nEpoch 24/150\n320/320 [==============================] - 0s 508us/step - loss: 0.4617 - acc: 0.7937 - val_loss: 0.3953 - val_acc: 0.8250\nEpoch 25/150\n320/320 [==============================] - 0s 399us/step - loss: 0.4388 - acc: 0.8219 - val_loss: 0.3908 - val_acc: 0.8250\nEpoch 26/150\n320/320 [==============================] - 0s 458us/step - loss: 0.3999 - acc: 0.8500 - val_loss: 0.3877 - val_acc: 0.8250\nEpoch 27/150\n320/320 [==============================] - 0s 539us/step - loss: 0.4025 - acc: 0.8281 - val_loss: 0.3818 - val_acc: 0.8250\nEpoch 28/150\n320/320 [==============================] - 0s 386us/step - loss: 0.4026 - acc: 0.8156 - val_loss: 0.3787 - val_acc: 0.8250\nEpoch 29/150\n320/320 [==============================] - 0s 365us/step - loss: 0.3742 - acc: 0.8375 - val_loss: 0.3725 - val_acc: 0.8250\nEpoch 30/150\n320/320 [==============================] - 0s 458us/step - loss: 0.3428 - acc: 0.8656 - val_loss: 0.3741 - val_acc: 0.8250\nEpoch 31/150\n320/320 [==============================] - 0s 408us/step - loss: 0.3835 - acc: 0.8344 - val_loss: 0.3682 - val_acc: 0.8250\nEpoch 32/150\n320/320 [==============================] - 0s 415us/step - loss: 0.3728 - acc: 0.8469 - val_loss: 0.3655 - val_acc: 0.8250\nEpoch 33/150\n320/320 [==============================] - 0s 458us/step - loss: 0.3499 - acc: 0.8562 - val_loss: 0.3639 - val_acc: 0.8250\nEpoch 34/150\n320/320 [==============================] - 0s 514us/step - loss: 0.3243 - acc: 0.8875 - val_loss: 0.3630 - val_acc: 0.8250\nEpoch 35/150\n320/320 [==============================] - 0s 508us/step - loss: 0.3546 - acc: 0.8469 - val_loss: 0.3566 - val_acc: 0.8250\nEpoch 36/150\n320/320 [==============================] - 0s 486us/step - loss: 0.3782 - acc: 0.8500 - val_loss: 0.3603 - val_acc: 0.8250\nEpoch 37/150\n320/320 [==============================] - 0s 458us/step - loss: 0.3253 - acc: 0.8750 - val_loss: 0.3568 - val_acc: 0.8250\nEpoch 38/150\n320/320 [==============================] - 0s 477us/step - loss: 0.3005 - acc: 0.8875 - val_loss: 0.3561 - val_acc: 0.8250\nEpoch 39/150\n320/320 [==============================] - 0s 439us/step - loss: 0.3053 - acc: 0.8875 - val_loss: 0.3532 - val_acc: 0.8250\nEpoch 40/150\n320/320 [==============================] - 0s 505us/step - loss: 0.2912 - acc: 0.8844 - val_loss: 0.3513 - val_acc: 0.8250\nEpoch 41/150\n320/320 [==============================] - 0s 483us/step - loss: 0.3153 - acc: 0.8750 - val_loss: 0.3451 - val_acc: 0.8375\nEpoch 42/150\n320/320 [==============================] - 0s 433us/step - loss: 0.2866 - acc: 0.8969 - val_loss: 0.3463 - val_acc: 0.8250\nEpoch 43/150\n320/320 [==============================] - 0s 474us/step - loss: 0.3226 - acc: 0.8531 - val_loss: 0.3449 - val_acc: 0.8250\nEpoch 44/150\n320/320 [==============================] - 0s 402us/step - loss: 0.3226 - acc: 0.8469 - val_loss: 0.3406 - val_acc: 0.8375\nEpoch 45/150\n320/320 [==============================] - 0s 421us/step - loss: 0.2783 - acc: 0.9094 - val_loss: 0.3379 - val_acc: 0.8375\nEpoch 46/150\n320/320 [==============================] - 0s 415us/step - loss: 0.2603 - acc: 0.9125 - val_loss: 0.3354 - val_acc: 0.8375\nEpoch 47/150\n320/320 [==============================] - 0s 486us/step - loss: 0.3150 - acc: 0.8719 - val_loss: 0.3385 - val_acc: 0.8375\nEpoch 48/150\n320/320 [==============================] - 0s 464us/step - loss: 0.2902 - acc: 0.8906 - val_loss: 0.3328 - val_acc: 0.8500\nEpoch 49/150\n320/320 
[==============================] - 0s 477us/step - loss: 0.2747 - acc: 0.8906 - val_loss: 0.3397 - val_acc: 0.8375\nEpoch 50/150\n320/320 [==============================] - 0s 505us/step - loss: 0.2914 - acc: 0.8813 - val_loss: 0.3368 - val_acc: 0.8375\nEpoch 51/150\n320/320 [==============================] - 0s 527us/step - loss: 0.2528 - acc: 0.9062 - val_loss: 0.3431 - val_acc: 0.8250\nEpoch 52/150\n320/320 [==============================] - 0s 505us/step - loss: 0.2489 - acc: 0.9250 - val_loss: 0.3431 - val_acc: 0.8250\nEpoch 53/150\n320/320 [==============================] - 0s 499us/step - loss: 0.2325 - acc: 0.9094 - val_loss: 0.3396 - val_acc: 0.8250\nEpoch 54/150\n320/320 [==============================] - 0s 455us/step - loss: 0.2594 - acc: 0.9031 - val_loss: 0.3388 - val_acc: 0.8375\nEpoch 55/150\n320/320 [==============================] - 0s 477us/step - loss: 0.2708 - acc: 0.9000 - val_loss: 0.3392 - val_acc: 0.8250\nEpoch 56/150\n320/320 [==============================] - 0s 530us/step - loss: 0.2472 - acc: 0.9250 - val_loss: 0.3346 - val_acc: 0.8375\nEpoch 57/150\n320/320 [==============================] - 0s 561us/step - loss: 0.2389 - acc: 0.9094 - val_loss: 0.3324 - val_acc: 0.8375\nEpoch 58/150\n" ], [ "from sklearn.metrics import classification_report\n\ny_true = [0, 1, 1, 0, 1]\ny_pred = [0, 0, 1, 0, 1]\nlabels = ['no_smile','smile']\n\nprint(classification_report(y_true, y_pred, target_names=labels))", " precision recall f1-score support\n\n no_smile 0.67 1.00 0.80 2\n smile 1.00 0.67 0.80 3\n\n accuracy 0.80 5\n macro avg 0.83 0.83 0.80 5\nweighted avg 0.87 0.80 0.80 5\n\n" ], [ "import matplotlib.pyplot as plt\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and Validation accuracy')\nplt.legend()\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and Validation loss')\nplt.legend()\nplt.plot()", "_____no_output_____" ] ], [ [ "## Let's try it out with our own image!", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom keras.preprocessing import image", "_____no_output_____" ], [ "label_map2", "_____no_output_____" ], [ "def return_class(numeric_id):\n for class_pred, id_class in label_map2.items(): # for name, age in dictionary.iteritems(): (for Python 2.x)\n if id_class == numeric_id:\n return class_pred", "_____no_output_____" ], [ "def return_prediction(img_src,img_src_name):\n test_features, test_labels, label_map = extract_features(img_src, 1)\n test_features = np.reshape(test_features, (1, 2 * 2 * 512))\n class_id = return_class(model.predict_classes(test_features)[0])\n \n img = image.load_img(img_src_name, target_size=(64, 64))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x /= 255.\n\n plt.imshow(x[0]) \n plt.axis('off')\n plt.title(class_id)\n plt.show()", "_____no_output_____" ], [ "return_prediction('images/unique_test','images/unique_test/class/justin_smile.jpg')", "Found 1 images belonging to 1 classes.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e7197224a1579e8f5bac8886c69f3fe933a777a6
14,167
ipynb
Jupyter Notebook
examples/tutorials-zh/Introduction_to_Graph_Convolutions.ipynb
TaiCZ/deepchem
66de10d4f862e0077a82fd7460eea2b11b9472aa
[ "MIT" ]
3
2022-03-06T11:43:46.000Z
2022-03-07T07:22:06.000Z
examples/tutorials-zh/Introduction_to_Graph_Convolutions.ipynb
TaiCZ/deepchem
66de10d4f862e0077a82fd7460eea2b11b9472aa
[ "MIT" ]
null
null
null
examples/tutorials-zh/Introduction_to_Graph_Convolutions.ipynb
TaiCZ/deepchem
66de10d4f862e0077a82fd7460eea2b11b9472aa
[ "MIT" ]
null
null
null
31.136264
568
0.610221
[ [ [ "# 图卷积介绍\n\n在本教程中,我们将学习更多关于\"图卷积\"的知识。这些是处理分子数据最强大的深度学习工具之一,因为分子可以自然地被看作是图。\n\n![Molecular Graph](https://github.com/deepchem/deepchem/blob/master/examples/tutorials/assets/basic_graphs.gif?raw=1)\n\n请注意我们在高中时习惯的那种标准化学图是如何自然地将分子可视化为图形的。在本教程的剩余部分,我们将更详细地进行研究,这将使我们更深入地了解这些系统的工作原理。\n\n## Colab\n\n本教程和目录中的其余部分都是在 Google colab 中完成。如果您想在 colab 中打开此笔记本,您可以点击以下链接。\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/BioGavin/deepchem/blob/master/examples/tutorials-zh/Introduction_to_Graph_Convolutions.ipynb)\n\n", "_____no_output_____" ] ], [ [ "!pip install --pre deepchem", "_____no_output_____" ] ], [ [ "# 什么是图卷积?\n\n考虑一个标准的卷积神经网络(CNN),即通常用于处理图像的那种。输入是一个像素的网格,每个像素都有一个数据值的向量,例如红、绿、蓝三色通道。数据通过一系列的卷积层,每一层都将来自一个像素和它的邻居的数据结合起来,为这些像素产生一个新的数据向量。前期层检测小规模的局部模式,而后期层检测更大、更抽象的模式。卷积层通常与池化层交替进行,后者对局部区域进行一些操作,如最大池化或最小池化。\n\n图卷积也是类似的,但它们是在图上操作的。它们从图的每个节点的数据向量开始(例如,代表原子化学属性的节点数据)。卷积层和池化层汇聚来自相连节点的信息(例如,相互结合的原子),为每个节点产生一个新的数据向量。\n\n# 训练一个 GraphConvModel\n\n让我们使用 MoleculeNet 套件来加载 Tox21 数据集。为了使用图卷积网络,我们将特征生成器选项设置为 \"GraphConv\"。MoleculeNet 调用返回一个训练集、一个验证集和一个测试集供我们使用。它还返回`tasks`,一个任务名称的列表,以及 `transformers`,一个应用于预处理数据集的数据转换器的列表。(大多数深度网络是相当棘手的,需要一组数据转换器来确保训练的稳定进行。)", "_____no_output_____" ] ], [ [ "import deepchem as dc\n\ntasks, datasets, transformers = dc.molnet.load_tox21(featurizer='GraphConv')\ntrain_dataset, valid_dataset, test_dataset = datasets", "_____no_output_____" ] ], [ [ "现在让我们在这个数据集上训练一个图卷积网络。DeepChem 有一个 `GraphConvModel` 类,为了方便用户,它将一个标准的图卷积架构进行了包装。让我们实例化这个类取得一个对象并在我们的数据集上训练它。", "_____no_output_____" ] ], [ [ "n_tasks = len(tasks)\nmodel = dc.models.GraphConvModel(n_tasks, mode='classification')\nmodel.fit(train_dataset, nb_epoch=50)", "_____no_output_____" ] ], [ [ "让我们试着评估一下我们所训练的模型的性能。为此,我们需要定义一个指标,一个衡量模型性能的标准。`dc.metrics` 已经包含了一个指标集合。对于这个数据集,标准的做法是使用 ROC-AUC 分数,即接收者操作特征曲线下的面积(衡量精度和召回率之间的权衡)。幸运的是,ROC-AUC 分数已经在 DeepChem 中可用。\n\n为了衡量模型在这个指标下的性能,我们可以很方便地使用 `model.evaluate()` 函数。", "_____no_output_____" ] ], [ [ "metric = dc.metrics.Metric(dc.metrics.roc_auc_score)\nprint('Training set score:', model.evaluate(train_dataset, [metric], transformers))\nprint('Test set score:', model.evaluate(test_dataset, [metric], transformers))", "Training set score: {'roc_auc_score': 0.96959686893055}\nTest set score: {'roc_auc_score': 0.795793783300876}\n" ] ], [ [ "结果还不错,`GraphConvModel` 非常好用。但是引擎盖下发生了什么?我们可以自己构建 GraphConvModel 吗? 当然! 
DeepChem 为图卷积中涉及的所有计算提供 Keras 层。 我们将应用 DeepChem 的以下层。\n\n- `GraphConv` 层:该层实现图卷积。图卷积以非线性方式将每个节点的特征向量与相邻节点的特征向量组合在一起。这将信息“混合”在图的本地邻域中。\n\n- `GraphPool` 层:该层对邻域中原子的特征向量进行最大池化。您可以将此层视为类似于 2D 卷积的最大池化层,但它在图上运行。\n\n- `GraphGather` 层:许多图卷积网络操作每个图节点的特征向量。例如,对于一个分子,每个节点可能代表一个原子,网络将操纵原子特征向量,总结原子的局部化学性质。 但是,在应用程序结束时,我们可能希望使用分子级别的特征表示,该层通过组合所有节点级特征向量来创建图级特征向量。\n\n除此之外,我们将应用标准的神经网络层,例如 [Dense](https://keras.io/api/layers/core_layers/dense/), [BatchNormalization](https://keras.io/api/layers/normalization_layers/batch_normalization/) 和 [Softmax](https://keras.io/api/layers/activation_layers/softmax/) 层。", "_____no_output_____" ] ], [ [ "from deepchem.models.layers import GraphConv, GraphPool, GraphGather\nimport tensorflow as tf\nimport tensorflow.keras.layers as layers\n\nbatch_size = 100\n\nclass MyGraphConvModel(tf.keras.Model):\n\n def __init__(self):\n super(MyGraphConvModel, self).__init__()\n self.gc1 = GraphConv(128, activation_fn=tf.nn.tanh)\n self.batch_norm1 = layers.BatchNormalization()\n self.gp1 = GraphPool()\n\n self.gc2 = GraphConv(128, activation_fn=tf.nn.tanh)\n self.batch_norm2 = layers.BatchNormalization()\n self.gp2 = GraphPool()\n\n self.dense1 = layers.Dense(256, activation=tf.nn.tanh)\n self.batch_norm3 = layers.BatchNormalization()\n self.readout = GraphGather(batch_size=batch_size, activation_fn=tf.nn.tanh)\n\n self.dense2 = layers.Dense(n_tasks*2)\n self.logits = layers.Reshape((n_tasks, 2))\n self.softmax = layers.Softmax()\n\n def call(self, inputs):\n gc1_output = self.gc1(inputs)\n batch_norm1_output = self.batch_norm1(gc1_output)\n gp1_output = self.gp1([batch_norm1_output] + inputs[1:])\n\n gc2_output = self.gc2([gp1_output] + inputs[1:])\n batch_norm2_output = self.batch_norm1(gc2_output)\n gp2_output = self.gp2([batch_norm2_output] + inputs[1:])\n\n dense1_output = self.dense1(gp2_output)\n batch_norm3_output = self.batch_norm3(dense1_output)\n readout_output = self.readout([batch_norm3_output] + inputs[1:])\n\n logits_output = self.logits(self.dense2(readout_output))\n return self.softmax(logits_output)", "_____no_output_____" ] ], [ [ "我们现在可以更清楚地看到正在发生什么。有两个卷积块,每个卷积块包括一个 \"GraphConv\",然后是批量归一化,接着是一个 \"GraphPool\" 来做最大池化。我们最后有一个稠密层,另一个批量归一化,一个 `GraphGather` 来合并所有不同节点的数据,最后一个密集层来产生全局输出。\n\n现在让我们创建 DeepChem 模型,它将是我们刚刚创建的 Keras 模型的一个封装器。我们还将指定损失函数,以便模型知道要最小化的目标。", "_____no_output_____" ] ], [ [ "model = dc.models.KerasModel(MyGraphConvModel(), loss=dc.models.losses.CategoricalCrossEntropy())", "_____no_output_____" ] ], [ [ "这个模型的输入是什么?图卷积需要对每个分子进行完整的描述,包括节点(原子)的列表和对哪些节点之间相互结合的描述。事实上,如果我们检查数据集,就会发现特征数组包含了 `ConvMol` 类型的 Python 对象。", "_____no_output_____" ] ], [ [ "test_dataset.X[0]", "_____no_output_____" ] ], [ [ "模型期望数组作为它们的输入,而不是 Python 对象。我们必须将 `ConvMol` 对象转换成 `GraphConv`、`GraphPool` 和 `GraphGather` 层所期望的特定数组。幸运的是,`ConvMol` 类包括了这样做的代码,以及将所有的分子合并到一个批次中来创建一个单一的数组。\n\n以下代码创建了一个 Python 生成器,给定一批数据,生成输入、标签和权重的列表,其值为 Numpy 数组。`atom_features` 为每个原子保存一个长度为 75 的特征向量。其他的输入是为了支持 TensorFlow 的 minibatching。`degree_slice` 是一个索引便利,使其能够轻松地从所有具有指定程度的分子中找到原子。`membership` 决定了原子在分子中的成员资格(原子 `i` 属于分子 `membership[i]`)。`deg_adjs` 是一个包含按原子程度分组的邻接矩阵的列表。更多详细的内容,请查看[代码](https://github.com/deepchem/deepchem/blob/master/deepchem/feat/mol_graphs.py)。", "_____no_output_____" ] ], [ [ "from deepchem.metrics import to_one_hot\nfrom deepchem.feat.mol_graphs import ConvMol\nimport numpy as np\n\ndef data_generator(dataset, epochs=1):\n for ind, (X_b, y_b, w_b, ids_b) in enumerate(dataset.iterbatches(batch_size, epochs,\n deterministic=False, pad_batches=True)):\n multiConvMol = 
ConvMol.agglomerate_mols(X_b)\n    inputs = [multiConvMol.get_atom_features(), multiConvMol.deg_slice, np.array(multiConvMol.membership)]\n    for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):\n      inputs.append(multiConvMol.get_deg_adjacency_lists()[i])\n    labels = [to_one_hot(y_b.flatten(), 2).reshape(-1, n_tasks, 2)]\n    weights = [w_b]\n    yield (inputs, labels, weights)", "_____no_output_____" ] ], [ [ "Now we can train the model using `fit_generator(generator)`, which will use the data generator we defined to train the model.", "_____no_output_____" ] ], [ [ "model.fit_generator(data_generator(train_dataset, epochs=50))", "_____no_output_____" ] ], [ [ "Now that we have trained our graph convolutional network, let's evaluate its performance. Once again, we use the data generator we defined to evaluate the performance of the model.", "_____no_output_____" ] ], [ [ "print('Training set score:', model.evaluate_generator(data_generator(train_dataset), [metric], transformers))\nprint('Test set score:', model.evaluate_generator(data_generator(test_dataset), [metric], transformers))", "Training set score: {'roc_auc_score': 0.8425638289185731}\nTest set score: {'roc_auc_score': 0.7378436684114341}\n" ] ], [ [ "It worked! The model we built behaves almost identically to `GraphConvModel`. If you want to build your own custom models, you can follow the example we have provided here. We hope to see exciting constructions from you soon!", "_____no_output_____", "# Congratulations! Time to join the community!\n\nCongratulations on completing this tutorial notebook! If you enjoyed this tutorial and want to continue working with DeepChem, we encourage you to complete the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:\n\n## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem)\nThis helps build a shared understanding of the DeepChem project and the open-source drug discovery tools we are trying to build.\n\n## Join the DeepChem Gitter\nThe DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts many scientists, developers, and enthusiasts interested in deep learning for the life sciences. You are welcome to join!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e71972279dbe70da6e72b93efc91172d8455f473
28,114
ipynb
Jupyter Notebook
Code/1_PK/2_intracellular/estimate_ka.ipynb
kimheeye/ISL_PKPD
16774eeb9c9e8da299ce71e27e0e94b9b007b1a3
[ "MIT" ]
null
null
null
Code/1_PK/2_intracellular/estimate_ka.ipynb
kimheeye/ISL_PKPD
16774eeb9c9e8da299ce71e27e0e94b9b007b1a3
[ "MIT" ]
null
null
null
Code/1_PK/2_intracellular/estimate_ka.ipynb
kimheeye/ISL_PKPD
16774eeb9c9e8da299ce71e27e0e94b9b007b1a3
[ "MIT" ]
null
null
null
128.374429
20,180
0.859358
[ [ [ "import numpy as np\nimport pandas as pd\nfrom scipy.integrate import solve_ivp\nfrom lmfit import minimize, Parameters, report_fit\nimport warnings\nimport matplotlib.pyplot as plt\nimport scipy.optimize\nfrom intersect import intersection", "_____no_output_____" ], [ "#Load and prepare data\npath_tp = '../../../Data/'\ndataTP_05mg = pd.read_csv(path_tp + 'Matthews2017/Digitized/PK/05mg_TP.csv')\ndataTP_10mg_ = pd.read_csv(path_tp + 'Matthews2017/Digitized/PK/10mg_TP.csv')\ndataTP_10mg_w1 = pd.read_csv(path_tp + 'Grobler2016/Digitized/PK/10mg_TP_W1.csv')\ndataTP_10mg_w3 = pd.read_csv(path_tp + 'Grobler2016/Digitized/PK/10mg_TP_W3.csv')\ndataTP_30mg_ = pd.read_csv(path_tp + 'Matthews2017/Digitized/PK/30mg_TP.csv')\ndataTP_30mg_w1 = pd.read_csv(path_tp + 'Grobler2016/Digitized/PK/30mg_TP_W1.csv')\ndataTP_30mg_w3 = pd.read_csv(path_tp + 'Grobler2016/Digitized/PK/30mg_TP_W3.csv')\ndataTP_100mg_w1 = pd.read_csv(path_tp + 'Grobler2016/Digitized/PK/100mg_TP_W1.csv')\ndataTP_100mg_w3 = pd.read_csv(path_tp + 'Grobler2016/Digitized/PK/100mg_TP_W3.csv')\ndataTP_1mg = pd.read_csv(path_tp + 'Matthews2017/Digitized/PK/1mg_TP.csv')\ndataTP_2mg = pd.read_csv(path_tp + 'Matthews2017/Digitized/PK/2mg_TP.csv')\n\ndataTP_05mg = dataTP_05mg.dropna(axis='columns')\ndataTP_1mg = dataTP_1mg.dropna(axis='columns')\ndataTP_2mg = dataTP_2mg.dropna(axis='columns')\ndataTP_10mg_ = dataTP_10mg_.dropna(axis='columns')\ndataTP_10mg_w1 = dataTP_10mg_w1.dropna(axis='columns')\ndataTP_10mg_w3 = dataTP_10mg_w3.dropna(axis='columns')\ndataTP_30mg_ = dataTP_30mg_.dropna(axis='columns')\ndataTP_30mg_w1 = dataTP_30mg_w1.dropna(axis='columns')\ndataTP_30mg_w3 = dataTP_30mg_w3.dropna(axis='columns')\ndataTP_100mg_w1 = dataTP_100mg_w1.dropna(axis='columns')\ndataTP_100mg_w3 = dataTP_100mg_w3.dropna(axis='columns')\n\ndataTP_05mg.columns = ['time','conc']\ndataTP_1mg.columns = ['time','conc']\ndataTP_2mg.columns = ['time','conc']\ndataTP_10mg_.columns = ['time','conc']\ndataTP_10mg_w1.columns = ['time','conc']\ndataTP_10mg_w3.columns = ['time','conc']\ndataTP_30mg_.columns = ['time','conc']\ndataTP_30mg_w1.columns = ['time','conc']\ndataTP_30mg_w3.columns = ['time','conc']\ndataTP_100mg_w1.columns = ['time','conc']\ndataTP_100mg_w3.columns = ['time','conc']\n\ndataTP_10mg = pd.concat([dataTP_10mg_, dataTP_10mg_w1, dataTP_10mg_w3]).sort_values(by=['time']).reset_index(drop=True)\ndataTP_30mg = pd.concat([dataTP_30mg_, dataTP_30mg_w1, dataTP_30mg_w3]).sort_values(by=['time']).reset_index(drop=True)\ndataTP_100mg = pd.concat([dataTP_100mg_w1, dataTP_100mg_w3]).sort_values(by=['time']).reset_index(drop=True)\n\ndataTP_10mg.time.iloc[0] = abs(dataTP_10mg.time.iloc[0])\n\ndatalist_TP = [dataTP_10mg, dataTP_30mg, dataTP_100mg, dataTP_2mg, dataTP_1mg, dataTP_05mg]\ndata_TP = pd.concat([dataTP_10mg, dataTP_30mg, dataTP_100mg, dataTP_2mg, dataTP_1mg, dataTP_05mg]) #assembled data\n\n#convert to unit nM\ndata_TP.conc = 6 - np.log10(180) + data_TP.conc.tolist()\nfor n in range(len(datalist_TP)): #to nM \n datalist_TP[n].conc = 6 - np.log10(180) + datalist_TP[n].conc.tolist()\n\nt_observed_TP = []; x_observed_TP = []\nfor d in range(len(datalist_TP)):\n t_observed_TP.append(datalist_TP[d].time.tolist())\n x_observed_TP.append(datalist_TP[d].conc.tolist())\nt_observed_TP[0][0] = 0.2", "_____no_output_____" ], [ "#Compartment model linear PK\ndef model_TP_linear(t, z, params):\n #parameters to estimate\n ka = params['ka'].value\n k13 = params['k13'].value\n Z0 = z[0]; Z1 = z[1]; Z2 = z[2]; Z3 = z[3]\n dZ0 = -ka*Z0\n dZ1 = (ka/Vc)*Z0 - 
k10*Z1 - k12*Z1 + k21*Z2\n    dZ2 = k12*Z1 - k21*Z2\n    dZ3 = k13*Z1 - k30*Z3\n    d = [dZ0,dZ1,dZ2,dZ3]\n    return d\n\ndef solve_ode_TP(z0, params):\n    logZ3 = [] #store intracellular concentration\n    for j in range(len(z0)):\n        t_obs = t_observed_TP[j]\n        res = solve_ivp(model_TP_linear, (tstart,tfinal[j]), z0[j], t_eval=t_obs,args=(params,))\n        for i in range(len(res.y[3])):\n            if res.y[3][i] > 0: logZ3.append(np.log10(res.y[3][i])) #filter negative values\n            else: logZ3.append(-10)\n    return logZ3\n\ndef residual_TP(params, z0, data_mat):\n    logZ3 = solve_ode_TP(z0, params)\n    return np.power(np.subtract(logZ3,data_TP.conc.tolist()),2)", "_____no_output_____" ], [ "dose = 3410 #nM\n#initial and final time\ntstart = 0\ntfinal = []\nfor i in range(len(datalist_TP)):\n    tfinal.append(t_observed_TP[i][-1])\n#initial state of the system \nz0 = [[10*dose,0,0,0],[30*dose,0,0,0], [100*dose,0,0,0],[2*dose,0,0,0],[dose,0,0,0],[0.5*dose,0,0,0]] \n\n#Estimated PK parameters\nk10 = 0.2355\nk12 = 0.1750\nk21 = 0.0259\nVc = 162.6864\nk30 = 0.0098", "_____no_output_____" ], [ "count = 0; maxcount = 10 #initial and maximal number of iterations\nRSSList = []\nk13List = np.arange(50,35,-(15/100))\nkaList= np.arange(30,70,(40/100))\n\nfor i in range(len(kaList)):\n    #sample ka and k13\n    va = kaList[i]\n    v13 = k13List[i]\n    parameters = Parameters()\n    parameters.add('ka', value=va, vary=False)\n    parameters.add('k13',value=v13, vary=False)\n    #solve ODE for intracellular conc.\n    log_Z3 = solve_ode_TP(z0, parameters)\n    rss = np.sum(np.power(np.subtract(data_TP.conc.tolist(),log_Z3),2))\n    RSSList.append(rss)", "_____no_output_____" ], [ "#plot result\nx, y = intersection(kaList, RSSList, k13List, RSSList) #calculate intersection between ka and k13 w.r.t. RSS\nplt.plot(k13List,RSSList,'r-',label = 'k13')\nplt.plot(kaList,RSSList,'g-',label = 'ka')\nplt.plot(x[0],y[0],'ko',label='intersect. '+str(round(x[0],2)))\nplt.xlabel('parameters')\nplt.ylabel('RSS')\nplt.legend(loc='best')\n#plt.title('ka vs. k13')\nplt.show() ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
e7197292ca848bfb8d0d5d3fb5ef61f43d4c33a5
198,435
ipynb
Jupyter Notebook
Chapter9-Reinforcement-Learning/ML_in_Finance_MarketMaking.ipynb
JJJerome/ML_Finance_Codes
8ab55dfb0530f7416ed3916cc30eb2faba7c23e8
[ "Unlicense" ]
415
2020-01-23T21:48:32.000Z
2022-03-27T20:38:15.000Z
Chapter9-Reinforcement-Learning/ML_in_Finance_MarketMaking.ipynb
huning2009/ML_Finance_Codes
047a9a456fa847ab438538aff658751675f3adc2
[ "Unlicense" ]
4
2021-12-31T20:21:30.000Z
2022-03-01T19:21:11.000Z
Chapter9-Reinforcement-Learning/ML_in_Finance_MarketMaking.ipynb
huning2009/ML_Finance_Codes
047a9a456fa847ab438538aff658751675f3adc2
[ "Unlicense" ]
190
2019-10-28T07:35:17.000Z
2022-03-30T10:31:21.000Z
76.057877
69,804
0.715126
[ [ [ "# ML_in_Finance_Market_Impact\n# Author: Matthew Dixon and Igor Halperin\n# Version: 1.0 (14.10.2019)\n# License: MIT\n# Email: [email protected]\n# Notes: tested on Mac OS X with Python 3.6.9 with the following packages:\n# numpy=1.18.1, matplotlib=3.1.3, tqdm=4.46\n# Citation: Please cite the following reference if this notebook is used for research purposes:\n# Bilokon P., Dixon M.F. and I. Halperin, Machine Learning in Finance: From Theory to Practice, Springer Graduate textbook Series, 2020. ", "_____no_output_____" ] ], [ [ "# The Market Making Problem", "_____no_output_____" ], [ "We can build on the previous two examples by considering the problem of high frequency market making. Unlike the previous example, we shall learn a time independent optimal policy.\n\nAssume that a market maker seeks to capture the bid-ask spread by placing one lot best bid and ask limit orders. They are required to strictly keep their inventory between -1 and 1. The problem is when to optimally quote either a bid or ask, or simply wait, each time there is a limit order book update. For example, sometimes it may be more advantageous to quote a bid to close out a short position if it will almost surely give an instantaneous net reward, other times it may be better to wait and capture a larger spread.\n\nIn this toy example, the agent uses the liquidity imbalance in the top of the order book as a proxy for price movement and, hence, fill probabilities. The example does not use market orders, knowledge of queue positions, cancellations and limit order placement at different levels of the ladder. These are left to later material and exercises.\n\nAt each non-uniform time update, $t$, the market feed provides best prices and depths $\\{p^a_t, q^a_t, p^b_t, q^b_t\\}$. The state space is the product of the inventory, $X_t\\in\\{-1,0,1\\}$, and gridded liquidity ratio $\\hat{R}_t= \\lfloor{\\frac{q^a_t}{q^a_t+q^b_t}N\\rfloor}\\in [0,1]$, where $N$ is the number of grid points and $q^a_t$ and $q^b_t$ are the depths of the best ask and bid. $\\hat{R}_t \\rightarrow 0$ is the regime where the mid-price will go up and an ask is filled. Vice versa for $\\hat{R}_t \\rightarrow 1$. The dimension of the state space is chosen to be $ 3 \\cdot 10 = 30$.\n\nA bid is filled with probability $\\epsilon_t:=\\hat{R}_t$ and an ask is filled with probability $1-\\epsilon_t$. The rewards are chosen to be the expected total P\\&L. If a bid is filled to close out a short holding, then the expected reward $r_t=-\\epsilon_t (\\Delta p_t+c)$, where $\\Delta p_t$ is the difference between the exit and entry price and $c$ is the transaction cost. For example, if the agent entered a short position at time $s<t$ with a filled ask at $p^a_s=100$ and closed out the position with a filled bid at $p^b_t=99$, then $\\Delta p_t=1$. The agent is penalized for quoting an ask or bid when the position is already short or long respectively.\n\nWe can now apply SARSA or Q-learning to learn optimal market making in such a simplified setting. 
For exploration needed for on-line learning, one can use an\n$\varepsilon$-greedy policy.", "_____no_output_____", "### Import the necessary libraries", "_____no_output_____" ] ], [ [ "import time\nimport copy\nimport random\nimport sys\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\n\nfrom tqdm.notebook import tqdm ", "_____no_output_____" ], [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "### Setting up \n#### Setting some global parameters ", "_____no_output_____", "Parameters of the reinforcement learning algorithms", "_____no_output_____" ] ], [ [ "EPSILON = 0.5 # Probability for exploration\n\nALPHA = 0.05 # Step size\n\nGAMMA = 1 # Discount factor for Q-Learning and Sarsa", "_____no_output_____" ] ], [ [ "Some parameters describing the problem and our implementation of it", "_____no_output_____" ] ], [ [ "ACTIONS = [0, 1, 2] # Possible actions\n\nNUM_INV_STEPS = 3 # Number of inventory states: long, short, flat\n\nNUM_PROB_STEPS = 10 # Number of discrete probabilities\n\n# Dimensions of the action-state value array:\nQ_DIMS = (NUM_INV_STEPS, NUM_PROB_STEPS, len(ACTIONS))\n\nFILL_PROBS = np.linspace(0, 1, 10) # Possible probability values\n\nc = 0 # Transaction cost\n\nMAX_ITER = np.float('inf') # Maximum number of iterations in one episode\n# (with `MAX_ITER = np.float('inf')`, the entire dataset will be used)", "_____no_output_____" ] ], [ [ "Note the discrete probability values in `FILL_PROBS`. These represent the probability of a bid being fulfilled, and the complement of the probability of an ask being fulfilled.", "_____no_output_____" ] ], [ [ "FILL_PROBS", "_____no_output_____" ] ], [ [ "These dictionaries map the names of the actions and positions to their index along the corresponding axis of the state-action value array `q_value` in the learning algorithms", "_____no_output_____" ] ], [ [ "actions = {'buy': 2, 'sell': 0, 'hold': 1} \npositions = {'flat': 0, 'long': 2, 'short': 1}", "_____no_output_____" ] ], [ [ "#### The data generator\n\nThe training data are in a .csv file. The data generator object yields the next Limit Order Book update from the file. 
When it reaches the end of the file, it raises `StopIteration`, and its `rewind()` method must be called to reset it.", "_____no_output_____" ] ], [ [ "class DataFeed(object):\n def __init__(self, data_RA):\n self.data_RA = data_RA\n self.rewind()\n def next(self):\n try:\n return self.__gen.__next__()\n except StopIteration as e:\n raise e\n def rewind(self):\n self.__gen = (row for row in self.data_RA)", "_____no_output_____" ], [ "csv_path = '../data/AMZN-L1.csv'\n\ndata_RA = np.genfromtxt(csv_path, delimiter=',', dtype=float)\ndata_generator = DataFeed(data_RA)", "_____no_output_____" ] ], [ [ "#### State\nThe state has four elements: \n- position (flat, long, short); \n- probability of ask fill (index in the array of probabilities)\n- prices (a dictionary of bid and ask)\n- entry price\n\nWe note, however, that the q-value is only a function of the position and probability, and the action taken.", "_____no_output_____" ], [ "Here, we define a function to \"rewind\" the data generator to the beginning of the dataset and initialise the state vector", "_____no_output_____" ] ], [ [ "def get_initial_state(data_generator):\n data_generator.rewind()\n \n # By convention we start with a flat position\n # and, therefore, no entry price\n position = positions['flat']\n entry_price = None\n \n ask, ask_depth, bid, bid_depth = data_generator.next()\n \n price = {'bid': bid/1000.0, 'ask': ask/1000.0} \n \n # Estimate the fill probability\n q = bid_depth / (bid_depth + ask_depth)\n # Quantise q and scale it to the integer index \n # q_ind is an index of the vector `FILL_PROBS`\n q_ind = np.int(q * NUM_PROB_STEPS) \n \n initial_state = position, q_ind, price, entry_price\n \n return initial_state", "_____no_output_____" ], [ "START = get_initial_state(data_generator)\nprint(START)", "(0, 5, {'bid': 2231.8, 'ask': 2239.5}, None)\n" ] ], [ [ "#### Setting up the environment", "_____no_output_____" ], [ "The step function that describes how the next state is obtained from the current state and the action taken. \n\nThe function returns the next state and the immediate reward obtained from the action taken. 
", "_____no_output_____" ] ], [ [ "def step(state, action):\n position, q, price, entry_price = state\n reward = 0 \n instant_pnl = 0\n done = False\n \n # The ask/bid fill probabilities always sum to 1, meaning\n # that either bid or ask orders can always be executed \n if FILL_PROBS[q] < np.random.rand():\n fill_bid = True\n fill_ask = False\n else:\n fill_bid = False\n fill_ask = True\n \n # Calculate the result of taking the selected action\n if (action == actions['buy']) and (fill_bid):\n reward = -c\n if (position == positions['flat']): \n position = positions['long']\n entry_price = price['bid'] \n elif(position == positions['short']): # closing out a short position \n position = positions['flat']\n exit_price = price['bid']\n instant_pnl = entry_price - exit_price\n entry_price = None\n elif position == positions['long']:\n raise ValueError(\"can't buy already got\")\n \n elif (action == actions['sell']) and (fill_ask):\n reward = -c\n if (position == positions['flat']):\n position = positions['short']\n entry_price = price['ask']\n elif (position == positions['long']): # closing out a long position \n exit_price = price['ask']\n position = positions['flat']\n instant_pnl = exit_price - entry_price\n entry_price = None\n elif position == positions['short']:\n raise ValueError(\"can't sell already short\")\n \n reward += instant_pnl\n \n try: \n # Get the next limit order book update\n ask, ask_depth, bid, bid_depth = data_generator.next()\n \n # Calculate the price and bid/ask fill probabilities for the next state\n price = {'bid': bid/1000.0, 'ask': ask/1000.0} \n\n # Estimate the fill probability\n q = bid_depth / (bid_depth + ask_depth)\n \n # Quantise q and scale it to the integer index \n # q_ind is an index of the vector `FILL_PROBS`\n q_ind = np.int(q * NUM_PROB_STEPS) \n \n except StopIteration as e:\n # This happens when the data generator reaches the end of the dataset\n raise e\n \n next_state = position, q_ind, price, entry_price\n return next_state, reward", "_____no_output_____" ], [ "# Check START state, action pairs and the associated reward\nprint(actions)\nstate = get_initial_state\nprint(step(START, 0))\nprint(step(START, 1))", "{'buy': 2, 'sell': 0, 'hold': 1}\n((1, 1, {'bid': 2238.1, 'ask': 2239.5}, 2239.5), 0)\n((0, 5, {'bid': 2237.5, 'ask': 2239.5}, None), 0)\n" ] ], [ [ "### Set up the agent's action policy\nGiven $S_t$ and $Q_t\\left( s_t, a_t\\right)$, this function chooses an action based on the epsilon-greedy algorithm", "_____no_output_____" ] ], [ [ "def choose_action(state, q_value, eps=EPSILON):\n position, q, price, entry_price = state\n \n # With probability eps we choose randomly among allowed actions\n if np.random.binomial(1, eps) == 1: \n if position == positions['long']:\n action = np.random.choice([actions['hold'], actions['sell']])\n elif position == positions['short']:\n action = np.random.choice([actions['hold'], actions['buy']])\n else:\n action = np.random.choice([actions['hold'], actions['buy'], actions['sell']]) \n \n # Otherwise the best available action is selected\n else:\n # Make a list of the actions available from the current state\n if position == positions['long']:\n actions_ = [actions['hold'], actions['sell']] \n elif position == positions['short']:\n actions_ = [actions['hold'], actions['buy']]\n else:\n actions_ = [actions['hold'], actions['buy'], actions['sell']]\n # Get the state-action values for the current state\n values_ = q_value[state[0], state[1], actions_]\n # In case of a tie, choose from those with the highest 
value\n        action = np.random.choice([actions_[action_] for action_, value_ in enumerate(values_) \n                                   if value_ == np.max(values_)])\n    return action", "_____no_output_____" ] ], [ [ "To demonstrate the action selection, we can call `choose_action` with a randomly initialised state-action value array:", "_____no_output_____" ] ], [ [ "# Set a random state-action value function\nq_value_example = np.random.random(Q_DIMS) \n\n# Show the initial state\nstate = get_initial_state(data_generator)\nprint(state)\n\n# The action values for the initial state. \n# state[0] is the position; state[1] is the bid fill probability\nprint(q_value_example[state[0], state[1], :])\n\n# With epsilon = 0, the selected action is always that with the highest Q-value\nprint(choose_action(state, q_value_example, eps=0))", "(0, 5, {'bid': 2231.8, 'ask': 2239.5}, None)\n[0.13177221 0.60988732 0.26877103]\n1\n" ] ], [ [ "### Set up the learning algorithms", "_____no_output_____", "#### Sarsa and Expected Sarsa", "_____no_output_____", "The function below runs through a learning episode with Sarsa. It takes the state-action value array `q_value` as an argument, initialises the state to `START`, defined above, and updates `q_value` according to the Sarsa algorithm, until reaching either the end of the training data or the maximum number of iterations. The cumulative reward earned is returned.", "_____no_output_____" ] ], [ [ "def sarsa(q_value, expected=False, step_size=ALPHA, eps=EPSILON):\n    \n    state = get_initial_state(data_generator)\n    \n    action = choose_action(state, q_value, eps)\n    rewards = 0.0\n    done = False\n    iteration = 0\n    \n    while (iteration < MAX_ITER) and not done:\n        # The step function will raise StopIteration when there\n        # is no more data available to calculate the next state:\n        try:\n            next_state, reward = step(state, action)\n        except StopIteration:\n            # Skip the rest of the loop and end the episode.\n            # As there is no new `next_state`, updating\n            # q_value again doesn't make sense\n            done = True\n            continue\n        next_action = choose_action(next_state, q_value, eps)\n        \n        rewards += reward\n        \n        if not expected:\n            target = q_value[next_state[0], next_state[1], next_action]\n        else:\n            # Calculate the expected value of new state for expected SARSA\n            target = 0.0\n            q_next = q_value[next_state[0], next_state[1], :]\n            best_actions = np.argwhere(q_next == np.max(q_next))\n            for action_ in ACTIONS: \n                if action_ in best_actions:\n                    target += ((1.0 - eps) / len(best_actions) \n                               + eps / len(ACTIONS)) * q_value[next_state[0], next_state[1], action_]\n                else:\n                    target += eps / len(ACTIONS) * q_value[next_state[0], next_state[1], action_]\n            target *= GAMMA\n        \n        # SARSA update\n        q_value[state[0], state[1], action] += step_size * (reward\n                + target - q_value[state[0], state[1], action])\n        \n        state = next_state\n        action = next_action\n        iteration += 1\n    return rewards", "_____no_output_____" ] ], [ [ "#### Q-learning", "_____no_output_____", "This function simulates an episode with Q-learning. It takes the state-action value array `q_value` as an argument, initialises the state to `START`, defined above, and updates `q_value` according to the Q-learning algorithm, until reaching either the end of the training data or the maximum number of iterations. 
The cumulative reward earned is returned.", "_____no_output_____" ] ], [ [ "def q_learning(q_value, step_size=ALPHA, eps=EPSILON):\n \n state = get_initial_state(data_generator)\n \n rewards = 0.0\n done = False\n iteration = 0\n \n while (iteration < MAX_ITER) and not done:\n action = choose_action(state, q_value, eps)\n # The step function will raise StopIteration when there\n # is no more data available to calculate the next state:\n try:\n next_state, reward = step(state, action)\n except StopIteration:\n # Skip the rest of the loop and end the episode.\n # As there is no new `next_state`, updating\n # q_value again doesn't make sense\n done = True\n continue\n \n rewards += reward\n \n # Q-Learning update\n q_value[state[0], state[1], action] += step_size * (\n reward + GAMMA * np.max(q_value[next_state[0], next_state[1], :]) -\n q_value[state[0], state[1], action])\n state = next_state\n iteration +=1\n return rewards", "_____no_output_____" ] ], [ [ "#### Printing the learned policies", "_____no_output_____" ], [ "This function will allow us to inspect the optimal action learned for each of the possible states", "_____no_output_____" ] ], [ [ "def print_optimal_policy(q_value):\n \n optimal_policy = np.argmax(q_value, axis=-1)\n print(\"ask fill prob:\", *['%.2f' % q for q in FILL_PROBS])\n \n for i in range(0, NUM_INV_STEPS):\n \n # positions ={'flat': 0, 'long': 2, 'short':1}\n str_=\"\"\n if (i==0):\n str_ += ' flat '\n elif(i==1):\n str_ += ' short '\n else:\n str_ += ' long '\n \n for j in range(0, NUM_PROB_STEPS): \n a = np.int(optimal_policy[i,j])\n # actions = {'buy':2, 'sell':0, 'hold': 1}\n if a == 0:\n str_ += 's '\n elif a ==1:\n str_ += 'h ' \n else:\n str_ += 'b ' \n print(str_)", "_____no_output_____" ] ], [ [ "#### Set up the epsilon decay\n\nWe decrease the value of epsilon with each epoch - epsilon must approach zero as the number of episodes increases in order to ensure that the q-value function converges to the optimum\n\nThe following figure demonstrates the exponential decay we are going to use.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\neps = 0.5\nepis = 150\nepoch = 15\n\nplt.plot([eps*((1-eps)**(i//epoch)) for i in range(epis)])\nplt.xlabel('Episode')\nplt.ylabel('Epsilon');", "_____no_output_____" ] ], [ [ "### Running Sarsa and Q-learning ", "_____no_output_____" ] ], [ [ "def train(policy, episodes=150):\n if policy == 'sarsa':\n learning_alg = sarsa\n elif policy == 'q-learning':\n learning_alg = q_learning\n else:\n raise ValueError(\"choose 'sarsa' or 'q-learning'\")\n \n epoch_length = 15\n \n # Initialise the rewards vector and state-action values array\n rewards = np.zeros(episodes)\n q_value = np.zeros(Q_DIMS)\n \n print('Training {}...'.format(policy))\n for i in tqdm(range(0, episodes)):\n eps = EPSILON*((1-EPSILON)**(i//epoch_length))\n rewards[i] = learning_alg(q_value, eps=eps)\n \n return q_value, rewards", "_____no_output_____" ], [ "q_sarsa, rewards_sarsa = train('sarsa')\nq_q_learning, rewards_q_learning = train('q-learning')", "Training sarsa...\n" ], [ "print('SARSA')\nprint_optimal_policy(q_sarsa)\nprint('Q-learning')\nprint_optimal_policy(q_q_learning)", "SARSA\nask fill prob: 0.00 0.11 0.22 0.33 0.44 0.56 0.67 0.78 0.89 1.00\n flat b b b s s s s s s s \n short b b b b b b b b b h \n long h s s s s s s s s s \nQ-learning\nask fill prob: 0.00 0.11 0.22 0.33 0.44 0.56 0.67 0.78 0.89 1.00\n flat b b b b b s s s s s \n short b b b b b b b b b b \n long h s s s s s s s s s \n" ], [ "%matplotlib 
inline\nplt.figure(figsize=(13,8))\nplt.plot(rewards_q_learning, label='Q-Learning')\nplt.plot(rewards_sarsa, label='SARSA')\nplt.xlabel('Episodes')\nplt.ylabel('Sum of rewards during episode')\nplt.legend();", "_____no_output_____" ] ], [ [ "### Animation of the resulting market making strategy", "_____no_output_____", "The code below will run through the dataset, taking actions according to the state-action values learned during the training process above. \n\nYou can choose to use either of the strategies learned by SARSA and Q-learning by assigning them to the `view_strategy` variable.", "_____no_output_____" ] ], [ [ "view_strategy = q_q_learning\n#view_strategy = q_sarsa", "_____no_output_____" ], [ "%matplotlib nbagg\n\nfig = plt.figure(figsize=(12, 8))\n\ngs = GridSpec(2,2) # 2 rows, 2 columns\n\nax1 = fig.add_subplot(gs[0,0]) # First row, first column\nax2 = fig.add_subplot(gs[0,1]) # First row, second column\nax3 = fig.add_subplot(gs[1,0]) # Second row, first column\n\nbids = []\nasks = []\nbid_fills = []\nxdata = []\npnl = []\n\ndone = False\nstate = get_initial_state(data_generator)\nrewards = 0.0\niteration = 0\n\nwhile iteration < MAX_ITER and not done:\n    try:\n        start_time = time.time()\n        prev_position_name = [name for name, pos in positions.items() if pos == state[0]][0] \n        \n        action = np.argmax(view_strategy[state[0], state[1], :])\n        try:\n            state, reward = step(state, action)\n        except StopIteration:\n            done = True\n            print('Stopped at time step', iteration)\n            continue\n        iteration += 1\n        \n        position_name = [name for name, pos in positions.items() if pos == state[0]][0] \n        action_name = [name for name, act in actions.items() if act == action][0] \n        prices = state[2]\n        if state[3] is None:\n            entry_price = 'n/a'\n        else:\n            entry_price = \"%.2f\" % state[3]\n        \n        # Cumulative PnL\n        if len(pnl) == 0:\n            pnl.append(reward)\n        else: \n            pnl.append(pnl[-1]+reward)\n\n        bids.append(prices['bid'])\n        asks.append(prices['ask'])\n        xdata.append(iteration)\n        \n        # Plot most recent 80 prices\n        ax1.plot(xdata, \n                 bids, color = 'black')\n        ax1.plot(xdata, \n                 asks, color = 'black')\n        ax1.set_ylabel('Prices')\n        ax1.set_xlabel('Iteration')\n        ax1.set_title('Cumulated PnL: ' + \"%.2f\" % pnl[-1] + ' ~ '\n                      + 'Position: ' + position_name + ' ~ '\n                      + 'Entry Price: ' + entry_price)\n        ax1.set_xlim([max(0, iteration - 80.5), iteration + 0.5])\n\n        # Plotting actions taken according to the Policy\n        if position_name != prev_position_name:\n            if action == actions['sell']:\n                ax1.scatter(iteration, prices['bid']+0.1, \n                            color='orangered', marker='v', s=50)\n            elif action == actions['buy']:\n                ax1.scatter(iteration, prices['ask']-0.1, \n                            color='lawngreen', marker='^', s=50)\n        \n        # Plotting PnL\n        ax2.clear()\n        ax2.plot(xdata, pnl)\n        ax2.set_ylabel('Total PnL')\n        ax2.set_xlabel('Iteration')\n\n        # Plotting current probabilities to fill\n        q_a = FILL_PROBS[state[1]]\n        q_b = 1 - q_a\n        performance = [q_b, q_a]\n\n        ax3.clear()\n        ax3.bar([0, 1], [q_b, q_a], align='center', alpha=0.5, \n                color=['orangered','lawngreen'])\n        ax3.set_xticks([0, 1])\n        ax3.set_xticklabels(['bid', 'ask'])\n        ax3.set_title('Probability of fill')\n        ax3.set_ylim([0, 1])\n        fig.tight_layout()\n        fig.canvas.draw()\n        time.sleep(max(0, 0.5 - (time.time() - start_time)))\n    \n    except KeyboardInterrupt:\n        print('Animation stopped')\n        break", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
e7197349cc417f91ea930a72466d1458ea811cbd
8,877
ipynb
Jupyter Notebook
GeneralExemplars/GeoExemplars/Ridgemap_Notebook.ipynb
gaybro8777/Exemplars2020
0075e300b5ec671d11a875023f28359009cfeb35
[ "BSD-3-Clause" ]
2
2021-01-11T01:58:26.000Z
2021-06-19T19:49:47.000Z
GeneralExemplars/GeoExemplars/Ridgemap_Notebook.ipynb
gaybro8777/Exemplars2020
0075e300b5ec671d11a875023f28359009cfeb35
[ "BSD-3-Clause" ]
null
null
null
GeneralExemplars/GeoExemplars/Ridgemap_Notebook.ipynb
gaybro8777/Exemplars2020
0075e300b5ec671d11a875023f28359009cfeb35
[ "BSD-3-Clause" ]
3
2020-07-24T15:56:43.000Z
2022-03-09T10:18:26.000Z
44.833333
961
0.639518
[ [ [ "## This Notebook - Goals - FOR EDINA\n\n**IMPORTANT NOTE:**<br>\nThis notebook currently doesn't run properly as there have been changes to the path to the elevation data. This resulted in an HTTP error which appears when the library tries to fetch the data. The developers of the library have been notified so hopefully the bug will soon be fixed.\n\n**What?:** <br>\n- Introduction/tutorial to <code>ridge_map</code>, a geospatial python library\n- Illustate the main features of this library when visualizing elevation data\n\n**Who?:** <br>\n- Academics and students in geosciences and other STEM degrees\n- Geophysical Data Science course\n- USers interested in geospatial data analysis and elevation data\n \n**Why?:** <br>\n- Tutorial/guide to plot elevation data\n\n**Noteable features to exploit:** <br>\n- Use of pre-installed libraries\n\n**How?:** <br>\n- Effective use of core libraries in the Geospatial Notebook\n- Using pre-included data\n- Clear visualizations - concise explanations\n<hr>", "_____no_output_____" ], [ "# Plotting elevation data using ridge_map\n\n[Ridge_map](https://github.com/ColCarroll/ridge_map) is a python library aimed at creating ridge maps of elevaton data which comes from NASA's Shuttle Radar Topography Mission (SRTM), high resolution topographic data collected in 2000 and released in 2015. SRTM data are sampled at a resolution of 1 arc-second (about 30 meters) and provided to <code>ridge_map</code> via the python package SRTM.py, which is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) <br>\n\nThis Notebook illustrates how to create ridge maps of Great Britain and the Lake District. Great Britain was chosen to show the large scale maps this library can achieve in comparison to others where the data is split into tiles. This usually results in many separate smaller files that need to be pre-processed. Substantially larger polygons can be selected, although SRTM is not available for latitudes outside of the range N 60° - S 60°. The Lake District was chosen so that the image can be reasonably compared to maps plotted using other libraries, which can be viewed in the Exemplars on <code>Rasterio</code> and <code>EarthPy</code>. Of course, you can select a bounding box of your area of interest, for example by obtaining the points of a polygon using [bboxfinder](http://bboxfinder.com/). Once you load in the elevation data from SRTM, you can preprocess it to automatically detect lakes, rivers, and oceans, and scale the elevations. 
<br>\n\n**Notebook contents:**\n- Importing the necessary libraries\n- Creating custom colourmap to reflect typical terrain colours\n- Ridge map of Great Britain\n- Ridge map of the Lake District\n- Customizing the colourbars of the maps", "_____no_output_____" ] ], [ [ "# Import all the necessary libraries\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom ridge_map import RidgeMap, FontManager\n\n# Hide warning messages\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "# Set up function to customize the chosen matplotlib colormap\ndef truncate_colormap(cmap, minval, maxval, n=100):\n cmapsample = cmap(np.linspace(minval, maxval, n)) #Sample the original colormap at n points between minval and maxval\n new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\n cmapsample) #Create new colormap using the range that was sampled in cmapsample\n return new_cmap #Return new colormap that can be called upon with get_cmap", "_____no_output_____" ], [ "# Identify the points of the polygon of Great Britain from http://bboxfinder.com/\npolygon = (-8.657227,49.761777,2.241211,59.625348)\n\n# Set font manager (otherwise error in ridge_map)\nfont = FontManager('https://github.com/google/fonts/blob/main/ofl/amaranth/Amaranth-Regular.ttf?raw=true')\n\n# Get the elevation values for each data point within the polygon\nrm = RidgeMap(polygon, font=font.prop)\nvalues = rm.get_elevation_data(num_lines=220, elevation_pts=550)\n\n# Specify the colormap segment for this example - range from deep green to darkbrown\ncmap = plt.get_cmap('terrain') # Colormap ranging darkblue-green-yellow-brown-white\nnew_cmap = truncate_colormap(cmap, 0.25, 0.8) # Cutting out darkblue and white ends\n\n# Plot the ridge map\nfig = plt.figure(figsize=(10, 10))\nax = fig.add_subplot(1, 1, 1)\nrm.plot_map(values=rm.preprocess(values=values, lake_flatness=4, water_ntile=30, vertical_ratio=70),\n ax=ax,\n label='Great Britain', #Label the map with the name of the area\n label_x=0.15, #Set location of label\n label_size=20, #Set size of the label\n linewidth=2, #Set the thickness of the lines\n line_color = plt.get_cmap(new_cmap), #Set colormap of the lines\n kind='elevation')\n\n# Add colorbar and customize the map\nnorm = matplotlib.colors.Normalize(vmin=np.nanmin(values), vmax=np.nanmax(values))\nsm = plt.cm.ScalarMappable(norm = norm, cmap=new_cmap)\nplt.colorbar(sm).set_label(label = 'Elevation above sea level (m)', size=15)\n\nplt.title(\"Ridge map of Great Britain\", size=20)\nplt.show()", "_____no_output_____" ], [ "#Set up the figure\nfig, ax = plt.subplots(figsize=(15, 10))\n\n# Identify the points of the polygon of the Lake District\npolygon = (-3.433415,54.197751,-2.678271,54.648501)\n\n# Set font manager (otherwise error in ridge_map)\nfont = FontManager('https://github.com/google/fonts/blob/main/ofl/amaranth/Amaranth-Regular.ttf?raw=true')\n\n# Get the elevation values for each data point within the polygon\nrm = RidgeMap(polygon, font=font.prop)\nvalues = rm.get_elevation_data(num_lines=220, elevation_pts=550)\n\n# Specify the colormap segment for this example - range from deep green to darkbrown\ncmap = plt.get_cmap('terrain') # Colormap ranging darkblue-green-yellow-brown-white\nnew_cmap = truncate_colormap(cmap, 0.25, 0.8) # Cutting out darkblue and white ends\n\n# Plot the ridge map\nrm.plot_map(values=rm.preprocess(values=values, lake_flatness=4, water_ntile=25, 
vertical_ratio=100),\n label='Lake District', #Labelling the map with the name of the area\n label_x=0.7, #Set location of label\n label_size=20, #Set size of the label\n linewidth=2, #Set the thickness of the lines\n line_color = plt.get_cmap(new_cmap), #Set colormap of the lines\n kind='elevation', ax=ax)\n\n# Add colorbar and customize the map\nnorm = matplotlib.colors.Normalize(vmin=np.nanmin(values), vmax=np.nanmax(values))\nsm = plt.cm.ScalarMappable(norm = norm, cmap=new_cmap)\nplt.colorbar(sm).set_label(label = 'Elevation above sea level (m)', size=15)\n\nplt.title(\"Ridge map of the Lake District\", size=20)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
e71974f2f0676511a1da7046c78c4460b10a3359
704,863
ipynb
Jupyter Notebook
transportation.ipynb
dbogatic/economic-indicators
5d507afa3296466570429de10c157228aeda5104
[ "MIT" ]
null
null
null
transportation.ipynb
dbogatic/economic-indicators
5d507afa3296466570429de10c157228aeda5104
[ "MIT" ]
null
null
null
transportation.ipynb
dbogatic/economic-indicators
5d507afa3296466570429de10c157228aeda5104
[ "MIT" ]
null
null
null
3,175.058559
257,553
0.784856
[ [ [ "import yfinance as yf\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n%matplotlib inline", "_____no_output_____" ], [ "#Pull IYT (Transportation ETF) data\nticker = \"IYT\" \nstart= \"1990-01-01\" \nend= \"2021-05-14\"\n\ntransport = yf.download(ticker, start=start, end=end,progress=False)\ntransport = transport.asfreq(freq='D').ffill().dropna()\ntransport.to_csv('resources/transport.csv')\ntransport.head()", "_____no_output_____" ], [ "#Plot IYT historical prices\ntransport_plot = transport['Adj Close'].plot(figsize=(12,8), title='IYT (Transportation ETF)', grid=True)", "_____no_output_____" ], [ "# Pull S&P Index historical values from yfinance API\nticker = \"^GSPC\" \nstart= \"2004-01-02\" \nend= \"2021-05-14\"\n\nsp_history = yf.download(ticker, start=start, end=end, progress=False)\nsp_history = sp_history.asfreq(freq='D').ffill().dropna()\nsp_history.to_csv('resources/sp_history.csv')\nsp_history.head()", "_____no_output_____" ], [ "#Plot S&P Index historical data\nsp_plot = sp_history['Adj Close'].plot(figsize=(12,8), title='S&P Historical', grid=True)", "_____no_output_____" ], [ "#Calculate IYT percent change and 200-day rolling average\ntransport_percent_change = transport['Adj Close'].pct_change()\ntransport_percent_change_rolling = transport_percent_change.rolling(window=200).mean()", "_____no_output_____" ], [ "#Plot IYT percent change 200-day rolling average vs S&P 500 index to identify % change below and above y value of 0 \nfig, ax = plt.subplots(figsize=(12,8))\nplt.plot(sp_history['Adj Close'], label ='S&P 500', color='r')\nax.set_ylabel('S&P Index')\nplt.grid()\nax.legend(loc='lower right')\n\nax2 = ax.twinx()\nplt.plot(transport_percent_change_rolling, label='IYT % Change', color='b')\nax.set_title('S&P 500 vs IYT % Change')\nax2.set_ylabel('IYT % Change')\nax2.axhline(y=0)\nax2.legend(loc='upper left')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7197cc9126bae7c7bbfed67f23e1fa0183342f3
262,843
ipynb
Jupyter Notebook
notebooks/ensemble_arabidopsis_DL.ipynb
Harshs27/ensembleGRN
3cb60a149b95f3ccbeca8e047b02765dcaa21160
[ "MIT" ]
null
null
null
notebooks/ensemble_arabidopsis_DL.ipynb
Harshs27/ensembleGRN
3cb60a149b95f3ccbeca8e047b02765dcaa21160
[ "MIT" ]
null
null
null
notebooks/ensemble_arabidopsis_DL.ipynb
Harshs27/ensembleGRN
3cb60a149b95f3ccbeca8e047b02765dcaa21160
[ "MIT" ]
null
null
null
458.713787
240,460
0.926675
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle\nimport torch\nimport torch.nn as nn\nimport copy\nimport pprint as pp\nimport collections, operator", "_____no_output_____" ], [ "from sklearn import svm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import MinMaxScaler\n#from sklearn.neighbors.nca import NeighborhoodComponentsAnalysis\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.manifold import TSNE\n\nfrom sklearn import model_selection\nfrom sklearn.model_selection import KFold\nimport xgboost as xgb\nimport random\n# from imblearn.over_sampling import SMOTE\nfrom collections import Counter\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom time import time\nfrom sklearn import manifold\nfrom matplotlib.ticker import NullFormatter\nimport operator, itertools \nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nimport warnings\nwarnings.filterwarnings('ignore')\n%matplotlib inline\n# Harsh: Use Python[pytorch] kernel <others, please ignore this line>", "_____no_output_____" ] ], [ [ "### Considering on dummy data ", "_____no_output_____" ] ], [ [ "# Input data : \n# X = num_edges x num_methods, \n# y = predictions (I - 0/1 values; II - 0/1/2/3 hop connections)\n\nE = 10000 # number of edges\nM = 6 # number of methods\nX = np.random.rand(E, M)\ny = np.random.choice([0, 1], size=E, p=[.9, .1]) # percentage labels", "_____no_output_____" ] ], [ [ "## Visualizing the data\nSometimes it can be helpful, so why not!", "_____no_output_____" ] ], [ [ "def visual2D(X, color):\n Axes3D\n n_points = len(color)\n S = 30 # point size for figures\n\n n_neighbors = 10\n n_components = 2\n\n # fig = plt.figure(figsize=(15, 8))\n fig = plt.figure(figsize=(20, 8))\n plt.suptitle(\"2D projection with %i points, %i neighbors\"\n % (n_points, n_neighbors), fontsize=14)\n\n methods = ['standard', 'ltsa', 'hessian', 'modified']\n labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']\n\n for i, method in enumerate(methods):\n t0 = time()\n Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,\n eigen_solver='dense',\n method=method).fit_transform(X)\n t1 = time()\n print(\"%s: %.2g sec\" % (methods[i], t1 - t0))\n\n ax = fig.add_subplot(252 + i)\n plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=S)\n plt.title(\"%s (%.2g sec)\" % (labels[i], t1 - t0))\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_major_formatter(NullFormatter())\n plt.axis('tight')\n\n t0 = time()\n Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)\n t1 = time()\n print(\"Isomap: %.2g sec\" % (t1 - t0))\n ax = fig.add_subplot(257)\n plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=S)\n plt.title(\"Isomap (%.2g sec)\" % (t1 - t0))\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_major_formatter(NullFormatter())\n plt.axis('tight')\n\n\n t0 = time()\n mds = manifold.MDS(n_components, max_iter=100, n_init=1)\n Y = mds.fit_transform(X)\n t1 = time()\n print(\"MDS: %.2g sec\" % (t1 - 
t0))\n ax = fig.add_subplot(258)\n plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=S)\n plt.title(\"MDS (%.2g sec)\" % (t1 - t0))\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_major_formatter(NullFormatter())\n plt.axis('tight')\n\n\n t0 = time()\n se = manifold.SpectralEmbedding(n_components=n_components,\n n_neighbors=n_neighbors)\n Y = se.fit_transform(X)\n t1 = time()\n print(\"SpectralEmbedding: %.2g sec\" % (t1 - t0))\n ax = fig.add_subplot(259)\n plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=S)\n plt.title(\"SpectralEmbedding (%.2g sec)\" % (t1 - t0))\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_major_formatter(NullFormatter())\n plt.axis('tight')\n\n t0 = time()\n tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=5)\n Y = tsne.fit_transform(X)\n t1 = time()\n print(\"t-SNE: %.2g sec\" % (t1 - t0))\n ax = fig.add_subplot(2, 5, 10)\n plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=S)\n plt.title(\"t-SNE (%.2g sec)\" % (t1 - t0))\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_major_formatter(NullFormatter())\n plt.axis('tight')\n\n plt.show()", "_____no_output_____" ], [ "# randomly choose points for visualization\nvizPts = 500\nvisual2D(X[:vizPts, :], y[:vizPts])", "standard: 0.052 sec\nltsa: 0.082 sec\nhessian: 0.13 sec\nmodified: 0.097 sec\nIsomap: 0.18 sec\nMDS: 0.5 sec\nSpectralEmbedding: 0.032 sec\nt-SNE: 2.2 sec\n" ] ], [ [ "## Binary Classification using basic NN", "_____no_output_____" ], [ "### Basic NN model", "_____no_output_____" ] ], [ [ "# defining the model class\nclass DNN_model(torch.nn.Module): # \n def __init__(self, nF, H, USE_CUDA=False): # initializing all the weights here\n super(DNN_model, self).__init__() # initializing the nn.module\n self.USE_CUDA = USE_CUDA\n if USE_CUDA == False:\n self.dtype = torch.FloatTensor\n else: # shift to GPU\n print('shifting to cuda')\n self.dtype = torch.cuda.FloatTensor\n \n self.nF = nF # number of input features \n self.H = H # hidden layer size\n \n self.DNN = self.fDNN()\n\n def fDNN(self):\n l1 = nn.Linear(self.nF, self.H).type(self.dtype)\n lH1 = nn.Linear(self.H, self.H).type(self.dtype)\n lF = nn.Linear(self.H, 1).type(self.dtype)\n return nn.Sequential(l1, nn.ReLU(),\n lH1, nn.ReLU(),\n lF).type(self.dtype)\n #l2, nn.Sigmoid()).type(self.dtype)\n\n def forward(self, X): # model forward\n out = self.DNN(X)\n return out # B x 1", "_____no_output_____" ], [ "# helper functions \nUSE_CUDA = False\ndef convert_to_torch(data, TESTING_FLAG=False, USE_CUDA=USE_CUDA):# convert from numpy to torch variable \n if USE_CUDA == False:\n data = torch.from_numpy(data.astype(np.float, copy=False)).type(torch.FloatTensor)\n if TESTING_FLAG == True:\n data.requires_grad = False\n else: # On GPU\n if TESTING_FLAG == False:\n data = torch.from_numpy(data.astype(np.float, copy=False)).type(torch.FloatTensor).cuda()\n else: # testing phase, no need to store the data on the GPU\n data = torch.from_numpy(data.astype(np.float, copy=False)).type(torch.FloatTensor).cuda()\n data.requires_grad = False\n return data\n\ndef get_squared_error(y, scores):\n y = np.array(y)\n #scores = np.array(scores)\n y = y.reshape(-1)\n scores = np.array(scores).reshape(-1)\n #print(y,scores)\n mse = np.mean((y - scores)**2)\n # get r2 score\n r2 = 1-len(y)*mse/np.sum(y**2)\n return [mse, r2]\n\ndef getOptimizers(model, params):\n # choose the list of model params to be optimized\n if args_optimizer == 'adam':\n optimizer = 
torch.optim.Adam(params, lr=args_lr, eps=1e-08,\n                                     betas=(0.9, 0.999), weight_decay=args_wdecay)\n    elif args_optimizer == 'sgd':\n        optimizer = torch.optim.SGD(params, lr=args_lr, weight_decay=args_wdecay)\n    elif args_optimizer == 'adadelta':\n        optimizer = torch.optim.Adadelta(params, lr=args_lr, rho=0.9, eps=1e-06, weight_decay=args_wdecay)\n    elif args_optimizer == 'asgd':\n        optimizer = torch.optim.ASGD(params, lr=args_lr, alpha=0.75, weight_decay=args_wdecay, lambd=0.0001)\n    elif args_optimizer == 'rms':\n        optimizer = torch.optim.RMSprop(params, lr=args_lr, alpha=0.99, \n                                        eps=1e-08, weight_decay=args_wdecay, momentum=0.25, centered=False) \n\n    else: sys.exit('Optimizer not found!')\n    return optimizer\n\ndef prepare_NN_data(df):\n    # df = T x nF\n    y = np.array(df['y'])\n    X = np.array(df[[c for c in df.columns if c not in ['y']]])\n    #X, y = prepare_sequences(X, y)\n    X = convert_to_torch(X, TESTING_FLAG=True) # -1 x seqL x nF\n    y = convert_to_torch(y, TESTING_FLAG=True) # -1 x 1\n    return X, y\n\ndef classifier_train(df_train, df_valid):\n    #print('data: ', df_train, df_valid)\n    Xtrain, ytrain = prepare_NN_data(df_train)\n    Xvalid, yvalid = prepare_NN_data(df_valid)\n    print(Xtrain.shape, ytrain.shape)\n    \n    _, nF = Xtrain.shape\n    print('Initializing and training the model for epochs: ', EPOCHS, nF)\n    # initialize the model\n    model = DNN_model(nF=nF, H=H, USE_CUDA=USE_CUDA)#***************************MODEL\n    \n    for n, p in model.named_parameters():\n        print(n, p.shape)\n    optimizer = getOptimizers(model, model.parameters())\n    \n    best_valid_mse = np.inf\n    for epoch in range(EPOCHS):\n        # start a batch\n        num_batch_EPOCHS = int(Xtrain.shape[0]/args_BATCHSIZE) + 1\n        print('num_batch_EPOCHS: ', num_batch_EPOCHS, Xtrain.shape)\n        for batch in range(num_batch_EPOCHS):\n            print('batch: ', batch, ' out of ', num_batch_EPOCHS)\n            rows = np.random.choice(Xtrain.shape[0], args_BATCHSIZE)\n            Xbatch, ybatch = Xtrain[rows, :], ytrain[rows]\n            optimizer.zero_grad()\n            ypred = model.forward(Xbatch)\n            ypred = ypred.reshape(-1)\n            loss_diff_cents = criterionMSE(ypred, ybatch)\n            loss = loss_diff_cents\n            loss.backward() # calculate the gradients\n            optimizer.step() # update the weights\n            #if epoch%PRINT_EVERY_EPOCHS==0:\n            print('epoch: ', epoch, ' batch', batch, ' loss: ', loss.detach().numpy())\n            #if epoch%VALID_EVERY_EPOCHS==0 or epoch==EPOCHS-1:\n            valid_pred_array, valid_loss_mse = classifier_test([Xvalid, yvalid], model, FROM_TRAIN=True)\n            model.train()\n            print('epoch: ', epoch, ' batch', batch, ' **********valid loss: ', valid_loss_mse)\n            if valid_loss_mse[0] < best_valid_mse:\n                print('********************* updated best valid model : ', valid_loss_mse)\n                best_valid_mse = valid_loss_mse[0]\n                best_valid_model = copy.deepcopy(model)\n    return best_valid_model #model\n\n\ndef classifier_test(df, model, FROM_TRAIN=False):\n    if FROM_TRAIN:\n        X, y = df\n    else:\n        X, y = prepare_NN_data(df)\n    with torch.no_grad():\n        model.eval()\n        ypred = model.forward(X)\n        ypred = ypred.reshape(-1)\n        loss = criterionMSE(ypred, y)\n        mse = loss.detach().numpy()\n        ypred = ypred.detach().numpy()\n        #print('MSE from lstm: ', mse)\n        eval_metrics = get_squared_error(y, ypred) # mse, r2\n    return ypred, eval_metrics\n\ndef NN_classifier(df, df_test=[]):\n    total_points = df.shape[0]\n    train_points = int(0.6*total_points) # train = 60%\n    #train_points = int(0.95*total_points) \n    print('train points = ', train_points, ' valid points = ', total_points-train_points, \n          'test points = ', df_test.shape[0])\n    \n    df_train, df_valid = df.iloc[:train_points], 
df.iloc[train_points:] \n ytrain, yvalid = df['y'].iloc[:train_points], df['y'].iloc[train_points:]\n ytest = df_test['y'].iloc[:]\n \n print('--------------------------------------------------------------')\n print('Calling the classifier to train')\n model = classifier_train(df_train, df_valid)\n print('Analysing the train data predictions ***********************************')\n train_pred_array, train_mse = classifier_test(df_train, model)\n print('MSE of train data: ', train_mse[0], ' r2 score: ', train_mse[1])\n\n valid_pred_array, valid_mse = classifier_test(df_valid, model)\n print('MSE of valid data: ', valid_mse[0], ' r2 score: ', valid_mse[1])\n \n print('Analysing the test data predictions ***********************************')\n test_pred_array, test_mse = classifier_test(df_test, model)\n print('MSE of test data: ', test_mse[0], ' r2 score: ', test_mse[1])\n\n print('\\nMSE of train = ', train_mse[0], ' valid = ', valid_mse[0], ' test = ', test_mse[0])\n print('\\nR2 scores of train = ', train_mse[1], ' valid = ', valid_mse[1], ' test = ', test_mse[1])\n return model # test_pred_array\n\n\n\ndef main_modelNN(df_data):\n print('all columns : ', df_data.columns, len(df_data.columns))#, df_data)#\n \n ss = int(df_data.shape[0] * 0.70) # 70% train\n #ss = int(df_data.shape[0] * 0.95)\n print('division of samples: ', ss)\n df_train = df_data[0:ss] \n df_test = df_data[ss:]\n # Doing train/valid/test using the XGBoost model\n model_NN = NN_classifier(df_train, df_test)\n \n y_target = np.array(df_test['y'])\n print('All zeros baseline for test: ', np.mean(y_target**2))\n return [model_NN, df_data.columns]", "_____no_output_____" ], [ "# convert data to proper format\nnp.random.seed(15)\nrandom.seed(15)\ndf_X = pd.DataFrame(X, columns=['m'+str(i) for i in range(6)])\ndf_y = pd.DataFrame(pd.Series(y, name='y'))\ndf_data = pd.concat([df_X, df_y], axis=1)\n\n# Global parameters\nH=20 #20 #20 # hidden size\nargs_lr = 0.01 # 0.01 # Learning rate\nargs_optimizer = 'adam' # 'sgd' Optimizer\nargs_wdecay = 1.2e-6\nUSE_CUDA = False\nEPOCHS = 1\n# PRINT_EVERY_EPOCHS=100\n# VALID_EVERY_EPOCHS=100\nargs_BATCHSIZE=500\ncriterionMSE = nn.MSELoss()\n\nmodel_NN = main_modelNN(df_data)", "all columns : Index(['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'y'], dtype='object') 7\ndivision of samples: 7000\ntrain points = 4200 valid points = 2800 test points = 3000\n--------------------------------------------------------------\nCalling the classifier to train\ntorch.Size([4200, 6]) torch.Size([4200])\nInitializing and training the model for epochs: 1 6\nDNN.0.weight torch.Size([20, 6])\nDNN.0.bias torch.Size([20])\nDNN.2.weight torch.Size([20, 20])\nDNN.2.bias torch.Size([20])\nDNN.4.weight torch.Size([1, 20])\nDNN.4.bias torch.Size([1])\nnum_batch_EPOCHS: 9 torch.Size([4200, 6])\nbatch: 0 out of 9\nepoch: 0 batch 0 loss: 0.101127066\nepoch: 0 batch 0 **********valid loss: [0.09711709, 0.049203327068915725]\n********************* updated best valid model : [0.09711709, 0.049203327068915725]\nbatch: 1 out of 9\nepoch: 0 batch 1 loss: 0.10257368\nepoch: 0 batch 1 **********valid loss: [0.092334986, 0.09602111869758656]\n********************* updated best valid model : [0.092334986, 0.09602111869758656]\nbatch: 2 out of 9\nepoch: 0 batch 2 loss: 0.0831086\nepoch: 0 batch 2 **********valid loss: [0.092484415, 0.09455817896169383]\nbatch: 3 out of 9\nepoch: 0 batch 3 loss: 0.08886457\nepoch: 0 batch 3 **********valid loss: [0.09486752, 0.07122707658714345]\nbatch: 4 out of 9\nepoch: 0 batch 4 loss: 0.10549411\nepoch: 
0 batch 4 **********valid loss: [0.09571181, 0.0629612774282069]\nbatch: 5 out of 9\nepoch: 0 batch 5 loss: 0.09690737\nepoch: 0 batch 5 **********valid loss: [0.09517613, 0.06820571505940043]\nbatch: 6 out of 9\nepoch: 0 batch 6 loss: 0.1070632\nepoch: 0 batch 6 **********valid loss: [0.09394465, 0.08026220289977282]\nbatch: 7 out of 9\nepoch: 0 batch 7 loss: 0.086066425\nepoch: 0 batch 7 **********valid loss: [0.09288512, 0.09063517213701366]\nbatch: 8 out of 9\nepoch: 0 batch 8 loss: 0.072324194\nepoch: 0 batch 8 **********valid loss: [0.092243284, 0.09691889802892728]\n********************* updated best valid model : [0.092243284, 0.09691889802892728]\nAnalysing the train data predictions ***********************************\nMSE of train data: 0.08766631 r2 score: 0.09533537210352006\nMSE of valid data: 0.092243284 r2 score: 0.09691889802892728\nAnalysing the test data predictions ***********************************\nMSE of test data: 0.089236125 r2 score: 0.09557981064190735\n\nMSE of train = 0.08766631 valid = 0.092243284 test = 0.089236125\n\nR2 scores of train = 0.09533537210352006 valid = 0.09691889802892728 test = 0.09557981064190735\nAll zeros baseline for test: 0.09866666666666667\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
e7197ff02a9e06a81ff77a512fe52f15aa64c56b
5,067
ipynb
Jupyter Notebook
04_Python_Functions_examples/013_find_the_factorial_of_number_using_recursion.ipynb
peterennis/90_Python_Examples
e2a5a4772ab47d8100b6f13713ea3bc9a25a1ee2
[ "MIT" ]
70
2021-07-02T07:56:45.000Z
2022-03-19T04:13:31.000Z
04_Python_Functions_examples/013_find_the_factorial_of_number_using_recursion.ipynb
bbeella/90_Python_Examples
fbbb1f484b676648881f4287e8175ce9f6224a5a
[ "MIT" ]
null
null
null
04_Python_Functions_examples/013_find_the_factorial_of_number_using_recursion.ipynb
bbeella/90_Python_Examples
fbbb1f484b676648881f4287e8175ce9f6224a5a
[ "MIT" ]
51
2021-10-30T10:16:28.000Z
2022-03-19T04:11:05.000Z
28.466292
242
0.581212
[ [ [ "<small><small><i>\nAll the IPython Notebooks in this **Python Examples** series by Dr. Milaan Parmar are available @ **[GitHub](https://github.com/milaan9/90_Python_Examples)**\n</i></small></small>", "_____no_output_____" ], [ "# Python Program to Find Factorial of Number Using Recursion\n\nIn this example, you'll learn to find the factorial of a number using recursive function.\n\nTo understand this example, you should have the knowledge of the following **[Python programming](https://github.com/milaan9/01_Python_Introduction/blob/main/000_Intro_to_Python.ipynb)** topics:\n\n* **[Python if-else Statement](https://github.com/milaan9/03_Python_Flow_Control/blob/main/002_Python_if_else_statement.ipynb)**\n* **[Python Functions](https://github.com/milaan9/04_Python_Functions/blob/main/001_Python_Functions.ipynb)**\n* **[Python Recursion](https://github.com/milaan9/04_Python_Functions/blob/main/005_Python_Function_Recursion.ipynb)**", "_____no_output_____" ], [ "The factorial of a number is the product of all the integers from 1 to that number.\n\nFor example, the factorial of 6 is **`1*2*3*4*5*6 = 720`**. Factorial is not defined for negative numbers and the factorial of zero is one, **`0! = 1`**.", "_____no_output_____" ] ], [ [ "# Example 1: Factorial of a number using recursion\n\ndef recur_factorial(n):\n if n == 1:\n return n\n else:\n return n*recur_factorial(n-1)\n\nnum = 6\n\n# check if the number is negative\nif num < 0:\n print(\"Sorry, factorial does not exist for negative numbers\")\nelif num == 0:\n print(\"The factorial of 0 is 1\")\nelse:\n print(\"The factorial of\", num, \"is:\", recur_factorial(num))\n \n'''\n>>Output/Runtime Test Cases:\n\nThe factorial of 6 is: 720\n'''", "The factorial of 6 is: 720\n" ] ], [ [ ">**Note:** To find the factorial of another number, change the value of **`num`**.\n\n**Explanation:**\n\nHere, the number is stored in **`num`**. The number is passed to the **`recur_factorial()`** function to compute the factorial of the number.\n\nVisit here to know more about **[recursion in Python](https://github.com/milaan9/04_Python_Functions/blob/main/005_Python_Function_Recursion.ipynb)**.\n\nYou can also solve this problem without recursion: **[Python program to find the factorial of a number](https://github.com/milaan9/90_Python_Examples/blob/main/03_Python_Flow_Control_examples/007_find_the_factorial_of_a_number.ipynb)**.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e7198cb46a0ff6f3ee95d3b4d919079675c23f4d
136,299
ipynb
Jupyter Notebook
proj/python/.ipynb_checkpoints/img_test_model-checkpoint.ipynb
KyungWan/DINF
511539f53de405bc4b9f5870f099861cb97ace77
[ "MIT" ]
null
null
null
proj/python/.ipynb_checkpoints/img_test_model-checkpoint.ipynb
KyungWan/DINF
511539f53de405bc4b9f5870f099861cb97ace77
[ "MIT" ]
null
null
null
proj/python/.ipynb_checkpoints/img_test_model-checkpoint.ipynb
KyungWan/DINF
511539f53de405bc4b9f5870f099861cb97ace77
[ "MIT" ]
null
null
null
154.885227
77,652
0.8322
[ [ [ "import numpy as np", "_____no_output_____" ], [ "# 윈도우 = keras 앞부분에 tensorflow를 붙여줘야한다.\n# 우분투 = 앞에 붙일 필요 없이 keras만 작성해도 된다.\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.vgg16 import preprocess_input", "_____no_output_____" ], [ "classifier = VGG16()", "_____no_output_____" ], [ "# 마지막 레이어를 제거하여 추가적인 네트워크 구성이 가능하게 만든다.\nlast_layer = str(classifier.layers[-1])", "_____no_output_____" ], [ "# 윈도우는 앞에 tensorflow. 붙이기 / 우분투는 tensorflow. 없어도 된다.\nimport tensorflow.keras\n\nfrom tensorflow.keras.layers import Dense", "_____no_output_____" ], [ "# 추가할 신규 레이어를 작성하는 파트\nnew_layer = tensorflow.keras.Sequential()\n\nfor layer in classifier.layers:\n if str(layer) != last_layer:\n new_layer.add(layer)\n \nnew_layer.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nblock1_conv1 (Conv2D) (None, 224, 224, 64) 1792 \n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 224, 224, 64) 36928 \n_________________________________________________________________\nblock1_pool (MaxPooling2D) (None, 112, 112, 64) 0 \n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 112, 112, 128) 73856 \n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 112, 112, 128) 147584 \n_________________________________________________________________\nblock2_pool (MaxPooling2D) (None, 56, 56, 128) 0 \n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 56, 56, 256) 295168 \n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 56, 56, 256) 590080 \n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 56, 56, 256) 590080 \n_________________________________________________________________\nblock3_pool (MaxPooling2D) (None, 28, 28, 256) 0 \n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 \n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 \n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 \n_________________________________________________________________\nblock4_pool (MaxPooling2D) (None, 14, 14, 512) 0 \n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_pool (MaxPooling2D) (None, 7, 7, 512) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 25088) 0 \n_________________________________________________________________\nfc1 (Dense) (None, 4096) 102764544 \n_________________________________________________________________\nfc2 (Dense) (None, 4096) 16781312 \n=================================================================\nTotal params: 134,260,544\nTrainable params: 134,260,544\nNon-trainable params: 
0\n_________________________________________________________________\n" ], [ "# 네트워크를 동결시키도록 한다.\nfor layer in new_layer.layers:\n layer.trainable = False", "_____no_output_____" ], [ "new_layer.add(\n Dense(256, activation = 'relu')\n)\nnew_layer.add(\n Dense(256, activation = 'relu')\n)\nnew_layer.add(\n Dense(256, activation = 'relu')\n)\nnew_layer.add(\n Dense(128, activation = 'relu')\n)\nnew_layer.add(\n Dense(128, activation = 'relu')\n)\nnew_layer.add(\n Dense(128, activation = 'sigmoid')\n)\nnew_layer.add(\n Dense(64, activation = 'relu')\n)\nnew_layer.add(\n Dense(32, activation = 'relu')\n)\nnew_layer.add(\n Dense(16, activation = 'relu')\n)\nnew_layer.add(\n Dense(8, activation = 'sigmoid')\n)\nnew_layer.add(\n Dense(1, activation = 'sigmoid')\n)\n\nnew_layer.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nblock1_conv1 (Conv2D) (None, 224, 224, 64) 1792 \n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 224, 224, 64) 36928 \n_________________________________________________________________\nblock1_pool (MaxPooling2D) (None, 112, 112, 64) 0 \n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 112, 112, 128) 73856 \n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 112, 112, 128) 147584 \n_________________________________________________________________\nblock2_pool (MaxPooling2D) (None, 56, 56, 128) 0 \n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 56, 56, 256) 295168 \n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 56, 56, 256) 590080 \n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 56, 56, 256) 590080 \n_________________________________________________________________\nblock3_pool (MaxPooling2D) (None, 28, 28, 256) 0 \n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 \n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 \n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 \n_________________________________________________________________\nblock4_pool (MaxPooling2D) (None, 14, 14, 512) 0 \n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 \n_________________________________________________________________\nblock5_pool (MaxPooling2D) (None, 7, 7, 512) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 25088) 0 \n_________________________________________________________________\nfc1 (Dense) (None, 4096) 102764544 \n_________________________________________________________________\nfc2 (Dense) (None, 4096) 16781312 \n_________________________________________________________________\ndense (Dense) (None, 256) 1048832 \n_________________________________________________________________\ndense_1 (Dense) (None, 
256) 65792 \n_________________________________________________________________\ndense_2 (Dense) (None, 256) 65792 \n_________________________________________________________________\ndense_3 (Dense) (None, 128) 32896 \n_________________________________________________________________\ndense_4 (Dense) (None, 128) 16512 \n_________________________________________________________________\ndense_5 (Dense) (None, 128) 16512 \n_________________________________________________________________\ndense_6 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndense_7 (Dense) (None, 32) 2080 \n_________________________________________________________________\ndense_8 (Dense) (None, 16) 528 \n_________________________________________________________________\ndense_9 (Dense) (None, 8) 136 \n_________________________________________________________________\ndense_10 (Dense) (None, 1) 9 \n=================================================================\nTotal params: 135,517,889\nTrainable params: 1,257,345\nNon-trainable params: 134,260,544\n_________________________________________________________________\n" ], [ "new_layer.compile(\n optimizer = 'adam',\n loss = 'binary_crossentropy',\n metrics = ['accuracy']\n)", "_____no_output_____" ], [ "# 윈도우 [tensorflow.] 추가\nfrom tensorflow.keras.applications.resnet50 import ResNet50", "_____no_output_____" ], [ "resModel = ResNet50()\nresModel.summary()", "Model: \"resnet50\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_2 (InputLayer) [(None, 224, 224, 3) 0 \n__________________________________________________________________________________________________\nconv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 input_2[0][0] \n__________________________________________________________________________________________________\nconv1_conv (Conv2D) (None, 112, 112, 64) 9472 conv1_pad[0][0] \n__________________________________________________________________________________________________\nconv1_bn (BatchNormalization) (None, 112, 112, 64) 256 conv1_conv[0][0] \n__________________________________________________________________________________________________\nconv1_relu (Activation) (None, 112, 112, 64) 0 conv1_bn[0][0] \n__________________________________________________________________________________________________\npool1_pad (ZeroPadding2D) (None, 114, 114, 64) 0 conv1_relu[0][0] \n__________________________________________________________________________________________________\npool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 pool1_pad[0][0] \n__________________________________________________________________________________________________\nconv2_block1_1_conv (Conv2D) (None, 56, 56, 64) 4160 pool1_pool[0][0] \n__________________________________________________________________________________________________\nconv2_block1_1_bn (BatchNormali (None, 56, 56, 64) 256 conv2_block1_1_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block1_1_relu (Activation (None, 56, 56, 64) 0 conv2_block1_1_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block1_2_conv (Conv2D) (None, 56, 56, 64) 36928 conv2_block1_1_relu[0][0] 
\n__________________________________________________________________________________________________\nconv2_block1_2_bn (BatchNormali (None, 56, 56, 64) 256 conv2_block1_2_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block1_2_relu (Activation (None, 56, 56, 64) 0 conv2_block1_2_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block1_0_conv (Conv2D) (None, 56, 56, 256) 16640 pool1_pool[0][0] \n__________________________________________________________________________________________________\nconv2_block1_3_conv (Conv2D) (None, 56, 56, 256) 16640 conv2_block1_2_relu[0][0] \n__________________________________________________________________________________________________\nconv2_block1_0_bn (BatchNormali (None, 56, 56, 256) 1024 conv2_block1_0_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block1_3_bn (BatchNormali (None, 56, 56, 256) 1024 conv2_block1_3_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block1_add (Add) (None, 56, 56, 256) 0 conv2_block1_0_bn[0][0] \n conv2_block1_3_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block1_out (Activation) (None, 56, 56, 256) 0 conv2_block1_add[0][0] \n__________________________________________________________________________________________________\nconv2_block2_1_conv (Conv2D) (None, 56, 56, 64) 16448 conv2_block1_out[0][0] \n__________________________________________________________________________________________________\nconv2_block2_1_bn (BatchNormali (None, 56, 56, 64) 256 conv2_block2_1_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block2_1_relu (Activation (None, 56, 56, 64) 0 conv2_block2_1_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block2_2_conv (Conv2D) (None, 56, 56, 64) 36928 conv2_block2_1_relu[0][0] \n__________________________________________________________________________________________________\nconv2_block2_2_bn (BatchNormali (None, 56, 56, 64) 256 conv2_block2_2_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block2_2_relu (Activation (None, 56, 56, 64) 0 conv2_block2_2_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block2_3_conv (Conv2D) (None, 56, 56, 256) 16640 conv2_block2_2_relu[0][0] \n__________________________________________________________________________________________________\nconv2_block2_3_bn (BatchNormali (None, 56, 56, 256) 1024 conv2_block2_3_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block2_add (Add) (None, 56, 56, 256) 0 conv2_block1_out[0][0] \n conv2_block2_3_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block2_out (Activation) (None, 56, 56, 256) 0 conv2_block2_add[0][0] \n__________________________________________________________________________________________________\nconv2_block3_1_conv (Conv2D) (None, 56, 56, 64) 16448 conv2_block2_out[0][0] 
\n__________________________________________________________________________________________________\nconv2_block3_1_bn (BatchNormali (None, 56, 56, 64) 256 conv2_block3_1_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block3_1_relu (Activation (None, 56, 56, 64) 0 conv2_block3_1_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block3_2_conv (Conv2D) (None, 56, 56, 64) 36928 conv2_block3_1_relu[0][0] \n__________________________________________________________________________________________________\nconv2_block3_2_bn (BatchNormali (None, 56, 56, 64) 256 conv2_block3_2_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block3_2_relu (Activation (None, 56, 56, 64) 0 conv2_block3_2_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block3_3_conv (Conv2D) (None, 56, 56, 256) 16640 conv2_block3_2_relu[0][0] \n__________________________________________________________________________________________________\nconv2_block3_3_bn (BatchNormali (None, 56, 56, 256) 1024 conv2_block3_3_conv[0][0] \n__________________________________________________________________________________________________\nconv2_block3_add (Add) (None, 56, 56, 256) 0 conv2_block2_out[0][0] \n conv2_block3_3_bn[0][0] \n__________________________________________________________________________________________________\nconv2_block3_out (Activation) (None, 56, 56, 256) 0 conv2_block3_add[0][0] \n__________________________________________________________________________________________________\nconv3_block1_1_conv (Conv2D) (None, 28, 28, 128) 32896 conv2_block3_out[0][0] \n__________________________________________________________________________________________________\nconv3_block1_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block1_1_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block1_1_relu (Activation (None, 28, 28, 128) 0 conv3_block1_1_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block1_2_conv (Conv2D) (None, 28, 28, 128) 147584 conv3_block1_1_relu[0][0] \n__________________________________________________________________________________________________\nconv3_block1_2_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block1_2_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block1_2_relu (Activation (None, 28, 28, 128) 0 conv3_block1_2_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block1_0_conv (Conv2D) (None, 28, 28, 512) 131584 conv2_block3_out[0][0] \n__________________________________________________________________________________________________\nconv3_block1_3_conv (Conv2D) (None, 28, 28, 512) 66048 conv3_block1_2_relu[0][0] \n__________________________________________________________________________________________________\nconv3_block1_0_bn (BatchNormali (None, 28, 28, 512) 2048 conv3_block1_0_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block1_3_bn (BatchNormali (None, 28, 28, 512) 2048 conv3_block1_3_conv[0][0] 
\n__________________________________________________________________________________________________\nconv3_block1_add (Add) (None, 28, 28, 512) 0 conv3_block1_0_bn[0][0] \n conv3_block1_3_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block1_out (Activation) (None, 28, 28, 512) 0 conv3_block1_add[0][0] \n__________________________________________________________________________________________________\nconv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 65664 conv3_block1_out[0][0] \n__________________________________________________________________________________________________\nconv3_block2_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block2_1_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block2_1_relu (Activation (None, 28, 28, 128) 0 conv3_block2_1_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block2_2_conv (Conv2D) (None, 28, 28, 128) 147584 conv3_block2_1_relu[0][0] \n__________________________________________________________________________________________________\nconv3_block2_2_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block2_2_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block2_2_relu (Activation (None, 28, 28, 128) 0 conv3_block2_2_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block2_3_conv (Conv2D) (None, 28, 28, 512) 66048 conv3_block2_2_relu[0][0] \n__________________________________________________________________________________________________\nconv3_block2_3_bn (BatchNormali (None, 28, 28, 512) 2048 conv3_block2_3_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block2_add (Add) (None, 28, 28, 512) 0 conv3_block1_out[0][0] \n conv3_block2_3_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block2_out (Activation) (None, 28, 28, 512) 0 conv3_block2_add[0][0] \n__________________________________________________________________________________________________\nconv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 65664 conv3_block2_out[0][0] \n__________________________________________________________________________________________________\nconv3_block3_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block3_1_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block3_1_relu (Activation (None, 28, 28, 128) 0 conv3_block3_1_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block3_2_conv (Conv2D) (None, 28, 28, 128) 147584 conv3_block3_1_relu[0][0] \n__________________________________________________________________________________________________\nconv3_block3_2_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block3_2_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block3_2_relu (Activation (None, 28, 28, 128) 0 conv3_block3_2_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block3_3_conv (Conv2D) (None, 28, 28, 512) 66048 conv3_block3_2_relu[0][0] 
\n__________________________________________________________________________________________________\nconv3_block3_3_bn (BatchNormali (None, 28, 28, 512) 2048 conv3_block3_3_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block3_add (Add) (None, 28, 28, 512) 0 conv3_block2_out[0][0] \n conv3_block3_3_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block3_out (Activation) (None, 28, 28, 512) 0 conv3_block3_add[0][0] \n__________________________________________________________________________________________________\nconv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 65664 conv3_block3_out[0][0] \n__________________________________________________________________________________________________\nconv3_block4_1_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block4_1_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block4_1_relu (Activation (None, 28, 28, 128) 0 conv3_block4_1_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block4_2_conv (Conv2D) (None, 28, 28, 128) 147584 conv3_block4_1_relu[0][0] \n__________________________________________________________________________________________________\nconv3_block4_2_bn (BatchNormali (None, 28, 28, 128) 512 conv3_block4_2_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block4_2_relu (Activation (None, 28, 28, 128) 0 conv3_block4_2_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block4_3_conv (Conv2D) (None, 28, 28, 512) 66048 conv3_block4_2_relu[0][0] \n__________________________________________________________________________________________________\nconv3_block4_3_bn (BatchNormali (None, 28, 28, 512) 2048 conv3_block4_3_conv[0][0] \n__________________________________________________________________________________________________\nconv3_block4_add (Add) (None, 28, 28, 512) 0 conv3_block3_out[0][0] \n conv3_block4_3_bn[0][0] \n__________________________________________________________________________________________________\nconv3_block4_out (Activation) (None, 28, 28, 512) 0 conv3_block4_add[0][0] \n__________________________________________________________________________________________________\nconv4_block1_1_conv (Conv2D) (None, 14, 14, 256) 131328 conv3_block4_out[0][0] \n__________________________________________________________________________________________________\nconv4_block1_1_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block1_1_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block1_1_relu (Activation (None, 14, 14, 256) 0 conv4_block1_1_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block1_2_conv (Conv2D) (None, 14, 14, 256) 590080 conv4_block1_1_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block1_2_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block1_2_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block1_2_relu (Activation (None, 14, 14, 256) 0 conv4_block1_2_bn[0][0] 
\n__________________________________________________________________________________________________\nconv4_block1_0_conv (Conv2D) (None, 14, 14, 1024) 525312 conv3_block4_out[0][0] \n__________________________________________________________________________________________________\nconv4_block1_3_conv (Conv2D) (None, 14, 14, 1024) 263168 conv4_block1_2_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block1_0_bn (BatchNormali (None, 14, 14, 1024) 4096 conv4_block1_0_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block1_3_bn (BatchNormali (None, 14, 14, 1024) 4096 conv4_block1_3_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block1_add (Add) (None, 14, 14, 1024) 0 conv4_block1_0_bn[0][0] \n conv4_block1_3_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block1_out (Activation) (None, 14, 14, 1024) 0 conv4_block1_add[0][0] \n__________________________________________________________________________________________________\nconv4_block2_1_conv (Conv2D) (None, 14, 14, 256) 262400 conv4_block1_out[0][0] \n__________________________________________________________________________________________________\nconv4_block2_1_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block2_1_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block2_1_relu (Activation (None, 14, 14, 256) 0 conv4_block2_1_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block2_2_conv (Conv2D) (None, 14, 14, 256) 590080 conv4_block2_1_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block2_2_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block2_2_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block2_2_relu (Activation (None, 14, 14, 256) 0 conv4_block2_2_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block2_3_conv (Conv2D) (None, 14, 14, 1024) 263168 conv4_block2_2_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block2_3_bn (BatchNormali (None, 14, 14, 1024) 4096 conv4_block2_3_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block2_add (Add) (None, 14, 14, 1024) 0 conv4_block1_out[0][0] \n conv4_block2_3_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block2_out (Activation) (None, 14, 14, 1024) 0 conv4_block2_add[0][0] \n__________________________________________________________________________________________________\nconv4_block3_1_conv (Conv2D) (None, 14, 14, 256) 262400 conv4_block2_out[0][0] \n__________________________________________________________________________________________________\nconv4_block3_1_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block3_1_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block3_1_relu (Activation (None, 14, 14, 256) 0 conv4_block3_1_bn[0][0] 
\n__________________________________________________________________________________________________\nconv4_block3_2_conv (Conv2D) (None, 14, 14, 256) 590080 conv4_block3_1_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block3_2_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block3_2_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block3_2_relu (Activation (None, 14, 14, 256) 0 conv4_block3_2_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block3_3_conv (Conv2D) (None, 14, 14, 1024) 263168 conv4_block3_2_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block3_3_bn (BatchNormali (None, 14, 14, 1024) 4096 conv4_block3_3_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block3_add (Add) (None, 14, 14, 1024) 0 conv4_block2_out[0][0] \n conv4_block3_3_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block3_out (Activation) (None, 14, 14, 1024) 0 conv4_block3_add[0][0] \n__________________________________________________________________________________________________\nconv4_block4_1_conv (Conv2D) (None, 14, 14, 256) 262400 conv4_block3_out[0][0] \n__________________________________________________________________________________________________\nconv4_block4_1_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block4_1_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block4_1_relu (Activation (None, 14, 14, 256) 0 conv4_block4_1_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block4_2_conv (Conv2D) (None, 14, 14, 256) 590080 conv4_block4_1_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block4_2_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block4_2_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block4_2_relu (Activation (None, 14, 14, 256) 0 conv4_block4_2_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block4_3_conv (Conv2D) (None, 14, 14, 1024) 263168 conv4_block4_2_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block4_3_bn (BatchNormali (None, 14, 14, 1024) 4096 conv4_block4_3_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block4_add (Add) (None, 14, 14, 1024) 0 conv4_block3_out[0][0] \n conv4_block4_3_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block4_out (Activation) (None, 14, 14, 1024) 0 conv4_block4_add[0][0] \n__________________________________________________________________________________________________\nconv4_block5_1_conv (Conv2D) (None, 14, 14, 256) 262400 conv4_block4_out[0][0] \n__________________________________________________________________________________________________\nconv4_block5_1_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block5_1_conv[0][0] 
\n__________________________________________________________________________________________________\nconv4_block5_1_relu (Activation (None, 14, 14, 256) 0 conv4_block5_1_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block5_2_conv (Conv2D) (None, 14, 14, 256) 590080 conv4_block5_1_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block5_2_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block5_2_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block5_2_relu (Activation (None, 14, 14, 256) 0 conv4_block5_2_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block5_3_conv (Conv2D) (None, 14, 14, 1024) 263168 conv4_block5_2_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block5_3_bn (BatchNormali (None, 14, 14, 1024) 4096 conv4_block5_3_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block5_add (Add) (None, 14, 14, 1024) 0 conv4_block4_out[0][0] \n conv4_block5_3_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block5_out (Activation) (None, 14, 14, 1024) 0 conv4_block5_add[0][0] \n__________________________________________________________________________________________________\nconv4_block6_1_conv (Conv2D) (None, 14, 14, 256) 262400 conv4_block5_out[0][0] \n__________________________________________________________________________________________________\nconv4_block6_1_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block6_1_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block6_1_relu (Activation (None, 14, 14, 256) 0 conv4_block6_1_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block6_2_conv (Conv2D) (None, 14, 14, 256) 590080 conv4_block6_1_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block6_2_bn (BatchNormali (None, 14, 14, 256) 1024 conv4_block6_2_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block6_2_relu (Activation (None, 14, 14, 256) 0 conv4_block6_2_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block6_3_conv (Conv2D) (None, 14, 14, 1024) 263168 conv4_block6_2_relu[0][0] \n__________________________________________________________________________________________________\nconv4_block6_3_bn (BatchNormali (None, 14, 14, 1024) 4096 conv4_block6_3_conv[0][0] \n__________________________________________________________________________________________________\nconv4_block6_add (Add) (None, 14, 14, 1024) 0 conv4_block5_out[0][0] \n conv4_block6_3_bn[0][0] \n__________________________________________________________________________________________________\nconv4_block6_out (Activation) (None, 14, 14, 1024) 0 conv4_block6_add[0][0] \n__________________________________________________________________________________________________\nconv5_block1_1_conv (Conv2D) (None, 7, 7, 512) 524800 conv4_block6_out[0][0] 
\n__________________________________________________________________________________________________\nconv5_block1_1_bn (BatchNormali (None, 7, 7, 512) 2048 conv5_block1_1_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block1_1_relu (Activation (None, 7, 7, 512) 0 conv5_block1_1_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block1_2_conv (Conv2D) (None, 7, 7, 512) 2359808 conv5_block1_1_relu[0][0] \n__________________________________________________________________________________________________\nconv5_block1_2_bn (BatchNormali (None, 7, 7, 512) 2048 conv5_block1_2_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block1_2_relu (Activation (None, 7, 7, 512) 0 conv5_block1_2_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block1_0_conv (Conv2D) (None, 7, 7, 2048) 2099200 conv4_block6_out[0][0] \n__________________________________________________________________________________________________\nconv5_block1_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 conv5_block1_2_relu[0][0] \n__________________________________________________________________________________________________\nconv5_block1_0_bn (BatchNormali (None, 7, 7, 2048) 8192 conv5_block1_0_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block1_3_bn (BatchNormali (None, 7, 7, 2048) 8192 conv5_block1_3_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block1_add (Add) (None, 7, 7, 2048) 0 conv5_block1_0_bn[0][0] \n conv5_block1_3_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block1_out (Activation) (None, 7, 7, 2048) 0 conv5_block1_add[0][0] \n__________________________________________________________________________________________________\nconv5_block2_1_conv (Conv2D) (None, 7, 7, 512) 1049088 conv5_block1_out[0][0] \n__________________________________________________________________________________________________\nconv5_block2_1_bn (BatchNormali (None, 7, 7, 512) 2048 conv5_block2_1_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block2_1_relu (Activation (None, 7, 7, 512) 0 conv5_block2_1_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block2_2_conv (Conv2D) (None, 7, 7, 512) 2359808 conv5_block2_1_relu[0][0] \n__________________________________________________________________________________________________\nconv5_block2_2_bn (BatchNormali (None, 7, 7, 512) 2048 conv5_block2_2_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block2_2_relu (Activation (None, 7, 7, 512) 0 conv5_block2_2_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block2_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 conv5_block2_2_relu[0][0] \n__________________________________________________________________________________________________\nconv5_block2_3_bn (BatchNormali (None, 7, 7, 2048) 8192 conv5_block2_3_conv[0][0] 
\n__________________________________________________________________________________________________\nconv5_block2_add (Add) (None, 7, 7, 2048) 0 conv5_block1_out[0][0] \n conv5_block2_3_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block2_out (Activation) (None, 7, 7, 2048) 0 conv5_block2_add[0][0] \n__________________________________________________________________________________________________\nconv5_block3_1_conv (Conv2D) (None, 7, 7, 512) 1049088 conv5_block2_out[0][0] \n__________________________________________________________________________________________________\nconv5_block3_1_bn (BatchNormali (None, 7, 7, 512) 2048 conv5_block3_1_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block3_1_relu (Activation (None, 7, 7, 512) 0 conv5_block3_1_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block3_2_conv (Conv2D) (None, 7, 7, 512) 2359808 conv5_block3_1_relu[0][0] \n__________________________________________________________________________________________________\nconv5_block3_2_bn (BatchNormali (None, 7, 7, 512) 2048 conv5_block3_2_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block3_2_relu (Activation (None, 7, 7, 512) 0 conv5_block3_2_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block3_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 conv5_block3_2_relu[0][0] \n__________________________________________________________________________________________________\nconv5_block3_3_bn (BatchNormali (None, 7, 7, 2048) 8192 conv5_block3_3_conv[0][0] \n__________________________________________________________________________________________________\nconv5_block3_add (Add) (None, 7, 7, 2048) 0 conv5_block2_out[0][0] \n conv5_block3_3_bn[0][0] \n__________________________________________________________________________________________________\nconv5_block3_out (Activation) (None, 7, 7, 2048) 0 conv5_block3_add[0][0] \n__________________________________________________________________________________________________\navg_pool (GlobalAveragePooling2 (None, 2048) 0 conv5_block3_out[0][0] \n__________________________________________________________________________________________________\npredictions (Dense) (None, 1000) 2049000 avg_pool[0][0] \n==================================================================================================\nTotal params: 25,636,712\nTrainable params: 25,583,592\nNon-trainable params: 53,120\n__________________________________________________________________________________________________\n" ], [ "# The Laplace transform moves a signal to a plane called the s-domain,\n# and the inverse Laplace transform brings it back to the time domain (the time axis).\nfrom tensorflow.keras.applications.vgg16 import decode_predictions", "_____no_output_____" ], [ "# Presumably where the Flask model gets added.\n# From Vue, the image is received into Python via Flask and axios.", "_____no_output_____" ], [ "# import socket\n\n# (host, port) = ('localhost', 37373)\n\n# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# s.bind((host, port))\n# s.listen(1)\n\n# conn, addr = s.accept()\n\n# with open('output', 'wb') as f:\n# \twhile True:\n# \t\tl = conn.recv(1024)\n# \t\tif not l:\n# \t\t\tbreak\n# \t\tf.write(l)\n\n# s.close()\n", "_____no_output_____" ], [ "# from flask import send_file\n# from flask import Flask\n\n# app = Flask(__name__)\n\n# # RFC - https://tools.ietf.org/html/rfc3003\n# @app.route('/jpg_download')\n# def jpg_download():\n#     file_name = \"./TwoStepDR_320kbps.jpeg\"\n#     return send_file(file_name,\n#                      mimetype='image/jpeg',\n#                      attachment_filename='img_test.jpeg',  # name of the downloaded file\n#                      as_attachment=True)\n\n# @app.route(\"/file_download\")\n# def hello():\n#     return '''\n#     <a href=\"/jpg_download\">Click me.</a>\n    \n#     <form method=\"get\" action=\"jpg_download\">\n#         <button type=\"submit\">Download!</button>\n#     </form>\n#     '''\n\n# if __name__ == '__main__':\n# \tapp.run(debug=True)\n", "_____no_output_____" ], [ "# For now, validate a variety of models for testing purposes.\n# Feed in literal test inputs.\n# Currently up to image 10.\ntest_A_img = image.load_img(\"static/test_image_5.jpg\", target_size = (224, 224))\ntest_A_img", "_____no_output_____" ], [ "trans_img = image.img_to_array(test_A_img)\ntrans_img = np.expand_dims(trans_img, axis = 0)\ntrans_img = preprocess_input(trans_img)\nprint(trans_img.shape)", "(1, 224, 224, 3)\n" ], [ "pred = classifier.predict(trans_img)\n\n# pred = resModel.predict(trans_img)", "_____no_output_____" ], [ "label = decode_predictions(pred)\n\nprint('top 5!')\n\nfor i in range(5):\n    print('%16s (%.2f%%)' % (label[0][i][1], label[0][i][2] * 100))", "top 5!\n coyote (83.16%)\n timber_wolf (8.44%)\n red_wolf (5.82%)\n grey_fox (1.45%)\n white_wolf (0.52%)\n" ], [ "# Once the analysis result is ready, connect it to Vue to display it.", "_____no_output_____" ] ] ]
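The head above is compiled for binary classification but never fitted in this notebook. The sketch below shows one way the frozen-base model could be trained; it is a hedged illustration, not the author's code: the `data/train` directory, its two-class sub-folder layout, the 80/20 split, the batch size, and the epoch count are all assumptions, and `new_layer` refers to the model built above.

```python
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Hypothetical data pipeline: 'data/train' with one sub-folder per class is an assumption.
datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                             validation_split=0.2)
train_gen = datagen.flow_from_directory('data/train', target_size=(224, 224),
                                        batch_size=32, class_mode='binary',
                                        subset='training')
val_gen = datagen.flow_from_directory('data/train', target_size=(224, 224),
                                      batch_size=32, class_mode='binary',
                                      subset='validation')

# Only the new Dense head is trainable; the copied VGG16 layers stay frozen.
new_layer.fit(train_gen, validation_data=val_gen, epochs=5)
```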
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7198e8176541cc40b2626fc2873bb70926aa940
134,422
ipynb
Jupyter Notebook
_doc/notebooks/pig_hive/pig_streaming_azure_correction.ipynb
sdpython/sparkouille
6377a6c0f25ee89996b2d64b16b8ae616be38348
[ "MIT" ]
null
null
null
_doc/notebooks/pig_hive/pig_streaming_azure_correction.ipynb
sdpython/sparkouille
6377a6c0f25ee89996b2d64b16b8ae616be38348
[ "MIT" ]
5
2018-04-09T19:47:34.000Z
2021-01-01T02:40:15.000Z
_doc/notebooks/pig_hive/pig_streaming_azure_correction.ipynb
sdpython/sparkouille
6377a6c0f25ee89996b2d64b16b8ae616be38348
[ "MIT" ]
null
null
null
64.316746
23,014
0.629108
[ [ [ "# PIG et JSON et streaming avec les données vélib - correction avec Azure\n\nCorrection.", "_____no_output_____" ] ], [ [ "from jyquickhelper import add_notebook_menu\nadd_notebook_menu()", "_____no_output_____" ] ], [ [ "## Récupération des données\n\nLe code suivant télécharge les données nécessaires [data_velib_paris_2014-11-11_22-23.zip](http://www.xavierdupre.fr/enseignement/complements/data_velib_paris_2014-11-11_22-23.zip).", "_____no_output_____" ] ], [ [ "import os, datetime\nfrom pyensae.datasource import download_data\nif not os.path.exists(\"velib\"):\n os.mkdir(\"velib\")\nfiles=download_data(\"data_velib_paris_2014-11-11_22-23.zip\", website=\"xdtd\", whereTo=\"velib\")\nfiles[:2]", "_____no_output_____" ] ], [ [ "## Connexion au cluster et import des données", "_____no_output_____" ] ], [ [ "import os\nblobhp = {}\nif \"HDCREDENTIALS\" in os.environ:\n blobhp[\"blob_storage\"], blobhp[\"password1\"], blobhp[\"hadoop_server\"], blobhp[\"password2\"], blobhp[\"username\"] = \\\n os.environ[\"HDCREDENTIALS\"].split(\"**\")\n r = type(blobhp)\nelse:\n from pyquickhelper.ipythonhelper import open_html_form\n params={\"blob_storage\":\"\", \"password1\":\"\", \"hadoop_server\":\"\", \"password2\":\"\", \"username\":\"axavier\"}\n r = open_html_form(params=params,title=\"server + hadoop + credentials\", key_save=\"blobhp\")\nr", "_____no_output_____" ], [ "import pyensae\n%load_ext pyensae\n%load_ext pyenbc\nblobstorage = blobhp[\"blob_storage\"]\nblobpassword = blobhp[\"password1\"]\nhadoop_server = blobhp[\"hadoop_server\"]\nhadoop_password = blobhp[\"password2\"]\nusername = blobhp[\"username\"] + \"az\"\nclient, bs = %hd_open\nclient, bs", "_____no_output_____" ] ], [ [ "On uploade les données (sauf si vous l'avez déjà fait une fois) :", "_____no_output_____" ] ], [ [ "files = [ os.path.join(\"velib\",_) for _ in os.listdir(\"velib\") if \".txt\" in _]", "_____no_output_____" ], [ "if not client.exists(bs, \"hdblobstorage\", \"velib_1h1/paris.2014-11-11_22-00-18.331391.txt\"):\n client.upload(bs, \"hdblobstorage\", \"velib_1h1\", files)", "_____no_output_____" ], [ "df=%blob_ls hdblobstorage/velib_1h1\ndf.head()", "_____no_output_____" ] ], [ [ "## Exercice 1 : convertir les valeurs numériques\n\nLe programme suivant prend comme argument les colonnes à extraire des fichiers textes qui sont enregistrés au format \"python\". Le streaming sur Azure est sensiblement différent du streaming présenté avec Cloudera. Cette version fonctionne également avec Cloudera. La réciproque n'est pas vraie. Les scripts python sont interprétés avec la machine virtuelle java tout comme pig. La solution suivante s'inspire de [Utilisation de Python avec Hive et Pig dans HDInsight](http://azure.microsoft.com/fr-fr/documentation/articles/hdinsight-python/). Voir également [Writing Jython UDFs](https://help.mortardata.com/technologies/pig/writing_jython_udfs).\n\nCette écriture impose de comprendre la façon dont PIG décrit les données et l'utilisation de [schema](http://pig.apache.org/docs/r0.9.1/basic.html#Schemas). Le nom du script doit être **jython.py** pour ce notebook sinon le compilateur PIG ne sait pas dans quel langage interpréter ce script.\n\nLa version de jython utilisée sur le cluster est ``2.5.3 (2.5:c56500f08d34+, Aug 13 2012, 14:54:35) [OpenJDK 64-Bit Server VM (Azul Systems, Inc.)]``.\n\n**La correction inclut encore un bug mais cela devrait être bientôt corrigé. 
Cela est dû aux différences Python/Jython.**", "_____no_output_____" ] ], [ [ "import pyensae", "_____no_output_____" ], [ "%%PYTHON jython.py\n\nimport datetime, sys, re\n\n@outputSchema(\"brow: {(available_bike_stands:int, available_bikes:int, lat:double, lng:double, name:chararray, status:chararray)}\")\ndef extract_columns_from_js(row):\n # pour un programmeur python, les schéma sont contre plutôt intuitifs, \n # { } veut dire une liste, \n # ( ) un tuple dont chaque colonne est nommé\n # j'écrirai peut-être une fonction détermine le schéma en fonction des données\n \n # il faut utiliser des expressions régulières pour découper la ligne\n # car cette expression ne fonctionne pas sur des lignes trop longues\n # eval ( row ) --> revient à évaluer une ligne de 500 Ko\n # Hadoop s'arrête sans réellement proposer de message d'erreurs qui mettent sur la bonne voie\n # dans ces cas-là, il faut relancer le job après avoir commenter des lignes\n # jusqu'à trouver celle qui provoque l'arrêt brutal du programme\n # arrêt qui ne se vérifie pas en local\n \n cols = [\"available_bike_stands\",\"available_bikes\",\"lat\",\"lng\",\"name\",\"status\"]\n exp = re.compile (\"(\\\\{.*?\\\\})\")\n rec = exp.findall(row)\n res = []\n for r in rec :\n station = eval(r)\n vals = [ str(station[c]) for c in cols ]\n res.append(tuple(vals))\n return res", "_____no_output_____" ] ], [ [ "La vérification qui suit ne fonctionne que si la fonction à tester prend comme entrée une chaîne de caractères mais rien n'empêche de de créer une telle fonction de façon temporaire juste pour vérifier que le script fonctionne (avec Jython 2.5.3) :", "_____no_output_____" ] ], [ [ "%%jython jython.py extract_columns_from_js\n[{'address': 'RUE DES CHAMPEAUX (PRES DE LA GARE ROUTIERE) - 93170 BAGNOLET', 'collect_date': datetime.datetime(2014, 11, 11, 22, 2, 18, 47270), 'lng': 2.416170724425901, 'contract_name': 'Paris', 'name': '31705 - CHAMPEAUX (BAGNOLET)', 'banking': 0, 'lat': 48.8645278209514, 'bonus': 0, 'status': 'OPEN', 'available_bikes': 1, 'last_update': datetime.datetime(2014, 11, 11, 21, 55, 22), 'number': 31705, 'available_bike_stands': 49, 'bike_stands': 50}]\n[{'address': 'RUE DES CHAMPEAUX (PRES DE LA GARE ROUTIERE) - 93170 BAGNOLET', 'collect_date': datetime.datetime(2014, 11, 11, 22, 2, 18, 47270), 'lng': 2.416170724425901, 'contract_name': 'Paris', 'name': '31705 - CHAMPEAUX (BAGNOLET)', 'banking': 0, 'lat': 48.8645278209514, 'bonus': 0, 'status': 'OPEN', 'available_bikes': 1, 'last_update': datetime.datetime(2014, 11, 11, 21, 55, 22), 'number': 31705, 'available_bike_stands': 49, 'bike_stands': 50}]", "_____no_output_____" ] ], [ [ "On supprime la précédente exécution si besoin puis on vérifie que le répertoire contenant les résultats est vide :", "_____no_output_____" ] ], [ [ "if client.exists(bs, client.account_name, \"$PSEUDO/velibpy_results/firstjob\"):\n r = client.delete_folder (bs, client.account_name, \"$PSEUDO/velibpy_results/firstjob\")\n print(r)", "['axavieraz/velibpy_results/firstjob', 'axavieraz/velibpy_results/firstjob/_SUCCESS', 'axavieraz/velibpy_results/firstjob/part-m-00000']\n" ] ], [ [ "On écrit le script PIG qui utilise plus de colonnes :", "_____no_output_____" ] ], [ [ "%%PIG json_velib_python.pig\n\nREGISTER '$CONTAINER/$SCRIPTPIG/jython.py' using jython as myfuncs;\n\njspy = LOAD '$CONTAINER/velib_1h1/paris.*.txt' \n USING PigStorage('\\t') AS (arow:chararray);\n\nDESCRIBE jspy ;\n\nmatrice = FOREACH jspy GENERATE myfuncs.extract_columns_from_js(arow);\nDESCRIBE matrice ;\n\nmultiply = 
FOREACH matrice GENERATE FLATTEN(brow) ;\nDESCRIBE multiply ;\n\nSTORE multiply INTO '$CONTAINER/$PSEUDO/velibpy_results/firstjob' USING PigStorage('\\t') ;", "_____no_output_____" ] ], [ [ "On soumet le job :", "_____no_output_____" ] ], [ [ "jid = %hd_pig_submit json_velib_python.pig -d jython.py -o --stop-on-failure\njid", "_____no_output_____" ] ], [ [ "On regarde son statut : ", "_____no_output_____" ] ], [ [ "st = %hd_job_status jid[\"id\"]\n(st[\"id\"],st[\"percentComplete\"],st[\"completed\"],\nst[\"status\"][\"jobComplete\"],st[\"status\"][\"state\"])", "_____no_output_____" ] ], [ [ "On récupère l'erreur :", "_____no_output_____" ] ], [ [ "%hd_tail_stderr jid[\"id\"] -n 100", "_____no_output_____" ], [ "%blob_ls /$PSEUDO/velibpy_results", "_____no_output_____" ] ], [ [ "On récupère les informations qu'on affiche sous forme de dataframe :", "_____no_output_____" ] ], [ [ "if os.path.exists(\"velib_exo1.txt\") : os.remove(\"velib_exo1.txt\")\n%blob_downmerge /$PSEUDO/velibpy_results/firstjob velib_exo1.txt", "_____no_output_____" ], [ "%head velib_exo1.txt", "_____no_output_____" ], [ "import pandas\ndf = pandas.read_csv(\"velib_hd.txt\", sep=\"\\t\",\n names=[\"available_bike_stands\",\"available_bikes\",\"lat\",\"lng\",\"name\",\"status\"])\ndf.head()", "_____no_output_____" ] ], [ [ "## Exercice 2 : stations fermées\n \nLes stations fermées ne le sont pas tout le temps. On veut calculer le ratio minutes/stations fermées / total des minutes/stations. Le script python permettant de lire les données ne change pas, il suffit juste de déclarer les sorties numériques comme telles. Quelques détails sur les tables :\n\n* ``jspy`` : contient les données brutes dans une liste de fichiers\n* ``matrice`` : d'après le job qui précède, la table contient une ligne par stations et par minute, chaque ligne décrit le status de la station\n* ``grstation`` : table ``matrice`` groupée par status\n* ``fermees`` : pour chaque groupe, on aggrégé le nombre de minutes multipliés par le nombre de vélos\n* ``gr*,dist*`` : distribution du nombre de stations (Y) en fonction du nombre de vélos ou places disponibles", "_____no_output_____" ], [ "En cas d'exécution précédentes :", "_____no_output_____" ] ], [ [ "for sub in [ \"multiply.txt\"]:\n if client.exists(bs, client.account_name, \"$PSEUDO/velibpy_results/\" + sub):\n r = client.delete_folder (bs, client.account_name, \"$PSEUDO/velibpy_results/\" + sub)\n print(r)", "['xavierdupre/velibpy_results/fermees.txt']\n['xavierdupre/velibpy_results/distribution_bikes.txt']\n['xavierdupre/velibpy_results/distribution_stands.txt']\n" ] ], [ [ "On va exécuter le job en deux fois. Le premier job met tout à plat. Le second calcule les aggrégations. La plupart du temps, le travaille de recherche concerne la seconde partie. Mais si le job n'est pas scindé, la première partie est toujours exécutée à chaque itération. Dans ce cas-ci, on scinde le job en deux. La première partie forme une table à partir des données initiales. 
The second aggregates it.", "_____no_output_____" ] ], [ [ "%%PIG json_velib_python2.pig\n\nREGISTER '$CONTAINER/$SCRIPTPIG/jython.py' using jython as myfuncs;\n\njspy = LOAD '$CONTAINER/velib_1h1/*.txt' USING PigStorage('\\t') AS (arow:chararray);\n\nDESCRIBE jspy ;\n\nmatrice = FOREACH jspy GENERATE myfuncs.extract_columns_from_js(arow);\nDESCRIBE matrice ;\n\nmultiply = FOREACH matrice GENERATE FLATTEN(brow) ;\nDESCRIBE multiply ;\n\nSTORE multiply INTO '$CONTAINER/$PSEUDO/velibpy_results/multiply.txt' USING PigStorage('\\t') ;", "_____no_output_____" ], [ "jid = %hd_pig_submit json_velib_python2.pig -d jython.py --stop-on-failure\njid", "_____no_output_____" ], [ "st = %hd_job_status jid[\"id\"]\nst[\"id\"],st[\"percentComplete\"],st[\"status\"][\"jobComplete\"]", "_____no_output_____" ], [ "%blob_ls /$PSEUDO/velibpy_results/multiply.txt", "_____no_output_____" ], [ "for sub in [\"fermees.txt\", \"distribution_bikes.txt\", \"distribution_stands.txt\"]:\n if client.exists(bs, client.account_name, \"$PSEUDO/velibpy_results/\" + sub):\n r = client.delete_folder (bs, client.account_name, \"$PSEUDO/velibpy_results/\" + sub)\n print(r)", "['xavierdupre/velibpy_results/fermees.txt', 'xavierdupre/velibpy_results/fermees.txt/_temporary', 'xavierdupre/velibpy_results/fermees.txt/_temporary/1']\n['xavierdupre/velibpy_results/distribution_bikes.txt', 'xavierdupre/velibpy_results/distribution_bikes.txt/_temporary', 'xavierdupre/velibpy_results/distribution_bikes.txt/_temporary/1']\n['xavierdupre/velibpy_results/distribution_stands.txt', 'xavierdupre/velibpy_results/distribution_stands.txt/_temporary', 'xavierdupre/velibpy_results/distribution_stands.txt/_temporary/1']\n" ], [ "%%PIG json_velib_python3.pig\n\nmultiply = LOAD '$CONTAINER/$PSEUDO/velibpy_results/multiply.txt' USING PigStorage('\\t') AS \n (available_bike_stands:int, available_bikes:int, lat:double, lng:double, name:chararray, status:chararray) ;\nDESCRIBE multiply ; \n\ngrstation = GROUP multiply BY status ; \nDESCRIBE grstation ;\n\nfermees = FOREACH grstation GENERATE\n group\n ,SUM(multiply.available_bikes) AS available_bikes\n ,SUM(multiply.available_bike_stands) AS available_bike_stands \n ;\nDESCRIBE fermees ; \n\ngr_av = GROUP multiply BY available_bikes ;\nDESCRIBE gr_av;\n\ndist_av = FOREACH gr_av GENERATE group, COUNT(multiply) ;\nDESCRIBE dist_av;\n\ngr_pl = GROUP multiply BY available_bike_stands ;\nDESCRIBE gr_pl;\n\ndist_pl = FOREACH gr_pl GENERATE group, COUNT(multiply) ;\nDESCRIBE dist_pl;\n\nSTORE fermees INTO '$CONTAINER/$PSEUDO/velibpy_results/fermees.txt' USING PigStorage('\\t') ;\nSTORE dist_av INTO '$CONTAINER/$PSEUDO/velibpy_results/distribution_bikes.txt' USING PigStorage('\\t') ;\nSTORE dist_pl INTO '$CONTAINER/$PSEUDO/velibpy_results/distribution_stands.txt' USING PigStorage('\\t') ;", "_____no_output_____" ], [ "jid = %hd_pig_submit json_velib_python3.pig --stop-on-failure\njid", "_____no_output_____" ], [ "st = %hd_job_status jid[\"id\"]\nst[\"id\"],st[\"percentComplete\"],st[\"status\"][\"jobComplete\"]", "_____no_output_____" ], [ "%tail_stderr jid[\"id\"] 10", "_____no_output_____" ], [ "%blob_ls /$PSEUDO/velibpy_results", "_____no_output_____" ], [ "if os.path.exists(\"distribution_bikes.txt\") : os.remove(\"distribution_bikes.txt\")\n%blob_downmerge /$PSEUDO/velibpy_results/distribution_bikes.txt distribution_bikes.txt", "_____no_output_____" ], [ "import pandas\ndf = pandas.read_csv(\"distribution_bikes.txt\", sep=\"\\t\", names=[\"nb_velos\", \"nb_stations_minutes\"])\ndf.head()", "_____no_output_____"
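], [ "# Quick local sanity check (a sketch added for illustration, not part of the original PIG job):\n# the output is a histogram, so its station-minute counts must be non-negative and\n# their sum is the total number of (station, minute) observations seen by the job.\nassert (df[\"nb_stations_minutes\"] >= 0).all()\nprint(\"total station-minutes:\", df[\"nb_stations_minutes\"].sum())", "_____no_output_____"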
], [ "import matplotlib.pyplot as plt\nplt.style.use('ggplot')\ndf.plot(x=\"nb_velos\",y=\"nb_stations_minutes\",kind=\"bar\",figsize=(16,4))", "_____no_output_____" ], [ "if os.path.exists(\"distribution_stands.txt\") : os.remove(\"distribution_stands.txt\")\n%blob_downmerge /$PSEUDO/velibpy_results/distribution_stands.txt distribution_stands.txt", "_____no_output_____" ], [ "df = pandas.read_csv(\"distribution_stands.txt\", sep=\"\\t\", names=[\"nb_places\", \"nb_stations_minutes\"])\ndf.plot(x=\"nb_places\",y=\"nb_stations_minutes\",kind=\"bar\",figsize=(16,4))", "_____no_output_____" ], [ "if os.path.exists(\"fermees.txt\") : os.remove(\"fermees.txt\")\n%blob_downmerge /$PSEUDO/velibpy_results/fermees.txt fermees.txt", "_____no_output_____" ], [ "df = pandas.read_csv(\"fermees.txt\", sep=\"\\t\", names=[\"status\", \"nb_velos_stations_minutes\", \"nb_places_stations_minutes\"])\ndf=df.set_index(\"status\")\ndf = df.T\ndf[\"%close\"] = df.CLOSED / (df.CLOSED + df.OPEN)\ndf", "_____no_output_____" ] ], [ [ "Ce dernier tableau n'est vrai que dans la mesure où les nombres reportées sont fiables lorsque les stations sont fermées.", "_____no_output_____" ], [ "## Exercice 3 : stations fermées, journée complète\n \nAppliquer cela à une journée complète revient à lancer le même job sur des données plus grandes. On verra bientôt commencer éviter de recopier le job une seconde fois (introduisant une source potentielle d'erreur).", "_____no_output_____" ], [ "## Exercice 4 : astuces\n\nLes erreurs de PIG ne sont pas très explicite surtout si elles se produisent dans le script python. Un moyen simple de débugger est d'attraper les exceptions produites par python et de les récupérer sous PIG (le reste du job est enlevé). On peut tout-à-fait imaginer ajouter la version de python installée sur le cluster ainsi que la liste de modules", "_____no_output_____" ] ], [ [ "%%PYTHON jython.py\n\nimport sys\n\n@outputSchema(\"brow:chararray\")\ndef information(row):\n return (\";\".join([str(sys.version), str(sys.executable)])).replace(\"\\n\",\" \")", "_____no_output_____" ] ], [ [ "On vérifie que le script fonctionne avec jython :", "_____no_output_____" ] ], [ [ "%%jython jython.py information\nn'importe quoi", "_____no_output_____" ], [ "%%PIG info.pig\n\nREGISTER '$CONTAINER/$SCRIPTPIG/jython.py' using jython as myfuncs;\n\njspy = LOAD '$CONTAINER/velib_1h1/*.txt' USING PigStorage('\\t') AS (arow:chararray);\none = LIMIT jspy 1 ;\ninfos = FOREACH one GENERATE myfuncs.information(arow);\nSTORE infos INTO '$CONTAINER/$PSEUDO/results/infos' USING PigStorage('\\t') ;", "_____no_output_____" ], [ "if client.exists(bs, client.account_name, \"$PSEUDO/results/infos\"):\n r = client.delete_folder (bs, client.account_name, \"$PSEUDO/results/infos\")\n print(r)", "_____no_output_____" ], [ "jid = %hd_pig_submit info.pig jython.py --stop-on-failure\njid", "_____no_output_____" ], [ "st = %hd_job_status jid[\"id\"]\nst[\"id\"],st[\"percentComplete\"],st[\"status\"][\"jobComplete\"]", "_____no_output_____" ], [ "%tail_stderr jid[\"id\"] 10", "_____no_output_____" ], [ "if os.path.exists(\"infos.txt\") : os.remove(\"infos.txt\")\n%blob_downmerge /$PSEUDO/results/infos infos.txt", "_____no_output_____" ], [ "%head infos.txt", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e71998ec0a80c439b92fed16f42419c2d4ca7f6a
68,444
ipynb
Jupyter Notebook
tutorials/detecting-changes-in-sentinel-1-imagery-pt-2/index.ipynb
no2223/ai-how.github.io
b2698b5b87b75315b5634ea8b81eb1bf164a3e43
[ "MIT" ]
null
null
null
tutorials/detecting-changes-in-sentinel-1-imagery-pt-2/index.ipynb
no2223/ai-how.github.io
b2698b5b87b75315b5634ea8b81eb1bf164a3e43
[ "MIT" ]
null
null
null
tutorials/detecting-changes-in-sentinel-1-imagery-pt-2/index.ipynb
no2223/ai-how.github.io
b2698b5b87b75315b5634ea8b81eb1bf164a3e43
[ "MIT" ]
null
null
null
51.733938
1,345
0.566522
[ [ [ "<a href=\"https://colab.research.google.com/github/no2223/ai-how.github.io/blob/master/tutorials/detecting-changes-in-sentinel-1-imagery-pt-2/index.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "#@title Copyright 2020 The Earth Engine Community Authors { display-mode: \"form\" }\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Detecting Changes in Sentinel-1 Imagery (Part 2)\nAuthor: mortcanty\n\n\n\n", "_____no_output_____" ], [ "### Run me first\n\nRun the following cell to initialize the API. The output will contain instructions on how to grant this notebook access to Earth Engine using your account.", "_____no_output_____" ] ], [ [ "import ee\n\n# Trigger the authentication flow.\nee.Authenticate()\n\n# Initialize the library.\nee.Initialize()", "To authorize access needed by Earth Engine, open the following URL in a web browser and follow the instructions. If the web browser does not start automatically, please manually browse the URL below.\n\n https://accounts.google.com/o/oauth2/auth?client_id=517222506229-vsmmajv00ul0bs7p89v5m89qs8eb9359.apps.googleusercontent.com&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fearthengine+https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdevstorage.full_control&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&code_challenge=nUoRvI3M_bSXH0C8oyr6Ez2qfjH90quWK1COf6wIgiU&code_challenge_method=S256\n\nThe authorization workflow will generate a code, which you should paste in the box below. \n" ] ], [ [ "### Datasets and Python modules\nOne [dataset](https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S1_GRD) will be used in the tutorial:\n\n- COPERNICUS/S1_GRD_FLOAT\n - Sentinel-1 ground range detected images\n\nThe following cell imports some python modules which we will be using as we go along and enables inline graphics.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm, gamma, f, chi2\nimport IPython.display as disp\n%matplotlib inline", "_____no_output_____" ] ], [ [ "And to make use of interactive graphics, we import the _folium_ package:", "_____no_output_____" ] ], [ [ "# Import the Folium library.\nimport folium\n\n# Define a method for displaying Earth Engine image tiles to folium map.\ndef add_ee_layer(self, ee_image_object, vis_params, name):\n map_id_dict = ee.Image(ee_image_object).getMapId(vis_params)\n folium.raster_layers.TileLayer(\n tiles = map_id_dict['tile_fetcher'].url_format,\n attr = 'Map Data &copy; <a href=\"https://earthengine.google.com/\">Google Earth Engine</a>',\n name = name,\n overlay = True,\n control = True\n ).add_to(self)\n\n# Add EE drawing method to folium.\nfolium.Map.add_ee_layer = add_ee_layer", "_____no_output_____" ] ], [ [ "## Part 2. 
Hypothesis testing", "_____no_output_____" ], [ "We continue from [Part 1](https://developers.google.com/earth-engine/tutorials/community/detecting-changes-in-sentinel-1-imagery-pt-1) of the Tutorial with the area of interest _aoi_ covering the Frankfurt International Airport and a subset _aoi\\_sub_ consisting of uniform pixels within a forested region.", "_____no_output_____" ] ], [ [ "geoJSON = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {},\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n 8.473892211914062,\n 49.98081240937428\n ],\n [\n 8.658599853515625,\n 49.98081240937428\n ],\n [\n 8.658599853515625,\n 50.06066538593667\n ],\n [\n 8.473892211914062,\n 50.06066538593667\n ],\n [\n 8.473892211914062,\n 49.98081240937428\n ]\n ]\n ]\n }\n }\n ]\n}\ncoords = geoJSON['features'][0]['geometry']['coordinates']\naoi = ee.Geometry.Polygon(coords)\ngeoJSON = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {},\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n 8.534317016601562,\n 50.021637833966786\n ],\n [\n 8.530540466308594,\n 49.99780882512238\n ],\n [\n 8.564186096191406,\n 50.00663576154257\n ],\n [\n 8.578605651855469,\n 50.019431940583104\n ],\n [\n 8.534317016601562,\n 50.021637833966786\n ]\n ]\n ]\n }\n }\n ]\n}\ncoords = geoJSON['features'][0]['geometry']['coordinates']\naoi_sub = ee.Geometry.Polygon(coords)", "_____no_output_____" ] ], [ [ "This time we filter the S1 archive to get an image collection consisting of two images acquired in the month of August, 2020. Because we are interested in change detection, it is essential that the local incidence angles be the same in both images. 
So now we specify both the orbit pass (ASCENDING) as well as the relative orbit number (15):", "_____no_output_____" ] ], [ [ "im_coll = (ee.ImageCollection('COPERNICUS/S1_GRD_FLOAT')\n .filterBounds(aoi)\n .filterDate(ee.Date('2020-08-01'),ee.Date('2020-08-31'))\n .filter(ee.Filter.eq('orbitProperties_pass', 'ASCENDING'))\n .filter(ee.Filter.eq('relativeOrbitNumber_start', 15))\n .sort('system:time_start'))", "_____no_output_____" ] ], [ [ "Here are the acquisition times in the collection, formatted with Python's _time_ module:", "_____no_output_____" ] ], [ [ "import time\nacq_times = im_coll.aggregate_array('system:time_start').getInfo()\n[time.strftime('%x', time.gmtime(acq_time/1000)) for acq_time in acq_times]", "_____no_output_____" ] ], [ [ "### A ratio image", "_____no_output_____" ], [ "Let's select the first two images and extract the VV bands, clipping them to _aoi\\_sub_,", "_____no_output_____" ] ], [ [ "im_list = im_coll.toList(im_coll.size())\nim1 = ee.Image(im_list.get(0)).select('VV').clip(aoi_sub)\nim2 = ee.Image(im_list.get(1)).select('VV').clip(aoi_sub)", "_____no_output_____" ] ], [ [ "Now we'll build the ratio of the VV bands and display it:\n", "_____no_output_____" ] ], [ [ "ratio = im1.divide(im2)\nurl = ratio.getThumbURL({'min': 0, 'max': 10})\ndisp.Image(url=url, width=800)", "_____no_output_____" ] ], [ [ "As in the first part of the Tutorial, standard GEE reducers can be used to calculate a histogram, mean and variance of the ratio image:", "_____no_output_____" ] ], [ [ "hist = ratio.reduceRegion(ee.Reducer.fixedHistogram(0, 5, 500), aoi_sub).get('VV').getInfo()\nmean = ratio.reduceRegion(ee.Reducer.mean(), aoi_sub).get('VV').getInfo()\nvariance = ratio.reduceRegion(ee.Reducer.variance(), aoi_sub).get('VV').getInfo()", "_____no_output_____" ] ], [ [ "Here is a plot of the (normalized) histogram using _numpy_ and _matplotlib_:", "_____no_output_____" ] ], [ [ "a = np.array(hist)\nx = a[:, 0]\ny = a[:, 1] / np.sum(a[:, 1])\nplt.grid()\nplt.plot(x, y, '.')\nplt.show()", "_____no_output_____" ] ], [ [ "This looks a bit like the gamma distribution we met in [Part 1](https://developers.google.com/earth-engine/tutorials/community/detecting-changes-in-sentinel-1-imagery-pt-1#pixel_distributions) but is in fact an _F probability distribution_. The _F_ distribution is defined as the ratio of two chi square distributions, see [Eq. (1.12)](https://developers.google.com/earth-engine/tutorials/community/detecting-changes-in-sentinel-1-imagery-pt-1#speckle), with $m_1$ and $m_2$ degrees of freedom. The above histogram is an $F$ distribution with $m_1=2m$ and $m_2=2m$ degrees of freedom and is given by\n \n$$\np_{f;2m,2m}(x) = {\\Gamma(2m)\\over \\Gamma(m)^2} x^{m-1}(1+x)^{-2m},\\tag{2.1}\n$$\n\n$$\n\\quad {\\rm mean}(x) = {m\\over m-1},\\tag{2.2}\n$$\n\n$$\n\\quad {\\rm var}(x) = {m(2m-1)\\over (m-1)^2 (m-2)}\\tag{2.3}\n$$\n \nwith parameter $m = 5$. We can see this empirically by overlaying the distribution onto the histogram with the help of _scipy.stats.f_. The histogram bucket widths are 0.01 so we have to divide by 100:", "_____no_output_____" ] ], [ [ "m = 5\nplt.grid()\nplt.plot(x, y, '.', label='data')\nplt.plot(x, f.pdf(x, 2*m, 2*m) / 100, '-r', label='F-dist')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "Checking the mean and variance, we get approximate agreement:", "_____no_output_____" ] ], [ [ "print(mean, m/(m-1))\nprint(variance, m*(2*m-1)/(m-1)**2/(m-2))", "_____no_output_____" ] ], [ [ "So what is so special about this distribution? 
When looking for changes between two co-registered Sentinel-1 images acquired at different times, it might seem natural to subtract one from the other and then examine the difference, much as we would do for instance with visual/infrared ground reflectance images. In the case of SAR intensity images this is not a good idea. In the difference of two uncorrelated multilook images $\\langle s_1\\rangle$ and $\\langle s_2\\rangle$ the variances add together and, from Eq. (1.21) in the first part of the Tutorial,\n \n$$\n{\\rm var}(\\langle s_1\\rangle-\\langle s_2\\rangle) = {a_1^2+a_2^2\\over m}, \\tag{2.4}\n$$\n \nwhere $a_1$ and $a_2$ are mean intensities. So difference pixels in bright areas will have a higher variance than difference pixels in darker areas. It is not possible to set a reliable threshold to determine with a given confidence where change has occurred. \n \nIt turns out that the _F_ distributed ratio of the two images which we looked at above is much more informative. For each pixel position in the two images, the quotient $\\langle s_1\\rangle / \\langle s_2\\rangle$ is a _likelihood ratio test statistic_ for deciding whether or not a change has occurred between the two acquisition dates at that position. We will explain what this means below. Here for now is the ratio of the two Frankfurt Airport images, this time within the complete _aoi_:", "_____no_output_____" ] ], [ [ "im1 = ee.Image(im_list.get(0)).select('VV').clip(aoi)\nim2 = ee.Image(im_list.get(1)).select('VV').clip(aoi)\nratio = im1.divide(im2)\n\nlocation = aoi.centroid().coordinates().getInfo()[::-1]\nmp = folium.Map(location=location, zoom_start=12)\nmp.add_ee_layer(ratio,\n {'min': 0, 'max': 20, 'palette': ['black', 'white']}, 'Ratio')\nmp.add_child(folium.LayerControl())\n\ndisplay(mp)", "_____no_output_____" ] ], [ [ "We might guess that the bright pixels here are significant changes, for instance due to aircraft movements on the tarmac or vehicles moving on the highway. Of course ''significant'' doesn't necessarily imply ''interesting''. We already know Frankfurt has a busy airport and that a German Autobahn is always crowded. The question is, how significant are the changes in the statistical sense? Let's now try to answer that question.", "_____no_output_____" ], [ "### Statistical testing\n", "_____no_output_____" ], [ "A _statistical hypothesis_ is a conjecture about the distributions of one or more measured variables. It might, for instance, be an assertion about the mean of a distribution, or about the equivalence of the variances of two different distributions. We distinguish between _simple_ hypotheses, for which the distributions are completely specified, for example: _the mean of a normal distribution with variance $\\sigma^2$ is $\\mu=0$_, and _composite_ hypotheses, for which this is not the case, e.g., _the mean is $\\mu\\ge 0$_.\n\nIn order to test such assertions on the basis of measured values, it is also necessary to formulate _alternative_ hypotheses. To distinguish these from the original assertions, the latter are traditionally called _null_ hypotheses. Thus we might be interested in testing the simple null hypothesis $\\mu = 0$ against the composite alternative hypothesis $\\mu\\ne 0$. An appropriate combination of measurements for deciding whether or not to reject the null hypothesis in favor of its alternative is referred to as a _test statistic_, often denoted by the symbol $Q$. 
An appropriate _test procedure_ will partition the possible test statistics into two subsets: an acceptance region for the null hypothesis and a rejection region. The latter is customarily referred to as the _critical region_.\n\nReferring to the null hypothesis as $H_0$, there are two kinds of errors which can arise from any test procedure:\n\n - $H_0$ may be rejected when in fact it is true. This is called an _error of the first kind_ and the probability that it will occur is denoted $\\alpha$.\n - $H_0$ may be accepted when in fact it is false, which is called an _error of the second kind_ with probability of occurrence $\\beta$.\n\nThe probability of obtaining a value of the test statistic within the critical region when $H_0$ is true is thus $\\alpha$. The probability $\\alpha$ is also referred to as the _level of significance_ of the test or the _probability of a false positive_. It is generally the case that the lower the value of $\\alpha$, the higher is the probability $\\beta$ of making a second kind error, so there is always a trade-off. (Judge Roy Bean, from the film of the same name, didn't believe in trade-offs. He hanged all defendants regardless of the evidence. His $\\beta$ was zero, but his $\\alpha$ was rather large.)\n\nAt any rate, traditionally, significance levels of 0.01 or 0.05 are often used.\n\n#### The _P_ value\n\nSuppose we determine the test statistic to have the value $q$. The _P value_ is defined as the probability of getting a test statistic $Q$ that is at least as extreme as the one observed given the null hypothesis. What is meant by \"extreme\" depends on how we choose the test statistic. If this probability is small, then the null hypothesis is unlikely. If it is smaller than the prescribed significance level $\\alpha$, then the null hypothesis is rejected.", "_____no_output_____" ], [ "#### Likelihood Functions\n\nThe $m$-look VV intensity bands of the two Sentinel-1 images that we took from the archive have pixel values\n\n$$\n\\langle s\\rangle=\\langle|S_{vv}|^2\\rangle, \\quad {\\rm with\\ mean}\\ a=|S^a_{vv}|^2,\n$$\n\nand are _gamma_ distributed according to [Eq. (1.1)](https://developers.google.com/earth-engine/tutorials/community/detecting-changes-in-sentinel-1-imagery-pt-1#pixel_distributions), with parameters $\\alpha=m$ and $\\beta = a/m$. To make the notation a bit simpler, let's write $s = \\langle s \\rangle$, so that the multi-look averaging is understood.\n\nUsing subscript $i=1,2$ to refer to the two images, the probability densities are\n\n$$\np(s_i| a_i) = {1 \\over (a_i/m)^m\\Gamma(m)}s_i^{m-1}e^{-s_i m/a_i},\\quad i=1,2. \\tag{2.5}\n$$\n\nWe've left out the number of looks $m$ on the left hand side, since it is the same for both images. \n\nNow let's formulate a null hypothesis, namely that no change has taken place in the signal strength $a = |S^a_{vv}|^2$ between the two acquisitions, i.e.,\n\n$$\nH_0: \\quad a_1=a_2 = a\n$$ \n\nand test it against the alternative hypothesis that a change took place\n\n$$\nH_1: \\quad a_1\\ne a_2.\n$$ \n\nIf the null hypothesis is true, then the so-called _likelihood_ for getting the measured pixel intensities $s_1$ and $s_2$ is defined as the product of the probability densities for that value of $a$,\n\n$$\nL_0(a) = p(s_1|a)p(s_2|a) = {1\\over(a/m)^{2m}\\Gamma(m)^2}(s_1s_2)^{m-1}e^{-(s_1+s_2)m/a}. 
\\tag{2.6}\n$$\n\nTaking the product of the probability densities like this is justified by the fact that the measurements $s_1$ and $s_2$ are independent.\n\nThe _maximum likelihood_ is obtained by maximizing $L_0(a)$ with respect to $a$,\n\n$$\nL_0(\\hat a) = p(s_1|\\hat a)p(s_2|\\hat a), \\quad \\hat a = \\arg\\max_a L_0(a). \n$$\n\nWe can get $\\hat a$ simply by solving the equation\n\n$$\n{d L_0(a)\\over da} = 0,\n$$\n\nfrom which we derive the maximum likelihood estimate (an easy exercise)\n\n$$\n\\hat a = {s_1 + s_2 \\over 2}.\n$$\n\nMakes sense: the only information we have is $s_1$ and $s_2$, so, if there was no change, our best estimate of the intensity $a$ is to take the average. Thus, substituting this value into Eq. (2.6), the maximum likelihood under $H_0$ is\n\n$$\nL_0(\\hat a) = {1\\over ((s_1+s_2)/2m)^{2m}\\Gamma(m)^2}(s_1s_2)^{m-1}e^{-2m}. \\tag{2.7}\n$$\n\nSimilarly, under the alternative hypothesis $H_1$, the maximum likelihood is\n\n$$\nL_1(\\hat a_1,\\hat a_2) = p(s_1|\\hat a_1)p(s_2|\\hat a_2),\\quad \\hat a_1, \\hat a_2 = \\arg\\max_{a_1,a_2} L_1(a_1,a_2). \n$$\n\nAgain, setting derivatives equal to zero, we get for $H_1$\n\n$$\n\\hat a_1 = s_1, \\quad \\hat a_2 = s_2,\n$$\n\nand the maximum likelihood\n\n$$\nL_1(\\hat a_1,\\hat a_2) = {m^{2m}\\over \\Gamma(m)^2\\, s_1s_2}\\, e^{-2m}. \\tag{2.8}\n$$\n\n", "_____no_output_____" ], [ "### The Likelihood Ratio Test\n \nThe theory of statistical testing specifies methods for\ndetermining the most appropriate test procedure, one which minimizes the probability $\\beta$ of an error of the second kind for a fixed level of significance $\\alpha$. Rather than giving a general definition, we state the appropriate test for our case: \n \nWe should reject the null hypothesis if the _ratio_ of the two likelihoods satisfies the inequality\n \n$$\nQ = {L_0(\\hat a)\\over L_1(\\hat a_1,\\hat a_2)} \\le k \\tag{2.9}\n$$\n \nfor some appropriately small value of threshold $k$.\n \nThis definition simply reflects the fact that, if the null hypothesis is true, the maximum likelihood when $a_1=a_2$ should be close to the maximum likelihood without that restriction, given the measurements $s_1$ and $s_2$. Therefore, if the likelihood ratio is small (less than or equal to some small value $k$), then $H_0$ should be rejected. \n \nWith some (very) simple algebra, Eq. (2.9) evaluates to\n \n$$\nQ = \\left[2^2 \\left( s_1s_2\\over (s_1+s_2)^2\\right)\\right]^m \\le k \\tag{2.10}\n$$\n \nusing (2.7) and (2.8). This is the same as saying\n \n$$\n{s_1s_2\\over (s_1+s_2)^2} \\le k'\\quad {\\rm or}\\quad {(s_1+s_2)^2\\over s_1s_2}\\ge k''\\quad {\\rm or}\\quad {s_1\\over s_2}+{s_2\\over s_1}\\ge k''-2\n$$\n \nwhere $k',k''$ depend on $k$. The last inequality is satisfied if either term is small enough:\n \n$$\n{s_1\\over s_2} < c_1 \\quad {\\rm or}\\quad {s_2\\over s_1} < c_2 \\tag{2.11}\n$$\n \nagain for some appropriate thresholds $c_1$ and $c_2$ which depend on $k''$. \n \nSo the ratio image $s_1/s_2$ that we generated above is indeed a _Likelihood Ratio Test (LRT) statistic_, one of two possible. We'll call it $Q_1 = s_1/s_2$ and the other one $Q_2 = s_2/s_1$. The former tests for a significant increase in intensity between times $t_1$ and $t_2$, the latter for a significant decrease.\n \nFine, but where does the _F_ distribution come in?\n \nBoth $s_1$ and $s_2$ are gamma distributed\n \n$$\np(s\\mid a) = {1\\over (a/m)^m\\Gamma(m)}s^{m-1}e^{-sm/a}.\n$$\n \nLet $z = 2sm/a$. 
Then\n \n$$\np(z\\mid a) = p(s\\mid a)\\left |{ds\\over dz}\\right | = {1\\over (a/m)^m\\Gamma(m)}\\left({za\\over 2m}\\right)^{m-1}e^{-z/2}\\left({a\\over 2m}\\right) = {1\\over 2^m\\Gamma(m)}z^{m-1}e^{-z/2}.\n$$\n \nComparing this with [Eq. (1.12)](https://developers.google.com/earth-engine/tutorials/community/detecting-changes-in-sentinel-1-imagery-pt-1#speckle) from the first part of the Tutorial, we see that $z$ is chi square distributed with $2m$ degrees of freedom, and therefore so are the variables $2s_1m/a$ and $2s_2m/a$. The quotients $s_1/s_2$ and $s_2/s_1$ are thus ratios of two chi square distributed variables with $2m$ degrees of freedom. They therefore have the _F_ distribution of Eq. (2.1).\n \nIn order to decide the test for $Q_1$, we need the _P_ value for a measurement $q_1$ of the statistic. Recall that this is the probability of getting a result at least as extreme as the one measured under the null hypothesis. So in this case\n \n$$\nP_1 = {\\rm Prob}(Q_1\\le q_1\\mid H_0), \\tag{2.12}\n$$\n \nwhich we can calculate from the percentiles of the _F_ distribution, Eq. (2.1). Then if $P_1\\le \\alpha/2$ we reject $H_0$ and conclude with significance $\\alpha/2$ that a change occurred. We do the same test for $Q_2$, so that the combined significance is $\\alpha$.", "_____no_output_____" ] ], [ [ "Now we can make a change map for the Frankfurt Airport for the two acquisitions, August 5 and August 11, 2020. We want to see quite large changes associated primarily with airplane and vehicle movements, so we will set the significance generously low to $\\alpha = 0.001$. We will also distinguish the direction of change and mask out the no-change pixels:", "_____no_output_____" ] ], [ [ "# Decision threshold alpha/2:\ndt = f.ppf(0.0005, 2*m, 2*m)\n\n# LRT statistics.\nq1 = im1.divide(im2)\nq2 = im2.divide(im1)\n\n# Change map with 0 = no change, 1 = decrease, 2 = increase in intensity.\nc_map = im1.multiply(0).where(q2.lt(dt), 1)\nc_map = c_map.where(q1.lt(dt), 2)\n\n# Mask no-change pixels.\nc_map = c_map.updateMask(c_map.gt(0))\n\n# Display map with red for increase and blue for decrease in intensity.\nlocation = aoi.centroid().coordinates().getInfo()[::-1]\nmp = folium.Map(\n location=location, tiles='Stamen Toner',\n zoom_start=13)\nfolium.TileLayer('OpenStreetMap').add_to(mp)\nmp.add_ee_layer(ratio,\n {'min': 0, 'max': 20, 'palette': ['black', 'white']}, 'Ratio')\nmp.add_ee_layer(c_map,\n {'min': 0, 'max': 2, 'palette': ['black', 'blue', 'red']},\n 'Change Map')\nmp.add_child(folium.LayerControl())\n\ndisplay(mp)", "_____no_output_____" ] ], [ [ "Most changes are within the airport or on the Autobahn. Barge movements on the Main River (upper left hand corner) are also signaled as significant changes. Note that the 'red' changes (significant increases in intensity) do not show up in the 'ratio' overlay, which displays $s_1/s_2$.", "_____no_output_____" ] ], [ [ "### Bivariate change detection", "_____no_output_____" ], [ "Rather than analyzing the VV and VH bands individually, it would make more sense to treat them together, and that is what we will now do. It is convenient to work with the covariance matrix form for measured intensities that we introduced in Part 1, see [Eq.(1.6a)](https://developers.google.com/earth-engine/tutorials/community/detecting-changes-in-sentinel-1-imagery-pt-1#single_look_complex_slc_sar_measurements). 
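In plain numpy terms each dual pol pixel measurement is then just a $2\times 2$ diagonal matrix of intensities (a small sketch with made-up values, not part of the original tutorial):\n\n```python\nimport numpy as np\n\n# hypothetical multi-looked [VV, VH] intensities at times t1 and t2\nc1 = np.diag([0.12, 0.030])\nc2 = np.diag([0.10, 0.025])\nprint(np.linalg.det(c1), np.linalg.det(c2), np.linalg.det(c1 + c2))\n```\n\n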
Again with the aim of keeping the notation simple, define\n\n$$\n\\pmatrix{ s_i & 0\\cr 0 & r_i} = \\pmatrix{\\langle|S_{vv}|^2\\rangle_i & 0 \\cr 0 & \\langle|S_{vh}|^2\\rangle_i}, \\quad {\\rm with\\ means}\\quad a_i = \\langle|S^{a_i}_{vv}|^2\\rangle, \\quad b_i = \\langle|S^{b_i}_{vh}|^2\\rangle \\tag{2.13}\n$$\n\nfor the two acquisition times $t_i,\\ i=1,2$. \n\nUnder $H_0$ we have $a_1=a_2=a$ and $b_1=b_2=b$. Assuming independence of $s_i$ and $r_i$, the likelihood function is the product of the four gamma distributions\n\n$$\nL_0(a,b) = p(s_1\\mid a)p(r_1\\mid b)p(s_2\\mid a)p(r_2\\mid b).\n$$\n\nUnder $H_1$,\n\n$$\nL_1(a_1,b_1,a_2,b_2) = p(s_1\\mid a_1)p(r_1\\mid b_1)p(s_2\\mid a_2)p(r_2\\mid b_2).\n$$\n\nWith maximum likelihood estimates under $H_0$ \n\n$$\n\\hat a = (s_1+s_2)/2\\quad {\\rm and}\\quad \\hat b = (r_1+r_2)/2\n$$ \n\nfor the parameters and some simple algebra, we get \n\n$$\nL_0(\\hat a,\\hat b) = {(2m)^{4m}\\over (s_1+s_2)^{2m}(r_1+r_2)^{2m}\\Gamma(m)^4}(s_1r_1s_2r_2)^{m-1}e^{-4m}. \\tag{2.14}\n$$ \n\nSimilarly with $\\hat a_1=s_1,\\ \\hat b_1=r_1,\\ \\hat a_2=s_2,\\ \\hat b_2=r_2$, we calculate\n\n$$\nL_1(\\hat a_1,\\hat b_1,\\hat a_2,\\hat b_2) = {m^{4m}\\over \\Gamma(m)^4\\, s_1r_1s_2r_2}e^{-4m}.\n$$\n\nThe likelihood ratio test statistic is then\n\n$$\nQ = {L_0(\\hat a,\\hat b)\\over L_1(\\hat a_1,\\hat b_1,\\hat a_2,\\hat b_2)}={2^{4m}(s_1r_1s_2r_2)^m\\over (s_1+s_2)^{2m}(r_1+r_2)^{2m}}.\n$$\n\nWriting this in terms of the covariance matrix representation,\n\n$$\nc_i = \\pmatrix{s_i & 0\\cr 0 & r_i},\\quad i=1,2,\n$$\n\nwe derive, finally, the likelihood ratio test\n\n$$\nQ = \\left[2^4\\left({|c_1| |c_2|\\over |c_1+c_2|^2 }\\right)\\right]^m \\le k, \\tag{2.15}\n$$\n\nwhere $|\\cdot|$ indicates the matrix determinant, $|c_i|=s_ir_i$. \n\nSo far so good. But in order to determine _P_ values, we need the probability distribution of $Q$. This time we have no idea how to obtain it. Here again, statistical theory comes to our rescue.\n", "_____no_output_____" ], [ "Let $\\Theta$ be the parameter space for the LRT. In our example it is \n$$\n\\Theta = \\{ a_1,b_1,a_2,b_2\\}\n$$ \nand has $d=4$ dimensions. Under the null hypothesis the parameter space is restricted by the conditions $a=a_1=a_2$ and $b=b_1=b_2$ to \n$$\n\\Theta_0 = \\{ a,b\\}\n$$ \nwith $d_0=2$ dimensions. According to [Wilks' Theorem](https://en.wikipedia.org/wiki/Wilks%27_theorem), as the number of measurements determining the LRT statistic $Q$ approaches $\\infty$, the test statistic $-2\\log Q$ approaches a chi square distribution with $d-d_0=2$ degrees of freedom. (Recall that, in order to determine the matrices $c_1$ and $c_2$, five individual measurements were averaged or multi-looked.) 
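A quick pure-numpy simulation makes this plausible (a sketch with assumed means $a=1$ and $b=2$, not part of the original tutorial):\n\n```python\nimport numpy as np\nfrom scipy.stats import gamma\nm = 5\ns1, s2 = gamma.rvs(m, scale=1.0/m, size=(2, 100000))  # H0: a1 = a2 = 1\nr1, r2 = gamma.rvs(m, scale=2.0/m, size=(2, 100000))  # H0: b1 = b2 = 2\nm2logq = -2*m*(np.log(s1*r1) + np.log(s2*r2)\n               - 2*np.log((s1 + s2)*(r1 + r2)) + 4*np.log(2))\nprint(m2logq.mean())  # close to 2, the mean of a chi square variable with 2 degrees of freedom\n```\n\n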
So rather than working with $Q$ directly, we use $-2\\log Q$ instead and hope that Wilks' theorem is a good enough approximation for our case.\n\nIn order to check if this is so, we just have to program \n\n$$\n-2\\log Q = (\\log{|c_1|}+\\log{|c_2|}-2\\log{|c_1+c_2|}+4\\log{2})(-2m)\n$$ \n\nin GEE-ese:", "_____no_output_____" ] ], [ [ "def det(im):\n return im.expression('b(0) * b(1)')\n\n# Number of looks.\nm = 5\n\nim1 = ee.Image(im_list.get(0)).select('VV', 'VH').clip(aoi)\nim2 = ee.Image(im_list.get(1)).select('VV', 'VH').clip(aoi)\n\nm2logQ = det(im1).log().add(det(im2).log()).subtract(\n det(im1.add(im2)).log().multiply(2)).add(4*np.log(2)).multiply(-2*m)", "_____no_output_____" ] ], [ [ "and then plot its histogram, comparing it with the chi square distribution _scipy.stats.chi2.pdf()_ with two degrees of freedom:", "_____no_output_____" ] ], [ [ "hist = m2logQ.reduceRegion(\n ee.Reducer.fixedHistogram(0, 20, 200), aoi).get('VV').getInfo()\na = np.array(hist)\nx = a[:, 0]\ny = a[:, 1] / np.sum(a[:, 1])\nplt.plot(x, y, '.', label='data')\nplt.plot(x, chi2.pdf(x, 2)/10, '-r', label='chi square')\nplt.legend()\nplt.grid()\nplt.show()", "_____no_output_____" ] ], [ [ "Looks pretty good. Note now that a small value of the LRT $Q$ in Eq. (2.15) corresponds to a large value of $-2\\log{Q}$. Therefore the _P_ value for a measurement $q$ is now the probability of getting the value $-2\\log{q}$\nor higher,\n$$\nP = {\\rm Prob}(-2\\log{Q} \\ge -2\\log{q}) = 1 - {\\rm Prob}(-2\\log{Q} < -2\\log{q}).\n$$\n\nSo let's try out our bivariate change detection procedure, this time on an agricultural scene where we expect to see larger regions of change.", "_____no_output_____" ] ], [ [ "geoJSON = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {},\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n -98.2122802734375,\n 49.769291532628515\n ],\n [\n -98.00559997558594,\n 49.769291532628515\n ],\n [\n -98.00559997558594,\n 49.88578690918283\n ],\n [\n -98.2122802734375,\n 49.88578690918283\n ],\n [\n -98.2122802734375,\n 49.769291532628515\n ]\n ]\n ]\n }\n }\n ]\n}\ncoords = geoJSON['features'][0]['geometry']['coordinates']\naoi1 = ee.Geometry.Polygon(coords)", "_____no_output_____" ] ], [ [ "This is a mixed agricultural/forest area in southern Manitoba, Canada. We'll gather two images, one from the beginning of August and one from the beginning of September, 2018. 
A lot of harvesting takes place in this interval, so we expect some extensive changes.", "_____no_output_____" ] ], [ [ "im1 = ee.Image(ee.ImageCollection('COPERNICUS/S1_GRD_FLOAT')\n .filterBounds(aoi1)\n .filterDate(ee.Date('2018-08-01'), ee.Date('2018-08-31'))\n .filter(ee.Filter.eq('orbitProperties_pass', 'ASCENDING'))\n .filter(ee.Filter.eq('relativeOrbitNumber_start', 136))\n .first()\n .clip(aoi1))\nim2 = ee.Image(ee.ImageCollection('COPERNICUS/S1_GRD_FLOAT').filterBounds(aoi1)\n .filterDate(ee.Date('2018-09-01'), ee.Date('2018-09-30'))\n .filter(ee.Filter.eq('orbitProperties_pass', 'ASCENDING'))\n .filter(ee.Filter.eq('relativeOrbitNumber_start', 136))\n .first()\n .clip(aoi1))", "_____no_output_____" ] ], [ [ "Here are the acquisition times:", "_____no_output_____" ] ], [ [ "acq_time = im1.get('system:time_start').getInfo()\nprint( time.strftime('%x', time.gmtime(acq_time/1000)) )\nacq_time = im2.get('system:time_start').getInfo()\nprint( time.strftime('%x', time.gmtime(acq_time/1000)) )", "_____no_output_____" ] ], [ [ "Fortunately it is possible to map the chi square cumulative distribution function over an _ee.Image()_ so that a _P_ value image can be calculated directly. This wasn't possible in the single band case, as the _F_ cumulative distribution is not available on the GEE. Here are the _P_ values:", "_____no_output_____" ] ], [ [ "def chi2cdf(chi2, df):\n ''' Chi square cumulative distribution function for df degrees of freedom\n using the built-in incomplete gamma function gammainc() '''\n return ee.Image(chi2.divide(2)).gammainc(ee.Number(df).divide(2))\n\n# The observed test statistic image -2logq.\nm2logq = det(im1).log().add(det(im2).log()).subtract(\n det(im1.add(im2)).log().multiply(2)).add(4*np.log(2)).multiply(-2*m)\n\n# The P value image prob(m2logQ > m2logq) = 1 - prob(m2logQ < m2logq).\np_value = ee.Image.constant(1).subtract(chi2cdf(m2logq, 2))\n\n# Project onto map.\nlocation = aoi1.centroid().coordinates().getInfo()[::-1]\nmp = folium.Map(location=location, zoom_start=12)\nmp.add_ee_layer(p_value,\n {'min': 0,'max': 1, 'palette': ['black', 'white']}, 'P-value')\nmp.add_child(folium.LayerControl())", "_____no_output_____" ] ], [ [ "The uniformly dark areas correspond to small or vanishing _P_ values and signify change. The bright areas correspond to no change. Why they are not uniformly bright will be explained below. Now we set a significance threshold of $\\alpha=0.01$ and display the significant changes, whereby 1% of them will be false positives. For reference we also show the 2018 [Canada AAFC Annual Crop Inventory](https://developers.google.com/earth-engine/datasets/catalog/AAFC_ACI) map, which is available as a GEE collection:", "_____no_output_____" ] ], [ [ "c_map = p_value.multiply(0).where(p_value.lt(0.01), 1)\n\ncrop2018 = (ee.ImageCollection('AAFC/ACI')\n .filter(ee.Filter.date('2018-01-01', '2018-12-01'))\n .first()\n .clip(aoi1))\n\nmp = folium.Map(location=location, zoom_start=12)\nmp.add_ee_layer(crop2018, {'min': 0, 'max': 255}, 'crop2018')\nmp.add_ee_layer(c_map.updateMask(\n c_map.gt(0)), {'min': 0, 'max': 1, 'palette': ['black', 'red']}, 'c_map')\nmp.add_child(folium.LayerControl())", "_____no_output_____" ] ], [ [ " The major crops in the scene are soybeans (dark brown), oats (light brown), canola (light green), corn (light yellow) and winter wheat (dark gray). 
The wooded areas exhibit little change, while canola has evidently been extensively harvested in the interval.", "_____no_output_____" ], [ "#### A note on _P_ values\nBecause small _P_ values are indicative of change, it is tempting to say that, the larger the _P_ value, the higher the probability of no change. Or more explicitly, the _P_ value is itself the no-change probability. Let's see why this is false. Below we choose a wooded area of the agricultural scene where few significant changes are to be expected and use it to subset the _P_ value image. Then we plot the histogram of the subset:", "_____no_output_____" ] ], [ [ "geoJSON = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {},\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n -98.18550109863281,\n 49.769735012247885\n ],\n [\n -98.13949584960938,\n 49.769735012247885\n ],\n [\n -98.13949584960938,\n 49.798109268622\n ],\n [\n -98.18550109863281,\n 49.798109268622\n ],\n [\n -98.18550109863281,\n 49.769735012247885\n ]\n ]\n ]\n }\n }\n ]\n}\ncoords = geoJSON['features'][0]['geometry']['coordinates']\naoi1_sub = ee.Geometry.Polygon(coords)\nhist = p_value.reduceRegion(ee.Reducer.fixedHistogram(0, 1, 100), aoi1_sub).get('constant').getInfo()\na = np.array(hist)\nx = a[:,0]\ny = a[:,1]/np.sum(a[:,1])\nplt.plot(x, y, '.b', label='p-value')\nplt.ylim(0, 0.05)\nplt.grid()\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "So the P values of no-change measurements are uniformly distributed over $[0, 1]$ (the excess of small _P_ values at the left can be ascribed to genuine changes within the polygon). A large _P_ value is no more indicative of no change than a small one. Of course it has to be this way. When, for example, we set a significance level of 5%, then the fraction of false positives, i.e., the fraction of _P_ values smaller than 0.05 given $H_0$, must also be 5%. This accounts for the noisy appearance of the _P_ value image in the no-change regions.", "_____no_output_____" ], [ "#### Change direction: the Loewner order\nWhat about the direction of change in the bivariate case? This is less clear, as we can have the situation where the VV intensity gets larger and the VH smaller from time $t_1$ to $t_2$, or vice versa. When we are dealing with the C2 covariance matrix representation of SAR imagery, see Eq. (2.13), a characterization of change can be made as follows [(Nielsen et al. (2019))](https://ieeexplore.ieee.org/document/8736751): For each significantly changed pixel, we determine the difference $C2_{t_2}-C2_{t_1}$ and examine its so-called _definiteness_, also known as the _Loewner order_ of the change. A matrix is said to be _positive definite_ if all of its eigenvalues are positive, _negative definite_ if they are all negative, otherwise _indefinite_. In the case of the $2\times 2$ diagonal matrices that we are concerned with, the eigenvalues are just the two diagonal elements themselves, so determining the Loewner order is trivial. 
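For a diagonal difference the check is a one-liner (a small sketch, not part of the original tutorial):\n\n```python\ndef loewner_diag(d1, d2):\n    # d1, d2: diagonal elements of the difference C2(t2) - C2(t1)\n    if d1 > 0 and d2 > 0:\n        return 'positive definite'\n    if d1 < 0 and d2 < 0:\n        return 'negative definite'\n    return 'indefinite'\n\nprint(loewner_diag(0.3, -0.1))  # indefinite\n```\n\n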
For full $2\times 2$ dual pol or $3\times 3$ quad pol SAR imagery, devising an efficient way to determine the Loewner order is more difficult, see [Nielsen (2019)](https://ieeexplore.ieee.org/document/8913617).\n\nSo let's include the Loewner order in our change map:", "_____no_output_____" ] ], [ [ "c_map = p_value.multiply(0).where(p_value.lt(0.01), 1)\ndiff = im2.subtract(im1)\nd_map = c_map.multiply(0) # Initialize the direction map to zero.\nd_map = d_map.where(det(diff).gt(0), 2) # All pos or neg def diffs are now labeled 2.\nd_map = d_map.where(diff.select(0).gt(0), 3) # Re-label pos def (and label some indef) to 3.\nd_map = d_map.where(det(diff).lt(0), 1) # Label all indef to 1.\nc_map = c_map.multiply(d_map) # Re-label the c_map, 0*X = 0, 1*1 = 1, 1*2= 2, 1*3 = 3.", "_____no_output_____" ] ], [ [ "Now we display the changes, with positive definite red, negative definite blue, and indefinite yellow:", "_____no_output_____" ] ], [ [ "mp = folium.Map(location=location, zoom_start=12)\nmp.add_ee_layer(crop2018, {'min': 0, 'max': 255}, 'crop2018')\nmp.add_ee_layer(\n c_map.updateMask(c_map.gt(0)), {\n 'min': 0,\n 'max': 3,\n 'palette': ['black', 'yellow', 'blue', 'red']\n }, 'c_map')\nmp.add_child(folium.LayerControl())", "_____no_output_____" ] ], [ [ "The more or less compact blue changes indicate a decrease in reflectivity in both VV and VH bands, and correspond to crop harvesting (especially canola).\n\n", "_____no_output_____" ], [ "### Outlook\nWe have now covered the subject of bitemporal change detection with GEE Sentinel-1 imagery. The beauty of GEE is that it is trivially easy to gather arbitrarily long time series of S1 images from the archive, all with revisit times of 6 or 12 days depending on whether one or both satellites are collecting data. The next part of the Tutorial will generalize the techniques we have learned so far to treat multitemporal change detection.\n\n### Oh, and one more thing ...\n\nWe didn't mention it above, but note the similarity between Eq. (2.10) and Eq. (2.15). To go from the monovariate LRT to the bivariate LRT, we simply replace the product of intensities $s_1s_2$ by the product of determinants $|c_1||c_2|$, the sum $s_1+s_2$ by $|c_1+c_2|$ and the factor $2^{2}$ by $2^4=2^{2\cdot2}$. This observation will come in handy in Part 3.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7199eb862c7a5cdf18ffd5f3702fcdb6ffc9097
4,295
ipynb
Jupyter Notebook
Building Python APIs with Flask, Flask-RESTPlus and Swagger UI.ipynb
jimit105/Medium-Articles
10ea3f3eda20843f714b4868228cf6740ae8968a
[ "MIT" ]
1
2021-01-26T19:49:26.000Z
2021-01-26T19:49:26.000Z
Building Python APIs with Flask, Flask-RESTPlus and Swagger UI.ipynb
jimit105/medium-articles
10ea3f3eda20843f714b4868228cf6740ae8968a
[ "MIT" ]
1
2020-05-12T04:55:49.000Z
2020-05-12T16:42:14.000Z
Building Python APIs with Flask, Flask-RESTPlus and Swagger UI.ipynb
jimit105/Medium-Articles
10ea3f3eda20843f714b4868228cf6740ae8968a
[ "MIT" ]
null
null
null
23.216216
80
0.489872
[ [ [ "# Building Python APIs with Flask, Flask-RESTPlus and Swagger UI", "_____no_output_____" ], [ "## Medium Article Link: <https://medium.com/p/7461b3a9a2c8>", "_____no_output_____" ], [ "## Minimal API", "_____no_output_____" ], [ "```python\nfrom flask import Flask\nfrom flask_restplus import Api, Resource\n\napp = Flask(__name__)\napi = Api(app)\n\[email protected]('/hello/')\nclass HelloWorld(Resource):\n def get(self):\n return \"Hello World\"\n \nif __name__ == '__main__':\n app.run()\n```", "_____no_output_____" ], [ "## Fetching Request Parameters", "_____no_output_____" ], [ "```python\nfrom flask import Flask\nfrom flask_restplus import Api, Resource, reqparse\n\napp = Flask(__name__)\napi = Api(app)\n\nparser = reqparse.RequestParser()\nparser.add_argument('name', help='Specify your name')\n\[email protected]('/hello/')\nclass HelloWorld(Resource):\n \n @api.doc(parser=parser)\n def get(self): \n args = parser.parse_args()\n name = args['name']\n return \"Hello \" + name\n \nif __name__ == '__main__':\n app.run()\n```", "_____no_output_____" ], [ "## File Upload", "_____no_output_____" ], [ "```python\nfrom flask import Flask\nfrom flask_restplus import Api, Resource\nfrom werkzeug.datastructures import FileStorage\n\napp = Flask(__name__)\napi = Api(app)\n\nupload_parser = api.parser()\nupload_parser.add_argument('file', \n location='files',\n type=FileStorage)\n\n\[email protected]('/upload/')\[email protected](upload_parser)\nclass UploadDemo(Resource):\n def post(self):\n args = upload_parser.parse_args()\n file = args.get('file')\n print(file.filename)\n return \"Uploaded file is \" + file.filename\n\nif __name__ == '__main__':\n app.run() \n```", "_____no_output_____" ], [ "If you get the following error while deploying: \n\n```python\nfrom werkzeug import cached_property\nImportError: cannot import name 'cached_property'\n```\n\nAdd this line before `from werkzeug.datastructures import FileStorage`:\n```python\nimport werkzeug\nwerkzeug.cached_property = werkzeug.utils.cached_property\n```", "_____no_output_____" ], [ "## API Parameters", "_____no_output_____" ], [ "```python\napi = Api(app,\n version='10.5',\n title='Flask Restplus Demo',\n description='Demo to show various API parameters',\n license='MIT',\n contact='Jimit Dholakia',\n contact_url='https://in.linkedin.com/in/jimit105',\n doc = '/docs/',\n prefix='/test'\n )\n```", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e719b9277440d6c0d97c90cf6fe6a998e3cbd527
23,818
ipynb
Jupyter Notebook
02-Pandas/.ipynb_checkpoints/pandas_test-checkpoint.ipynb
stoniah/mlmathmech
bf241b4cebf0b32503ef89ff6e98b2f7d406b88b
[ "MIT" ]
16
2019-09-17T12:50:15.000Z
2021-01-27T12:49:29.000Z
02-Pandas/.ipynb_checkpoints/pandas_test-checkpoint.ipynb
stoniah/mlmathmech
bf241b4cebf0b32503ef89ff6e98b2f7d406b88b
[ "MIT" ]
null
null
null
02-Pandas/.ipynb_checkpoints/pandas_test-checkpoint.ipynb
stoniah/mlmathmech
bf241b4cebf0b32503ef89ff6e98b2f7d406b88b
[ "MIT" ]
14
2019-09-21T21:18:26.000Z
2020-01-23T10:35:59.000Z
33.593794
455
0.464733
[ [ [ "import numpy as np\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\ndata = pd.read_csv('data/titanic_train.csv', index_col='PassengerId')", "_____no_output_____" ] ], [ [ "# SLIDE (1) ПодФрейм Вандермонда.", "_____no_output_____" ], [ "Для вектора $x=(x_0, \\ldots, x_n)$ матрица Вандермона выглядит следующим образом:\n$$A = \\begin{pmatrix}\n1 & x_0 & \\cdots & x_0^n \\\\\n1 & x_1 & \\cdots & x_1^n \\\\\n\\vdots & \\ddots & \\ddots & \\vdots \\\\\n1 & x_n & \\cdots & x_n^n\n\\end{pmatrix}$$ \n\nА датафрейм Вандермонда выглядит аналогично:\n\n| | 0 | 1 | |n |\n|-------|---|----|--|----|\n|0 | 1 | $x_0$ | $\\ldots$ | $x_0^n$ |\n|1 | 1 | $x_1$ | $\\ldots$ | $x_1^n$ |\n|$\\vdots$ | $\\vdots$|$\\ddots$ | $\\ddots$ | $\\vdots$ |\n|$n$ | 1 |$x_n$| | $x_n^n$ |\n\n\nНа вход подается вектор вещественных чисел длины $n+1 > 1$ и две пары индексов. Вам нужно вернуть подФрейм Вандермонда , где первая пара индексов - задает срез по строкам, а вторая пара индексов - срез по колонкам. Индексы и колонки - нужно оставить неизменнымы при взятии подФрейма. Все индексы находятся в пределах допустимого - проверять их на корректность не нужно. Результатом не может быть пустой Фрейм, индексы в паре не равны между собой.\n\nПодсказка: не нужно создавать самим матрицу вандермонда, она давно реализована за вас.\n\n### Sample 1\n#### Input:\n```python\nx = np.array([3,1,5,4,2])\nindexes = (0,3)\ncolumns = (2,4)\n```\n#### Output:\n\n| | 2 | 3 |\n|--|---|----|\n|0 | 9 | 27 | \n|1 | 1 | 1 | \n|2 | 25 |125|", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\ndef subVander(x: np.ndarray, indexes: tuple, columns: tuple) -> pd.DataFrame:\n ### ╰( ͡° ͜ʖ ͡° )つ──☆*:・゚\n pass", "_____no_output_____" ], [ "def subVander(x: np.ndarray, indexes: tuple, columns: tuple) -> pd.DataFrame:\n df = pd.DataFrame(data = np.vander(x, increasing=True), \n index = np.arange(len(x)), \n columns = np.arange(len(x)))\n return df.iloc[indexes[0]:indexes[1], columns[0]:columns[1]]", "_____no_output_____" ], [ "import inspect\nlines = inspect.getsource(subVander)\nassert ' print(' not in lines\nassert ' while ' not in lines \nassert ' map(' not in lines \nassert ' for ' not in lines\nassert ' open(' not in lines\n######################################################\nx = np.array([3,1,5,4,2])\n(i,j) = (0,3)\n(u,v) = (2,4)\nanswer = pd.DataFrame(\n data = np.array([[9, 27],[1, 1], [25, 125]]), \n index = np.arange(0, 3), \n columns = np.arange(2,4)\n)\nassert_frame_equal(\n subVander(x, (i,j), (u,v)),\n answer\n)\n######################################################\nx = np.array([3,1,5,4,2])\n(i,j) = (0,1)\n(u,v) = (0,1)\nanswer = pd.DataFrame(\n data = np.array([1]), \n index = np.arange(0, 1), \n columns = np.arange(0,1)\n)\nassert_frame_equal(\n subVander(x, (i,j), (u,v)),\n answer\n)\n######################################################\nx = np.array([3.5,1.1,-7.7, 6, 2])\n(i,j) = (0,3)\n(u,v) = (2,4)\nanswer = pd.DataFrame(\n data = np.array([[12.25, 42.875],[1.21, 1.331], [59.29, -456.533]]), \n index = np.arange(0, 3), \n columns = np.arange(2, 4)\n)\nassert_frame_equal(\n subVander(x, (i,j), (u,v)),\n answer\n)\n######################################################", "_____no_output_____" ] ], [ [ "# SLIDE (2) Бухгалтерия зоопарка.", "_____no_output_____" ], [ "Вам на вход подается словарь, где ключ - это тип животного, а значение - словарь с признаками этого животного, где ключ - тип признака, а значение - значение признака (Типичный json проще говоря). 
Наименования признаков животного - всегда строки. Значения признаков - любой из 5 типов pandas.\n\nВам следует создать табличку, где по строчкам будут идти животные, а по колонкам - их признаки, которая удовлетворяет следующим условиям\n* Тип животного нужно выделить в отдельную колонку `Type`\n* Строки отсортированы по типу животного в алфавитном порядке\n* Колонки отсортированы в алфавитном порядке, кроме колонки `Type` - она первая\n* Индексы строк - ряд натуральных чисел начиная с 0 без пропусков.\n\nИмейте в виду, что признаки у двух животных могут не совпадать, значит незаполненные данные нужно заполнить Nan значением.\n\nВерните на выходе табличку(`DataFrame`), в которой отсутствуют Nan значения. При этом могут отсутствовать некоторые признаки, но животные должны присутствовать все. Изначальные типы значений из словаря: `int64`, `float64`, `bool` и.т.д. должны сохраниться и в конечной табличке, а не превратиться в `object`-ы. (От удаляемых признаков, этого, очевидно, не требуется).\n\nПодсказка: изучите функцию `pandas.from_dict`\n\n### Sample 1\n#### Input:\n```python\nZOO = {\n 'cat':{'color':'black', 'tail_len': 50, 'injured': False}, \n 'dog':{'age': 6, 'tail_len': 30.5, 'injured': True}\n }\n```\n#### Output:\n\n| | Type | injured |tail_len |\n|--|----|--------|-------|\n|0 | cat | False | 50.0 |\n|1 | dog | True | 30.5 |\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\ndef ZOOtable(zoo: dict) -> pd.DataFrame:\n ### ╰( ͡° ͜ʖ ͡° )つ──☆*:・゚\n pass", "_____no_output_____" ], [ "def ZOOtable(zoo: dict) -> pd.DataFrame:\n df = pd.DataFrame.from_dict(zoo, orient='index').dropna(axis=1)\n sort_df = df.reindex(sorted(df.index), axis=0).reindex(sorted(df.columns), axis=1)\n return sort_df.reset_index().rename(columns={'index':'Type'})", "_____no_output_____" ], [ "import inspect\nlines = inspect.getsource(ZOOtable)\nassert ' print(' not in lines\nassert ' while ' not in lines \nassert ' map(' not in lines \nassert ' for ' not in lines\nassert ' open(' not in lines\n######################################################\nZOO = {\n 'cat': {'color':'black', 'tail_len': 50.0, 'injured': False}, \n 'dog': {'age': 6, 'tail_len': 30.5, 'injured': True}\n }\nanswer = pd.DataFrame(\n {\n 'Type':['cat', 'dog'], \n 'injured':[False, True], \n 'tail_len':[50.0, 30.5]\n }\n)\nassert_frame_equal(\n ZOOtable(ZOO),\n answer\n)\n######################################################\nZOO = {\n 'cat': {'color':'black'}, \n 'dog': {'age': 6}\n }\nanswer = pd.DataFrame(\n {\n 'Type':['cat', 'dog']\n }\n)\nassert_frame_equal(\n ZOOtable(ZOO),\n answer\n)\n######################################################\nZOO = {\n 'fish': {'injured': False, 'color':'gold', 'tail_len': 0.5, 'age': 0.5}, \n 'cat': {'age': 8, 'color':'black', 'tail_len': 50.0, 'injured': False}, \n 'dog': {'color':'grey', 'age': 6, 'tail_len': 30.5, 'injured': True}\n }\nanswer = pd.DataFrame(\n {\n 'Type':['cat', 'dog','fish'],\n 'age':[8.0, 6.0, 0.5],\n 'color':['black', 'grey', 'gold'], \n 'injured':[False, True, False], \n 'tail_len':[50.0, 30.5, 0.5]\n }\n)\nassert_frame_equal(\n ZOOtable(ZOO),\n answer\n)\n######################################################\nZOO = {\n 'cat': {'age': 8, 'color':'black', 'tail_len': 50.0, 'injured': False}, \n }\nanswer = pd.DataFrame(\n {\n 'Type':['cat'],\n 'age':[8],\n 'color':['black'], \n 'injured':[False], \n 'tail_len':[50.0]\n }\n)\nassert_frame_equal(\n ZOOtable(ZOO),\n answer\n)\n######################################################", "_____no_output_____" ] 
], [ [ "# SLIDE(2) Простые преобразования.", "_____no_output_____" ], [ "На вход подается `DataFrame` из 3-х колонок дата рождения и смерти человека на **русском** языке в формате представленом ниже:\n\n| | Имя | Дата рождения | Дата смерти |\n|--|-----------------|----------------|-----------------|\n|0 |Никола Тесла |10 июля 1856 г. |7 января 1943 г. |\n|1 |Альберт Эйнштейн |14 марта 1879 г.|18 апреля 1955 г.| \n\nНеобходимо вернуть исходную таблицу с добавленным в конце столбцом полных лет жизни.\n\n\n| | Имя | Дата рождения | Дата смерти | Полных лет|\n|--|-----------------|----------------|-----------------|-----------|\n|0 |Никола Тесла |10 июля 1856 г. |7 января 1943 г. | 86 |\n|1 |Альберт Эйнштейн |14 марта 1879 г.|18 апреля 1955 г.| 76 |\n\nФормат неизменен, пробелы мужду элементами дат присутствуют, исключений(Nan) нету.\n\nПодсказка: месяца имеют лишь одну форму склонения в датах.\n\nПодсказка: воспользоваться функцией `apply` или `map`\n\nПодсказка: воспользоваться методом `pd.to_datetime` или модулем `datetime`\n\nПодсказка: чтобы не париться с високосными годами можно воспользоваться модулем `dateutil.relativedelta`", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\ndef rus_feature(df: pd.DataFrame) -> pd.DataFrame:\n ### ╰( ͡° ͜ʖ ͡° )つ──☆*:・゚\n pass", "_____no_output_____" ], [ "from dateutil.relativedelta import relativedelta\n\ndef rus_feature(df: pd.DataFrame) -> pd.DataFrame:\n d = {'января':'Jan', 'февраля':'Feb', 'марта':'Mar', 'апреля':'Apr', 'мая':'May', 'июня':'Jun', \n 'июля':'Jul', 'августа':'Aug', 'сентября':'Sep', 'октября': 'Oct', 'ноября':'Nov', 'декабря':'Dec'}\n\n def f(date):\n s = date.split()\n return pd.to_datetime(s[0] + d[s[1]] + s[2])\n \n df['birth'] = df['Дата рождения'].apply(f)\n df['death'] = df['Дата смерти'].apply(f)\n \n df['Полных лет'] = df.apply(lambda row: relativedelta(row['death'], row['birth']).years, axis=1)\n \n return df.drop(columns=['death', 'birth'])\n", "_____no_output_____" ], [ "import inspect\nlines = inspect.getsource(rus_feature)\nassert ' print(' not in lines\nassert ' while ' not in lines \nassert ' map(' not in lines \nassert ' for ' not in lines\nassert ' open(' not in lines\n######################################################\nnames = pd.DataFrame({'Имя':['Никола Тесла', 'Альберт Эйнштейн'], \n 'Дата рождения':['10 июля 1856 г.','14 марта 1879 г.'],\n 'Дата смерти': ['7 января 1943 г.', '18 апреля 1955 г.']})\nanswer = pd.DataFrame({'Имя':['Никола Тесла', 'Альберт Эйнштейн'], \n 'Дата рождения':['10 июля 1856 г.','14 марта 1879 г.'],\n 'Дата смерти': ['7 января 1943 г.', '18 апреля 1955 г.'],\n 'Полных лет':[86, 76]})\nassert_frame_equal(\n rus_feature(names),\n answer\n)\n######################################################\nnames = pd.DataFrame({'Имя':['Никола Тесла'], \n 'Дата рождения':['10 июля 1856 г.'],\n 'Дата смерти': ['7 января 1857 г.']})\nanswer = pd.DataFrame({'Имя':['Никола Тесла'], \n 'Дата рождения':['10 июля 1856 г.'],\n 'Дата смерти': ['7 января 1857 г.'],\n 'Полных лет':[0]})\nassert_frame_equal(\n rus_feature(names),\n answer\n)\n######################################################\nnames = pd.DataFrame({'Имя':['Никола Тесла'], \n 'Дата рождения':['1 января 2000 г.'],\n 'Дата смерти': ['31 декабря 2000 г.']})\nanswer = pd.DataFrame({'Имя':['Никола Тесла'], \n 'Дата рождения':['1 января 2000 г.'],\n 'Дата смерти': ['31 декабря 2000 г.'],\n 'Полных лет':[0]})\nassert_frame_equal(\n rus_feature(names),\n 
answer\n)\n######################################################\nnames = pd.DataFrame({'Имя':['Никола Тесла'], \n 'Дата рождения':['1 января 2000 г.'],\n 'Дата смерти': ['1 января 2001 г.']})\nanswer = pd.DataFrame({'Имя':['Никола Тесла'], \n 'Дата рождения':['1 января 2000 г.'],\n 'Дата смерти': ['1 января 2001 г.'],\n 'Полных лет':[1]})\nassert_frame_equal(\n rus_feature(names),\n answer\n)\n######################################################", "_____no_output_____" ] ], [ [ "# SLIDE(1) Характеристики.", "_____no_output_____" ], [ "Верните среднее, медиану, максимальное и минимальное значение возраста **погибших** мужчин. Именно в данном порядке.\n\nПодсказка: если используете маску - не забудьте поставить выражения в круглые скобки.\n\n### Sample 1\n#### Input:\n```python\ndata = pd.read_csv('data/titanic_train.csv', index_col='PassengerId')\n```", "_____no_output_____" ] ], [ [ "def men_stat(df: pd.DataFrame) -> (float, float, float, float):\n ### ╰( ͡° ͜ʖ ͡° )つ──☆*:・゚\n pass", "_____no_output_____" ], [ "def men_stat(df: pd.DataFrame) -> (float, float, float, float):\n mens = data[(data['Sex']=='male') & (data['Survived']==0)]\n return mens['Age'].mean(), mens['Age'].median(), mens['Age'].max(), mens['Age'].min()", "_____no_output_____" ], [ "import inspect\nlines = inspect.getsource(men_stat)\nassert ' print(' not in lines\nassert ' while ' not in lines \nassert ' map(' not in lines \nassert ' for ' not in lines\nassert ' open(' not in lines\n######################################################\nmean, med, mx, mn = men_stat(data)\nassert mean - 31.618 < 1e-3\nassert med == 29\nassert mx == 74\nassert mn == 1\n\n######################################################", "_____no_output_____" ] ], [ [ "# SLIDE (1) Сводная таблица.", "_____no_output_____" ], [ "Сделать сводную таблицу по **медианному возрасту** для пола и класса. Для примера посмотрите сводную таблицу по сумме выживших, для пола и класса. Индексы вашей сводной таблицы должны идти в том же порядке.\n```Python\n Survived\nSex Pclass \nfemale 1 91\n 2 70\n 3 72\nmale 1 45\n 2 17\n 3 47\n```\n\nПодсказка: медиана не всегда дает целое число.\n\n### Sample 1\n#### Input:\n```python\ndata = pd.read_csv('data/titanic_train.csv', index_col='PassengerId')\n```", "_____no_output_____" ] ], [ [ "def age_stat(df: pd.DataFrame) -> pd.DataFrame:\n ### ╰( ͡° ͜ʖ ͡° )つ──☆*:・゚\n pass", "_____no_output_____" ], [ "def age_stat(df: pd.DataFrame) -> pd.DataFrame:\n return df.pivot_table(values = ['Age'], \n index = ['Sex', 'Pclass'], \n aggfunc= lambda x: x.median())", "_____no_output_____" ], [ "import inspect\nlines = inspect.getsource(age_stat)\nassert ' print(' not in lines\nassert ' while ' not in lines \nassert ' map(' not in lines \nassert ' for ' not in lines\nassert ' open(' not in lines\n######################################################\nidx = pd.MultiIndex.from_product([['female', 'male'],\n np.arange(1,4)],\n names=['Sex', 'Pclass'])\ncol = ['Age']\n\nans = pd.DataFrame(np.array([35.0, 28.0, 21.5, 40.0, 30.0, 25.0]), idx, col)\nans\n\nassert_frame_equal(\n age_stat(data),\n ans\n)\n######################################################", "_____no_output_____" ] ], [ [ "# SLIDE (3) Популярные девушки.", "_____no_output_____" ], [ "Выведите список имен незамужних женщин(`Miss`) отсортированный по популярности. 
\n* В полном имени девушек **имя** - это **первое слово без скобок** после `Miss`.\n* Остальные строки не рассматриваем.\n* Девушки с одинаковой популярностью сортируются по имени в алфавитном порядке.\n\n**Слово/имя** - подстрока без пробелов.\n**Популярность** - количество таких имен в таблице.\n\nПодсказка: воспользуйтесь методом `pd.Series.str.extract`\n\nПодсказка: не забудьте убрать единственную неподходящую девушку под паттерн: \"Meanwell, Miss. (Marion Ogden)\"\n\nПодсказка: для удобного `groupby` сделайте столбец с единичками, на который в последствии и будете навешивать `count`.\n\nПодсказка: не нужно изменять имена \"Mari\" и \"Mary\" - разные имена\n\nПодсказка: при сложной сортировке можно вывести индекс в колонку и отсортировать данные вместе с новой колонкой.\n\n### Sample 1\n#### Input:\n```python\ndata = pd.read_csv('data/titanic_train.csv', index_col='PassengerId')\n```\n#### Output:\nВот начало данного списка. Заметьте, **названия колонок должны совпадать** \n\n| | Name | Popularity |\n|--|----|--------|\n|0 |Anna |9|\n|1 |Mary |9\n|2 |Margaret|6\n|3 |Elizabeth|5\n|4 |Alice |4\n|5 |Bertha |4\n|6 |Ellen |4\n|7 |Helen |4\n", "_____no_output_____" ] ], [ [ "def fename_stat(df: pd.DataFrame) -> pd.DataFrame:\n ### ╰( ͡° ͜ʖ ͡° )つ──☆*:・゚\n pass", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\n\ndef Afename_stat(df: pd.DataFrame) -> pd.DataFrame:\n miss_names = data[data['Name'].str.contains('Miss.')][['Name']].reset_index(drop=True)\n miss = miss_names['Name'].str.extract(r'Miss. (?P<Name>\\w+)(?P<Popularity>\\s|$)').dropna().reset_index(drop=True)\n pop_group = miss.groupby(by='Name')['Popularity'].count()\n sort_name = pop_group.reset_index()\\\n .rename({'index':'Name'})\\\n .sort_values(by=['Popularity','Name'], ascending=[False,True])\\\n .reset_index(drop=True)\n return sort_name", "_____no_output_____" ], [ "import inspect\nlines = inspect.getsource(fename_stat)\nassert ' print(' not in lines\nassert ' while ' not in lines \nassert ' map(' not in lines \nassert ' for ' not in lines\nassert ' open(' not in lines\n######################################################\nassert_frame_equal(\n fename_stat(data),\n Afename_stat(data)\n)\n######################################################", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
e719bb1a3973c2db9f41efb07eab381b0d450e61
3,213
ipynb
Jupyter Notebook
Day 07/Recursive.ipynb
VasTsak/julia_100
0ec969824f0d80e153581c506f197c76e0e8c629
[ "Apache-2.0" ]
1
2022-02-12T21:11:40.000Z
2022-02-12T21:11:40.000Z
Day 07/Recursive.ipynb
VasTsak/julia_100
0ec969824f0d80e153581c506f197c76e0e8c629
[ "Apache-2.0" ]
null
null
null
Day 07/Recursive.ipynb
VasTsak/julia_100
0ec969824f0d80e153581c506f197c76e0e8c629
[ "Apache-2.0" ]
null
null
null
18.9
141
0.467476
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e719e43d6fccaff9800da10697f9ab1fe82d6395
8,824
ipynb
Jupyter Notebook
bronze/B17_Two_Probabilistic_Bits.ipynb
MrQubo/www2020-qiskit-all
d5ae076751a07989afe302dd9d39e8244adc00df
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
bronze/B17_Two_Probabilistic_Bits.ipynb
MrQubo/www2020-qiskit-all
d5ae076751a07989afe302dd9d39e8244adc00df
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
bronze/B17_Two_Probabilistic_Bits.ipynb
MrQubo/www2020-qiskit-all
d5ae076751a07989afe302dd9d39e8244adc00df
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
38.701754
309
0.528445
[ [ [ "<table> <tr>\n <td style=\"background-color:#ffffff;\">\n <a href=\"http://qworld.lu.lv\" target=\"_blank\"><img src=\"..\\images\\qworld.jpg\" width=\"25%\" align=\"left\"> </a></td>\n <td style=\"background-color:#ffffff;vertical-align:bottom;text-align:right;\">\n prepared by <a href=\"http://abu.lu.lv\" target=\"_blank\">Abuzer Yakaryilmaz</a> (<a href=\"http://qworld.lu.lv/index.php/qlatvia/\" target=\"_blank\">QLatvia</a>)\n </td> \n</tr></table>", "_____no_output_____" ], [ "<table width=\"100%\"><tr><td style=\"color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;\">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\I}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & 1} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $", "_____no_output_____" ], [ "<h2> Two Probabilistic Bits</h2>\n\nSuppose that we have two probabilistic bits, and our probabilistic states respectively are\n\n$$ \\myvector{0.2 \\\\ 0.8} \\mbox{ and } \\myvector{0.6 \\\\ 0.4 }. $$\n\nIf we combine both bits as a single system, then what is the state of the combined system?", "_____no_output_____" ], [ "In total, we have four different states. 
We can name them as follows:\n<ul>\n <li>00: both bits are in state 0</li>\n <li>01: the first bit is in state 0 and the second bit is in state 1</li>\n <li>10: the first bit is in state 1 and the second bit is in state 0</li>\n <li>11: both bits are in state 1</li>\n</ul>", "_____no_output_____" ], [ "<h3> Task 1 </h3>\n\n<b>Discussion and analysis:</b>\n\nWhat are the probabilities of being in states $ 00 $, $ 01 $, $ 10 $, and $11$?\n\nHow can we represent these probabilities as a column vector?", "_____no_output_____" ], [ "<h3> Representation for states 0 and 1</h3>\n\nThe vector representation of state 0 is $ \\myvector{1 \\\\ 0} $. Similarly, the vector representation of state 1 is $ \\myvector{0 \\\\ 1} $.\n\nWe use $ \\pstate{0} $ to represent $ \\myvector{1 \\\\ 0} $ and $ \\pstate{1} $ to represent $ \\myvector{0 \\\\ 1} $.\n\nThen, the probabilistic state $ \\myvector{0.2 \\\\ 0.8} $ is also represented as $ 0.2 \\pstate{0} + 0.8 \\pstate{1} $.\n\nSimilarly, the probabilistic state $ \\myvector{0.6 \\\\ 0.4} $ is also represented as $ 0.6 \\pstate{0} + 0.4 \\pstate{1} $.", "_____no_output_____" ], [ "<h3> Composite system </h3>\n\nWhen two systems are composed, their states are tensored to calculate the state of the composite system.\n\nThe probabilistic state of the first bit is $ \\myvector{0.2 \\\\ 0.8} = 0.2 \\pstate{0} + 0.8 \\pstate{1} $.\n\nThe probabilistic state of the second bit is $ \\myvector{0.6 \\\\ 0.4} = 0.6 \\pstate{0} + 0.4 \\pstate{1} $.\n\nThen, the probabilistic state of the composite system is $ \\big( 0.2 \\pstate{0} + 0.8 \\pstate{1} \\big) \\otimes \\big( 0.6 \\pstate{0} + 0.4 \\pstate{1} \\big) $.\n", "_____no_output_____" ], [ "<h3> Task 2 </h3>\n\nFind the probabilistic state of the composite system.\n\n<i> \nRule 1: Tensor product distributes over addition in the same way as the distribution of multiplication over addition.\n\nRule 2: $ \\big( 0.3 \\pstate{1} \\big) \\otimes \\big( 0.7 \\pstate{0} \\big) = (0.3 \\cdot 0.7) \\big( \\pstate{1} \\otimes \\pstate{0} \\big) = 0.21 \\pstate{10} $.\n</i>", "_____no_output_____" ], [ "<a href=\"B17_Two_Probabilistic_Bits_Solutions.ipynb#task2\">click for our solution</a>", "_____no_output_____" ], [ "<h3> Task 3</h3>\n\nFind the probabilistic state of the composite system by calculating this tensor product $ \\myvector{0.2 \\\\ 0.8} \\otimes \\myvector{0.6 \\\\ 0.4 } $.", "_____no_output_____" ], [ "<a href=\"B17_Two_Probabilistic_Bits_Solutions.ipynb#task3\">click for our solution</a>", "_____no_output_____" ], [ "<h3> Task 4</h3>\n\nFind the vector representations of $ \\pstate{00} $, $ \\pstate{01} $, $\\pstate{10}$, and $ \\pstate{11} $.\n\n<i>The vector representation of $ \\pstate{ab} $ is $ \\pstate{a} \\otimes \\pstate{b} $ for $ a,b \\in \\{0,1\\} $.</i>", "_____no_output_____" ], [ "<a href=\"B17_Two_Probabilistic_Bits_Solutions.ipynb#task4\">click for our solution</a>", "_____no_output_____" ], [ "<h3> Task 5 [extra] </h3>\n\nSuppose that we have three bits.\n\nFind the vector representations of $ \\pstate{abc} $ for each $ a,b,c \\in \\{0,1\\} $.", "_____no_output_____" ], [ "<h3> Task 6 [extra] </h3>\n\n<i>This is a challenging task.</i>\n\nSuppose that we have four bits. \n\nNumber 9 is represented as $ 1001 $ in binary. Verify that the vector representation of $ \\pstate{1001} $ is the zero vector except its $10$th entry, which is 1.\n\nNumber 7 is represented as $ 0111 $ in binary. 
Verify that the vector representation of $ \\pstate{0111} $ is the zero vector except its $8$th entry, which is 1.\n\nGeneralize this idea for any number between 0 and 15.\n\nGeneralize this idea for any number of bits.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e719e8bab800a25b2fa5d7aa67b3f1fbfc01584d
540,218
ipynb
Jupyter Notebook
projects/facial_keypoint_detection/1. Load and Visualize Data.ipynb
mmichal9/Computer_Vision_Nanodegree
d3c4a1ae1d35cc00637f24338656167bbd94ed0e
[ "MIT" ]
null
null
null
projects/facial_keypoint_detection/1. Load and Visualize Data.ipynb
mmichal9/Computer_Vision_Nanodegree
d3c4a1ae1d35cc00637f24338656167bbd94ed0e
[ "MIT" ]
null
null
null
projects/facial_keypoint_detection/1. Load and Visualize Data.ipynb
mmichal9/Computer_Vision_Nanodegree
d3c4a1ae1d35cc00637f24338656167bbd94ed0e
[ "MIT" ]
null
null
null
801.510386
207,788
0.949987
[ [ [ "# Facial Keypoint Detection\n \nThis project will be all about defining and training a convolutional neural network to perform facial keypoint detection, and using computer vision techniques to transform images of faces. The first step in any challenge like this will be to load and visualize the data you'll be working with. \n\nLet's take a look at some examples of images and corresponding facial keypoints.\n\n<img src='images/key_pts_example.png' width=50% height=50%/>\n\nFacial keypoints (also called facial landmarks) are the small magenta dots shown on each of the faces in the image above. In each training and test image, there is a single face and **68 keypoints, with coordinates (x, y), for that face**. These keypoints mark important areas of the face: the eyes, corners of the mouth, the nose, etc. These keypoints are relevant for a variety of tasks, such as face filters, emotion recognition, pose recognition, and so on. Here they are, numbered, and you can see that specific ranges of points match different portions of the face.\n\n<img src='images/landmarks_numbered.jpg' width=30% height=30%/>\n\n---", "_____no_output_____" ], [ "## Load and Visualize Data\n\nThe first step in working with any dataset is to become familiar with your data; you'll need to load in the images of faces and their keypoints and visualize them! This set of image data has been extracted from the [YouTube Faces Dataset](https://www.cs.tau.ac.il/~wolf/ytfaces/), which includes videos of people in YouTube videos. These videos have been fed through some processing steps and turned into sets of image frames containing one face and the associated keypoints.\n\n#### Training and Testing Data\n\nThis facial keypoints dataset consists of 5770 color images. All of these images are separated into either a training or a test set of data.\n\n* 3462 of these images are training images, for you to use as you create a model to predict keypoints.\n* 2308 are test images, which will be used to test the accuracy of your model.\n\nThe information about the images and keypoints in this dataset are summarized in CSV files, which we can read in using `pandas`. Let's read the training CSV and get the annotations in an (N, 2) array where N is the number of keypoints and 2 is the dimension of the keypoint coordinates (x, y).\n\n---", "_____no_output_____" ] ], [ [ "# import the required libraries\nimport glob\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport cv2", "_____no_output_____" ], [ "key_pts_frame = pd.read_csv('data/training_frames_keypoints.csv')\n\nn = 0\nimage_name = key_pts_frame.iloc[n, 0]\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nprint('Image name: ', image_name)\nprint('Landmarks shape: ', key_pts.shape)\nprint('First 4 key pts: {}'.format(key_pts[:4]))", "Image name: Luis_Fonsi_21.jpg\nLandmarks shape: (68, 2)\nFirst 4 key pts: [[ 45. 98.]\n [ 47. 106.]\n [ 49. 110.]\n [ 53. 119.]]\n" ], [ "# print out some stats about the data\nprint('Number of images: ', key_pts_frame.shape[0])", "Number of images: 3462\n" ] ], [ [ "## Look at some images\n\nBelow, is a function `show_keypoints` that takes in an image and keypoints and displays them. As you look at this data, **note that these images are not all of the same size**, and neither are the faces! 
To eventually train a neural network on these images, we'll need to standardize their shape.", "_____no_output_____" ] ], [ [ "def show_keypoints(image, key_pts):\n \"\"\"Show image with keypoints\"\"\"\n plt.imshow(image)\n plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')\n", "_____no_output_____" ], [ "# Display a few different types of images by changing the index n\n\n# select an image by index in our data frame\nn = 0\nimage_name = key_pts_frame.iloc[n, 0]\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nplt.figure(figsize=(5, 5))\nshow_keypoints(mpimg.imread(os.path.join('data/training/', image_name)), key_pts)\nplt.show()", "_____no_output_____" ] ], [ [ "## Dataset class and Transformations\n\nTo prepare our data for training, we'll be using PyTorch's Dataset class. Much of this code is a modified version of what can be found in the [PyTorch data loading tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n#### Dataset class\n\n``torch.utils.data.Dataset`` is an abstract class representing a\ndataset. This class will allow us to load batches of image/keypoint data, and uniformly apply transformations to our data, such as rescaling and normalizing images for training a neural network.\n\n\nYour custom dataset should inherit ``Dataset`` and override the following\nmethods:\n\n- ``__len__`` so that ``len(dataset)`` returns the size of the dataset.\n- ``__getitem__`` to support the indexing such that ``dataset[i]`` can\n be used to get the i-th sample of image/keypoint data.\n\nLet's create a dataset class for our face keypoints dataset. We will\nread the CSV file in ``__init__`` but leave the reading of images to\n``__getitem__``. This is memory efficient because all the images are not\nstored in the memory at once but read as required.\n\nA sample of our dataset will be a dictionary\n``{'image': image, 'keypoints': key_pts}``. Our dataset will take an\noptional argument ``transform`` so that any required processing can be\napplied on the sample. 
We will see the usefulness of ``transform`` in the\nnext section.\n", "_____no_output_____" ] ], [ [ "from torch.utils.data import Dataset, DataLoader\n\nclass FacialKeypointsDataset(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, csv_file, root_dir, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.key_pts_frame = pd.read_csv(csv_file)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.key_pts_frame)\n\n def __getitem__(self, idx):\n image_name = os.path.join(self.root_dir,\n self.key_pts_frame.iloc[idx, 0])\n \n image = mpimg.imread(image_name)\n \n # if image has an alpha color channel, get rid of it\n if(image.shape[2] == 4):\n image = image[:,:,0:3]\n \n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n key_pts = key_pts.astype('float').reshape(-1, 2)\n sample = {'image': image, 'keypoints': key_pts}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample", "_____no_output_____" ] ], [ [ "Now that we've defined this class, let's instantiate the dataset and display some images.", "_____no_output_____" ] ], [ [ "# Construct the dataset\nface_dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv',\n root_dir='data/training/')\n\n# print some stats about the dataset\nprint('Length of dataset: ', len(face_dataset))", "Length of dataset: 3462\n" ], [ "# Display a few of the images from the dataset\nnum_to_display = 3\n\nfor i in range(num_to_display):\n \n # define the size of images\n fig = plt.figure(figsize=(20,10))\n \n # randomly select a sample\n rand_i = np.random.randint(0, len(face_dataset))\n sample = face_dataset[rand_i]\n\n # print the shape of the image and keypoints\n print(i, sample['image'].shape, sample['keypoints'].shape)\n\n ax = plt.subplot(1, num_to_display, i + 1)\n ax.set_title('Sample #{}'.format(i))\n \n # Using the same display function, defined earlier\n show_keypoints(sample['image'], sample['keypoints'])\n", "0 (163, 128, 3) (68, 2)\n1 (210, 200, 3) (68, 2)\n2 (299, 275, 3) (68, 2)\n" ] ], [ [ "## Transforms\n\nNow, the images above are not of the same size, and neural networks often expect images that are standardized; a fixed size, with a normalized range for color ranges and coordinates, and (for PyTorch) converted from numpy lists and arrays to Tensors.\n\nTherefore, we will need to write some pre-processing code.\nLet's create four transforms:\n\n- ``Normalize``: to convert a color image to grayscale values with a range of [0,1] and normalize the keypoints to be in a range of about [-1, 1]\n- ``Rescale``: to rescale an image to a desired size.\n- ``RandomCrop``: to crop an image randomly.\n- ``ToTensor``: to convert numpy images to torch images.\n\n\nWe will write them as callable classes instead of simple functions so\nthat parameters of the transform need not be passed every time it's\ncalled. For this, we just need to implement the ``__call__`` method and \n(if we require parameters to be passed in), the ``__init__`` method. 
\nWe can then use a transform like this:\n\n    tx = Transform(params)\n    transformed_sample = tx(sample)\n\nObserve below how these transforms are generally applied to both the image and its keypoints.\n\n", "_____no_output_____" ] ], [ [ "import torch\nfrom torchvision import transforms, utils\n# transforms\n\nclass Normalize(object):\n \"\"\"Convert a color image to grayscale and normalize the color range to [0,1].\"\"\" \n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n \n image_copy = np.copy(image)\n key_pts_copy = np.copy(key_pts)\n\n # convert image to grayscale\n image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n \n # scale color range from [0, 255] to [0, 1]\n image_copy= image_copy/255.0\n \n # scale keypoints to be centered around 0 with a range of [-1, 1]\n # mean = 100, std = 50, so pts should be (pts - 100)/50\n key_pts_copy = (key_pts_copy - 100)/50.0\n\n\n return {'image': image_copy, 'keypoints': key_pts_copy}\n\n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or int): Desired output size. If tuple, output is\n matched to output_size. If int, smaller of image edges is matched\n to output_size keeping aspect ratio the same.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = cv2.resize(image, (new_w, new_h))\n \n # scale the pts, too\n key_pts = key_pts * [new_w / w, new_h / h]\n\n return {'image': img, 'keypoints': key_pts}\n\n\nclass RandomCrop(object):\n \"\"\"Randomly crop the image in a sample.\n\n Args:\n output_size (tuple or int): Desired output size. If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n image = image[top: top + new_h,\n left: left + new_w]\n\n key_pts = key_pts - [left, top]\n\n return {'image': image, 'keypoints': key_pts}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n \n # if image has no grayscale color channel, add one\n if(len(image.shape) == 2):\n # add that third color dim\n image = image.reshape(image.shape[0], image.shape[1], 1)\n \n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n \n return {'image': torch.from_numpy(image),\n 'keypoints': torch.from_numpy(key_pts)}", "_____no_output_____" ] ], [ [ "## Test out the transforms\n\nLet's test these transforms out to make sure they behave as expected. As you look at each transform, note that, in this case, **order does matter**. 
For example, you cannot crop an image using a value smaller than the original image (and the original images vary in size!), but, if you first rescale the original image, you can then crop it to any size smaller than the rescaled size.", "_____no_output_____" ] ], [ [ "## test out some of these transforms\nrescale = Rescale((250,250))\ncrop = RandomCrop(150)\ncomposed = transforms.Compose([Rescale((250,250)),\n RandomCrop(200),\n Rescale((250,250))\n ])#,Normalize(),ToTensor()])\n\n# apply the transforms to a sample image\ntest_num = 199\nsample = face_dataset[test_num]\n\nfig = plt.figure()\nfor i, tx in enumerate([rescale, composed, crop]):\n transformed_sample = tx(sample)\n\n ax = plt.subplot(1, 3, i + 1)\n plt.tight_layout()\n ax.set_title(type(tx).__name__)\n show_keypoints(transformed_sample['image'], transformed_sample['keypoints'])\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Create the transformed dataset\n\nApply the transforms in order to get grayscale images of the same shape. Verify that your transform works by printing out the shape of the resulting data (printing out a few examples should show you a consistent tensor size).", "_____no_output_____" ] ], [ [ "# define the data transform\n# order matters! i.e. rescaling should come before a smaller crop\ndata_transform = transforms.Compose([Rescale((250,250)), Normalize(), ToTensor()])\n\n# create the transformed dataset\ntransformed_dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv',\n root_dir='data/training/',\n transform=data_transform)\n", "_____no_output_____" ], [ "# print some stats about the transformed data\nprint('Number of images: ', len(transformed_dataset))\n\n# make sure the sample tensors are the expected size\nfor i in range(5):\n sample = transformed_dataset[i]\n print(i, sample['image'].size(), sample['keypoints'].size())\n", "Number of images: 3462\n0 torch.Size([1, 250, 250]) torch.Size([68, 2])\n1 torch.Size([1, 250, 250]) torch.Size([68, 2])\n2 torch.Size([1, 250, 250]) torch.Size([68, 2])\n3 torch.Size([1, 250, 250]) torch.Size([68, 2])\n4 torch.Size([1, 250, 250]) torch.Size([68, 2])\n" ] ], [ [ "## Data Iteration and Batching\n\nRight now, we are iterating over this data using a ``for`` loop, but we are missing out on a lot of PyTorch's dataset capabilities, specifically the abilities to:\n\n- Batch the data\n- Shuffle the data\n- Load the data in parallel using ``multiprocessing`` workers.\n\n``torch.utils.data.DataLoader`` is an iterator which provides all these\nfeatures, and we'll see this in use in the *next* notebook, Notebook 2, when we load data in batches to train a neural network!\n\n---\n\n", "_____no_output_____" ], [ "## Ready to Train!\n\nNow that you've seen how to load and transform our data, you're ready to build a neural network to train on this data.\n\nIn the next notebook, you'll be tasked with creating a CNN for facial keypoint detection.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
e719edfe59e6f25da340ef90d1d1c3352c53028e
4,731
ipynb
Jupyter Notebook
bus/bus.ipynb
MarcM58/bcn-feb-2019-prework
f36e6324ac1e12bfb008699071b4f5a3c890e5d1
[ "Unlicense" ]
null
null
null
bus/bus.ipynb
MarcM58/bcn-feb-2019-prework
f36e6324ac1e12bfb008699071b4f5a3c890e5d1
[ "Unlicense" ]
null
null
null
bus/bus.ipynb
MarcM58/bcn-feb-2019-prework
f36e6324ac1e12bfb008699071b4f5a3c890e5d1
[ "Unlicense" ]
null
null
null
20.480519
228
0.500106
[ [ [ "<img src=\"https://bit.ly/2VnXWr2\" width=\"100\" align=\"left\">", "_____no_output_____" ], [ "# Bus\n\nThis bus has a passenger entry and exit control system to monitor the number of occupants it carries and thus detect when there are too many.\n\nAt each stop, the entry and exit of passengers is represented by a tuple consisting of two integer numbers.\n```\nbus_stop = (in, out)\n```\nThe succession of stops is represented by a list of these tuples.\n```\nstops = [(in1, out1), (in2, out2), (in3, out3), (in4, out4)]\n```\n\n## Tools\nYou don't necessarily need to use all the tools. Maybe you opt to use some of them or completely different ones, they are given to help you shape the exercise. Programming exercises can be solved in many different ways.\n* Data structures: **lists, tuples**\n* Loop: **while/for loops**\n* Functions: **min, max, len**\n\n## Tasks", "_____no_output_____" ] ], [ [ "stops = [(10, 0), (4, 1), (3, 5), (3, 4), (5, 1), (1, 5), (5, 8), (4, 6), (2, 3)]", "_____no_output_____" ] ], [ [ "#### 1. Calculate the number of stops.", "_____no_output_____" ] ], [ [ "print (len(stops))", "9\n" ] ], [ [ "#### 2. Assign to a variable a list whose elements are the number of passengers at each stop (in-out).\nEach item depends on the previous item in the list + in - out.", "_____no_output_____" ] ], [ [ "stop_count = 0\npassengers = 0\npassengers_per_stops = []\n\nfor i in stops:\n stop_count += 1\n passengers += i[0]\n passengers -= i[1]\n passengers_per_stops.append(passengers)\n \nprint(passengers_per_stops)", "[10, 13, 11, 10, 14, 10, 7, 5, 4]\n" ] ], [ [ "#### 3. Find the maximum occupation of the bus.", "_____no_output_____" ] ], [ [ "print (max(passengers_per_stops))", "14\n" ] ], [ [ "#### 4. Calculate the average occupation. And the standard deviation.", "_____no_output_____" ] ], [ [ "print(int(sum(passengers_per_stops)/len(passengers_per_stops)))", "9\n" ], [ "import numpy\nnumpy.std(passengers)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e719f12c288ed9b56b10be1d1c7e0759eb8eb9d6
8,148
ipynb
Jupyter Notebook
Exercise Week 8 - Ahmad Ichsan Baihaqi.ipynb
ahmadichsan/python-exercise-d18
590b23e1d7923beaebedf8f0ee0480fde0e8a3ac
[ "MIT" ]
null
null
null
Exercise Week 8 - Ahmad Ichsan Baihaqi.ipynb
ahmadichsan/python-exercise-d18
590b23e1d7923beaebedf8f0ee0480fde0e8a3ac
[ "MIT" ]
null
null
null
Exercise Week 8 - Ahmad Ichsan Baihaqi.ipynb
ahmadichsan/python-exercise-d18
590b23e1d7923beaebedf8f0ee0480fde0e8a3ac
[ "MIT" ]
null
null
null
24.993865
276
0.500123
[ [ [ "**Exercise Week 8**\n\n**Author: Ahmad Ichsan Baihaqi**\n\n**Email: [email protected]**", "_____no_output_____" ], [ "#### For both questions, you can import any library.", "_____no_output_____" ], [ "### Question 1: Only One Extra Curricular Activity", "_____no_output_____" ], [ "Given a list of students who joined Science Club, Sports Club, and History Club. Your output should be telling us the names of students who only joins 1 club. Your function name will be `one_activity_finder`.\n\n#### Example 1:\n\n- Science = ['Peter', 'Tony', 'Rhodey', 'Bruce']\n- Sports = ['Steve, 'Peter', 'Thor', 'Natasha', 'Clint', 'Bruce']\n- History = ['Steve', 'Thor', 'Natasha', 'Nick]\n\nAs we can see, a few students joined more than 1 club. The only students who only joined 1 club activities are Tony, Rhodey, Clint, and Nick.\n\n`one_activity_finder(Science, Sports, History)` will give the output:\n\n'Tony', 'Rhodey', 'Clint', 'Nick'\n\n\n#### Example 2:\n\n- Science = ['Bruce', 'Victor', 'Barry', 'Lex', 'Hal']\n- Sports = ['Bruce', 'Victor', 'Barry', 'Clark', 'Diana']\n- History = ['Diana', 'Arthur', 'Bruce']\n\nThe students who only join 1 club are: 'Lex', 'Hal', 'Clark', 'Arthur'", "_____no_output_____" ] ], [ [ "def one_activity_finder(Science, Sports, History):\n my_list = [*Science, *Sports, *History]\n \n set_list = list(set(my_list))\n\n only_one = []\n\n for x in set_list:\n find = len([item for item in my_list if x == item])\n \n if (find > 1):\n continue\n \n only_one.append(x)\n \n return only_one", "_____no_output_____" ], [ "science_data = ['Bruce', 'Victor', 'Barry', 'Lex', 'Hal']\nsports_data = ['Bruce', 'Victor', 'Barry', 'Clark', 'Diana']\nhistory_data = ['Diana', 'Arthur', 'Bruce']\n\none_activity_finder(science_data, sports_data, history_data)", "_____no_output_____" ], [ "science_data = ['Peter', 'Tony', 'Rhodey', 'Bruce']\nsports_data = ['Steve', 'Peter', 'Thor', 'Natasha', 'Clint', 'Bruce']\nhistory_data = ['Steve', 'Thor', 'Natasha', 'Nick']\n \none_activity_finder(science_data, sports_data, history_data)", "_____no_output_____" ] ], [ [ "### Question 2: Three Most Common Letter", "_____no_output_____" ], [ "Given one string. Print out the 3 most common letter in this order:\n- Print the most common letter first\n- Then, move to the second most common letter\n- If there is a tie, refer to the alphabetical order and choose the first one to appear first\n\nYour function name is `top_letter`\n\n#### Example 1: \n\nInput = 'ddeeeeffgh'\n\nOutput = 'edf' \n\nThis is because 'e' appears the most, then the second place is a 'tie' between 'd' and 'f'. We should look to the alphabetical order, and since 'd' appears before 'f', we print 'd' first then 'f'.\n\n\n#### Example 2:\n\nInput = 'ddeeeeffgghh'\n\nOutput = 'edf'\n\nThis is because 'e' appears the most, then the second place is a 'tie' between 'd', 'f', 'g', and 'h'. We should look to the alphabetical order, and since 'd' appears before 'f', we print 'd' then 'f'. \n\nWait, why don't we print 'g' and 'h' too? Because we only want TOP 3 LETTERS. This is the challenge. \n\n#### Example 3:\n\nInput = 'aabbbbcdefg'\n\nOutput = 'bac'\n\nThis is because 'b' appears the most, then the second most common letter is 'a', and the third place is a tie between 'c', 'd', 'e', 'f', 'g'. We only choose 'c', because its the earliest letter in the alphabet, and since we only want 3 letters, we only can choose 'c'. 
", "_____no_output_____" ] ], [ [ "def top_letter(input_string):\n my_list = list(input_string)\n \n set_list = list(sorted(set(my_list)))\n\n counter = []\n\n for x in set_list:\n find = len([item for item in my_list if x == item])\n \n counter.append(find)\n \n sorted_counter = sorted(counter, reverse=True)\n \n top_three_value = sorted_counter[:3]\n\n top_three_words = []\n \n for x in top_three_value:\n index = counter.index(x)\n \n top_three_words.append(set_list[index])\n \n counter.pop(index)\n set_list.pop(index)\n \n return ''.join(map(str, top_three_words))", "_____no_output_____" ], [ "top_letter('ddeeeeffgh')", "_____no_output_____" ], [ "top_letter('ddeeeeffgghh')", "_____no_output_____" ], [ "top_letter('aabbbbcdefg')", "_____no_output_____" ], [ "top_letter('aaadddccbb')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e71a1ddd7b86a39d45e4a0625dde4a7036823a93
5,458
ipynb
Jupyter Notebook
LAB2/.ipynb_checkpoints/Problem1-checkpoint.ipynb
sobil-dalal/Model-Simulation-Optimization
c6e9d17ad0f660bfcbe4b9d86b8b65c8d3ba09d8
[ "MIT" ]
null
null
null
LAB2/.ipynb_checkpoints/Problem1-checkpoint.ipynb
sobil-dalal/Model-Simulation-Optimization
c6e9d17ad0f660bfcbe4b9d86b8b65c8d3ba09d8
[ "MIT" ]
null
null
null
LAB2/.ipynb_checkpoints/Problem1-checkpoint.ipynb
sobil-dalal/Model-Simulation-Optimization
c6e9d17ad0f660bfcbe4b9d86b8b65c8d3ba09d8
[ "MIT" ]
null
null
null
26.8867
1,079
0.540308
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Problem\" data-toc-modified-id=\"Problem-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Problem</a></span></li><li><span><a href=\"#Define-the-variables\" data-toc-modified-id=\"Define-the-variables-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Define the variables</a></span></li><li><span><a href=\"#Define-the-problem\" data-toc-modified-id=\"Define-the-problem-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Define the problem</a></span></li><li><span><a href=\"#Define-objective-function\" data-toc-modified-id=\"Define-objective-function-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Define objective function</a></span></li><li><span><a href=\"#Add-the-constraints\" data-toc-modified-id=\"Add-the-constraints-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Add the constraints</a></span></li><li><span><a href=\"#Solve-the-problem\" data-toc-modified-id=\"Solve-the-problem-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Solve the problem</a></span></li></ul></div>", "_____no_output_____" ], [ "# Problem\nA couple are planning a hiking trip together. The husband carries a knapsack able to hold 20 kg of equipment, while the wife carries another knapsack able to hold 17 kg. There are several items that they would like to take on the trip, each with a weight and an estimated “benefit” (given by a number):\n<img src=\"./Problem1.jpg\" style=\"width: 400px;\">\nSolve the problem using Pulp!", "_____no_output_____" ], [ "# Define the variables", "_____no_output_____" ] ], [ [ "import pulp", "_____no_output_____" ], [ "ks = ['1','2']\ngoods = ['stove', 'lamp', 'axe', 'binoculars', 'rope']\nx = pulp.LpVariable.dicts(\"x\", (ks, goods), lowBound=0, upBound=1, cat=pulp.LpInteger)", "_____no_output_____" ], [ "x[\"1\"]", "_____no_output_____" ] ], [ [ "# Define the problem", "_____no_output_____" ] ], [ [ "prop = pulp.LpProblem(\"Problem1\", sense=pulp.LpMaximize)\n", "_____no_output_____" ] ], [ [ "# Define objective function", "_____no_output_____" ] ], [ [ "prop+ = pulp.lpSum([10*x[(k,'stove')] + 9*x[(k,'lamp')] + 6*x[(k,'axe')] + 3*x[(k, 'binoculars')] + 14*x[(k, 'rope')] \n for k in ks])", "_____no_output_____" ] ], [ [ "# Add the constraints", "_____no_output_____" ], [ "# Solve the problem", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e71a2202b33e6c7d5c369888222cfec8ca9d6f39
67,247
ipynb
Jupyter Notebook
python/coursera_gluonCV_class/notebook/module_4_lab.ipynb
rtp-aws/devpost_aws_disaster_recovery
2ccfff2d8b85614f3043f09d98c9981dedf43c05
[ "MIT" ]
1
2022-01-13T23:36:05.000Z
2022-01-13T23:36:05.000Z
python/coursera_gluonCV_class/notebook/module_4_lab.ipynb
rtp-aws/devpost_aws_disaster_recovery
2ccfff2d8b85614f3043f09d98c9981dedf43c05
[ "MIT" ]
9
2022-01-13T19:34:34.000Z
2022-01-14T19:41:18.000Z
python/coursera_gluonCV_class/notebook/module_4_lab.ipynb
rtp-aws/devpost_aws_disaster_recovery
2ccfff2d8b85614f3043f09d98c9981dedf43c05
[ "MIT" ]
null
null
null
41.924564
8,020
0.679004
[ [ [ "# Module 4 Assignment\n\nIn this assignment, you'll use some of the key concepts from the module to create a neural network for image classification of items of clothing. Step one will be to normalize the input images, and you'll use NDArray operations to calculate the channel mean. You'll create a function to evaluate the performance of networks on the data, and construct a couple of different neural networks for image classification.", "_____no_output_____" ], [ "## 0) Setup\n\nWe start with a number of required imports and set the data paths.", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\nimport mxnet as mx\nfrom mxnet.gluon.data.vision import transforms\nfrom mxnet.gluon.data.vision import FashionMNIST\nimport numpy as np\nimport os\nfrom pathlib import Path\nfrom tqdm import tqdm", "_____no_output_____" ], [ "print(os.getenv('DATA_DIR'))", "None\n" ], [ "M4_DATA = Path(os.getenv('DATA_DIR', '../../data'), 'module_4')\nM4_IMAGES = Path(M4_DATA, 'images')\nM4_MODELS = Path(M4_DATA, 'models')", "_____no_output_____" ] ], [ [ "## 1) Data (& NDArray Operations)\n\nWe'll use the in-built dataset called `FashionMNIST` which is a variant of the commonly used `MNIST` dataset. It consists of 60,000 training images and 10,000 test images, and each image is a 28px by 28px greyscale image. We'll start by creating the `dataset` and visualize an example image.", "_____no_output_____" ] ], [ [ "test_dataset = FashionMNIST(train=False, root=M4_IMAGES).transform_first(transforms.ToTensor())", "Downloading ../../data/module_4/images/t10k-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/t10k-images-idx3-ubyte.gz...\nDownloading ../../data/module_4/images/t10k-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/t10k-labels-idx1-ubyte.gz...\n" ], [ "+", "_____no_output_____" ] ], [ [ "One important step before passing images to the network is normalization: i.e. shifting and scaling the pixel values so that they are zero-centered on average and have unit variance.\n\nOne method of normalization is pixelwise, where each **pixel** should have a unit normal distribution of values. Another is channelwise, where each **channel** should have a unit normal distribution of values. \n\nOne of the first steps in the pixelwise approach is to calculate an 'average image' from the dataset. Using a sample of 1024 images, you should now implement a function to calculate the average intensity for every pixel. 
You'd typically want to calculate this from all samples of the dataset, but 1024 samples will be sufficient for now.", "_____no_output_____" ] ], [ [ "test_dataloader = mx.gluon.data.DataLoader(test_dataset, shuffle=False, batch_size=1024)\nfor data, label in test_dataloader:\n break\nprint(type(data))\nprint(data.shape)", "<class 'mxnet.ndarray.ndarray.NDArray'>\n(1024, 1, 28, 28)\n" ], [ "def get_average_image_from_batch(batch):\n \"\"\"\n Given a batch of images, this function should calculate the 'average image'.\n \n :param batch: batch of images in NCHW layout.\n :type batch: mx.nd.NDArray\n \n :return: average image in CHW layout.\n :rtype: mx.nd.NDArray\n \"\"\"\n # sum over the batch axis, then divide by the batch size\n num_images = batch.shape[0]\n sum = batch.sum(axis=0)\n avg = sum / num_images\n return avg\n ", "_____no_output_____" ], [ "#get_average_image_from_batch(data)", "_____no_output_____" ], [ "average_image = get_average_image_from_batch(data)\nassert average_image.shape == (1, 28, 28)\nplt.imshow(average_image[0].asnumpy()) # 0 for first and only channel (since greyscale).", "_____no_output_____" ] ], [ [ "Using the average image that was calculated above, you should now implement a function to perform the pixelwise normalization.", "_____no_output_____" ] ], [ [ "def subtract_average_image(sample, average_image):\n \"\"\"\n Given a sample images, this function should return a pixelwise normalized image,\n using a pre-calculated average image.\n \n :param sample: sample image in CHW layout.\n :type sample: mx.nd.NDArray\n :param average_image: average image of the dataset in CHW layout.\n :type average_image: mx.nd.NDArray\n \n :return: pixelwise normalized image in CHW layout.\n :rtype: mx.nd.NDArray\n \"\"\"\n # broadcasted elementwise subtraction of the average image\n return sample - average_image", "_____no_output_____" ], [ "normalized_sample_data = subtract_average_image(sample_data, average_image)\nassert normalized_sample_data.shape == (1, 28, 28)\nnp.testing.assert_array_almost_equal(normalized_sample_data.asnumpy(), (sample_data - average_image).asnumpy())\nplt.imshow(normalized_sample_data[0].asnumpy()) # 0 for first and only channel (since greyscale).", "_____no_output_____" ] ], [ [ "```\nOur sample data is of this form\n(1024, 1, 28, 28)\nCreate a batch which looks like this:\n(4, 3, 2, 2)\nThat is a batch of 4, or 4 images\nEach image has 3 channels and is a 2x2 array.\n```", "_____no_output_____" ] ], [ [ "\ntesty = mx.nd.array(\n [\n [\n [[1, 2], [3, 4]],\n [[5, 6], [7, 8]],\n [[9, 10], [11, 12]]\n ],\n [\n [[13, 14], [15, 16]],\n [[17, 18], [19, 20]],\n [[21, 22], [23, 24]]\n ],\n [\n [[25, 26], [27, 28]],\n [[29, 30], [31, 32]],\n [[33, 34], [35, 36]]\n ],\n [\n [[37, 38], [39, 40]],\n [[41, 42], [43, 44]],\n [[45, 46], [47, 48]]\n ]\n ])", "_____no_output_____" ], [ "testy.shape", "_____no_output_____" ], [ "#testy.sum(axis=3)", "_____no_output_____" ], [ "testy.mean(axis=[0,2,3])", "_____no_output_____" ], [ "testy.sum(axis=[0,2,3])/4/2/2", "_____no_output_____" ] ], [ [ "You've now created a transform for pixelwise normalization! As mentioned previously, another common method for normalization is channelwise normalization. 
Complete the following function to calculate the channel averages from a batch of multi-channel inputs.\n\nNote: although the images from our dataset only have one channel, your function should support cases where there is more than one channel (e.g. RGB images).\n\n**Hint**: Check out the `axis` (or `dim`) arguments on MXNet NDArray functions.", "_____no_output_____" ] ], [ [ "def get_channel_average_from_batch(batch):\n \"\"\"\n Given a batch of images, this function should return the\n average value for each channel across the images of the batch.\n \n :param batch: batch of images in NCHW layout.\n :type batch: mx.nd.NDArray\n \n :return: channel averages in C layout.\n :rtype: mx.nd.NDArray\n \"\"\"\n # sum over batch, height and width, then divide by the number of summed elements\n num_batch = batch.shape[0]\n num_x = batch.shape[2]\n num_y = batch.shape[3]\n return batch.sum(axis=[2,3,0])/num_batch/num_x/num_y\n # equivalently, in one line:\n #return batch.mean(axis=(0,2,3))", "_____no_output_____" ], [ "get_channel_average_from_batch(data).shape", "_____no_output_____" ], [ "channel_average = get_channel_average_from_batch(data).asscalar()\nassert isinstance(channel_average, np.float32)\nnp.testing.assert_almost_equal(channel_average, 0.28757906, decimal=5)\n", "_____no_output_____" ], [ "channel_average = get_channel_average_from_batch(data).asscalar()\nprint(channel_average)\nassert isinstance(channel_average, np.float32)\nnp.testing.assert_almost_equal(channel_average, 0.28757906, decimal=5)\n\ntest_averages = mx.nd.array([1,2,3,4])\ntest_input = mx.nd.reshape(test_averages, shape=(1,4,1,1)) * mx.nd.ones(shape=(10,4,25,25))\ntest_channel_average = get_channel_average_from_batch(test_input)\nnp.testing.assert_array_almost_equal(test_averages.asnumpy(), test_channel_average.asnumpy())", "0.28757906\n" ], [ "print(\"type(channel_average) is {}\" . format(type(channel_average)))\nprint(\"channel_average.shape is {}\" . format(channel_average.shape))\nprint(\"channel_average is {}\" . format(channel_average))", "type(channel_average) is <class 'numpy.float32'>\nchannel_average.shape is ()\nchannel_average is 0.2875790596008301\n" ] ], [ [ "Using this channel average, we can use the `Normalize` transform to apply this to all samples in our dataset as they are loaded.", "_____no_output_____" ] ], [ [ "channel_std = 0.31\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(channel_average, channel_std)\n])", "_____no_output_____" ], [ "print(transform)\n#transform.summary(data)\n#transform.summary(1.1, 2.2)", "Compose(\n (0): HybridSequential(\n (0): ToTensor(\n \n )\n (1): Normalize(\n \n )\n )\n)\n" ], [ "train_dataset = FashionMNIST(train=True, root=M4_IMAGES).transform_first(transform)\ntest_dataset = FashionMNIST(train=False, root=M4_IMAGES).transform_first(transform)\ntrain_dataloader = mx.gluon.data.DataLoader(train_dataset, shuffle=True, batch_size=128)\ntest_dataloader = mx.gluon.data.DataLoader(train_dataset, shuffle=False, batch_size=128)  # note: points at train_dataset (60,000 samples), which the assert below relies on", "Downloading ../../data/module_4/images/train-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/train-images-idx3-ubyte.gz...\nDownloading ../../data/module_4/images/train-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/train-labels-idx1-ubyte.gz...\n" ] ], [ [ "## 2) Metrics\n\nIn this section, you'll implement a function to test the prediction quality of networks. Using `Accuracy` as the evaluation metric, complete the following function that takes a network and a dataloader (with test data) and returns an MXNet Metric that has been updated with labels and predictions. We'll use this function in the next section, when we train classification networks.\n\n**Hint**: You'll find classes in the `mxnet.metric` subpackage useful for this task.", "_____no_output_____" ] ], [ [ "def calculate_accuracy(network, dataloader):\n \"\"\"\n Calculates accuracy of the network on the data given by the dataloader.\n \n :param network: network to be tested\n :type network: mx.gluon.Block\n :param dataloader: dataloader for test data\n :type dataloader: mx.gluon.data.DataLoader\n \n :return: updated metric\n :rtype: mx.metric.EvalMetric\n \"\"\"\n accuracy = mx.metric.Accuracy()\n for data, labels in tqdm(dataloader):\n preds = network(data)\n # accumulate the metric over every batch of labels and predictions\n accuracy.update(labels,preds)\n \n return accuracy", "_____no_output_____" ], [ "test_network = mx.gluon.nn.Dense(units=10)\ntest_network.initialize()\nmetric = calculate_accuracy(test_network, test_dataloader)\nprint(metric.get())\nisinstance(metric, mx.metric.EvalMetric)\nassert metric.name == 'accuracy'\nassert metric.num_inst == 60000", "100%|██████████| 469/469 [00:07<00:00, 62.75it/s]" ], [ "isinstance(metric, mx.metric.EvalMetric)\nmetric.name\nmetric.num_inst", "_____no_output_____" ] ], [ [ "## 3) Network", "_____no_output_____" ], [ "In this section, you'll implement a couple of different image classification networks and train them on the `FashionMNIST` dataset. 
A `train` function is already provided in this assignment, because the focus will be on network construction.", "_____no_output_____" ] ], [ [ "def train(network, dataloader):\n softmax_cross_entropy = mx.gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = mx.gluon.Trainer(network.collect_params(), 'sgd', {'learning_rate': 0.1})\n for data, label in tqdm(dataloader):\n with mx.autograd.record():\n output = network(data)\n loss = softmax_cross_entropy(output, label)\n loss.backward()\n trainer.step(data.shape[0])", "_____no_output_____" ] ], [ [ "Your first model should be a sequential network, with 3 layers. You first layer should have 16 hidden units, the second should have 8 hidden units and the last layer should the correct number of output units for the classification task at hand. You should add ReLU activations on all hidden layers, but not the output layer. You should define `network` in the cell below.\n\n**Hint**: You'll find classes in the `mxnet.gluon.nn` subpackage useful for this task.", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n#raise NotImplementedError()\n\nnetwork = mx.gluon.nn.Sequential()\nnetwork.add(\n mx.gluon.nn.Dense(16, activation = 'relu'),\n mx.gluon.nn.Dense(8, activation = 'relu'),\n mx.gluon.nn.Dense(10)\n)", "_____no_output_____" ], [ "assert isinstance(network, mx.gluon.nn.Sequential)\nassert len(network) == 3\nassert isinstance(network[0], mx.gluon.nn.Dense)\nassert network[0].act.name.endswith('relu')\nassert network[0].weight.shape[0] == 16\nassert isinstance(network[1], mx.gluon.nn.Dense)\nassert network[1].act.name.endswith('relu')\nassert network[1].weight.shape[0] == 8\nassert isinstance(network[2], mx.gluon.nn.Dense)\nassert network[2].act is None\nassert network[2].weight.shape[0] == 10", "_____no_output_____" ] ], [ [ "With your network now defined, you should initialize its parameters using the Xavier method in the cell below.\n\n**Hint**: You'll find classes in the `mxnet.init` subpackage useful for this task.", "_____no_output_____" ] ], [ [ "# Use this guide\n# https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/blocks/init.html\n\n# YOUR CODE HERE\n#raise NotImplementedError()\ninitializer = mx.init.Xavier()\nnetwork.initialize(initializer)\n", "_____no_output_____" ], [ "assert isinstance(initializer, mx.initializer.Xavier)", "_____no_output_____" ] ], [ [ "We'll now check the network summary and see that the network has 12786 trainable parameters.", "_____no_output_____" ] ], [ [ "network.summary(data)", "--------------------------------------------------------------------------------\n Layer (type) Output Shape Param #\n================================================================================\n Input (1024, 1, 28, 28) 0\n Activation-1 <Symbol dense1_relu_fwd> 0\n Activation-2 (1024, 16) 0\n Dense-3 (1024, 16) 12560\n Activation-4 <Symbol dense2_relu_fwd> 0\n Activation-5 (1024, 8) 0\n Dense-6 (1024, 8) 136\n Dense-7 (1024, 10) 90\n================================================================================\nParameters in forward computation graph, duplicate included\n Total params: 12786\n Trainable params: 12786\n Non-trainable params: 0\nShared params in forward computation graph: 0\nUnique parameters in model: 12786\n--------------------------------------------------------------------------------\n" ] ], [ [ "And use the `calculate_accuracy` function defined in the previous section to evaluate the performance of this architecture.", "_____no_output_____" ] ], [ [ "train(network, train_dataloader)\nmetric = 
calculate_accuracy(network, test_dataloader)\nprint(metric.get())", "100%|██████████| 469/469 [00:08<00:00, 52.27it/s]\n100%|██████████| 469/469 [00:07<00:00, 61.14it/s]" ] ], [ [ "Your final objective in this assignment is to try a different architecture that uses convolutional and max pooling layers. You should define another sequential network, but this time it should have 5 layers in total:\n\n1. Convolutional Layer (32 channels, 3x3 kernel and ReLU activation)\n2. Max Pooling Layer (2x2 kernel and 2x2 stride)\n3. Convolutional Layer (16 channels, 3x3 kernel and ReLU activation)\n4. Max Pooling Layer (2x2 kernel and 2x2 stride)\n5. Dense Layer (10 output units)", "_____no_output_____" ] ], [ [ "network = mx.gluon.nn.Sequential()\nnetwork.add(\n    mx.gluon.nn.Conv2D(channels=32, kernel_size=(3,3), activation='relu'),\n    mx.gluon.nn.MaxPool2D(pool_size=(2,2), strides=(2,2)),\n    mx.gluon.nn.Conv2D(channels=16, kernel_size=(3,3), activation='relu'),\n    mx.gluon.nn.MaxPool2D(pool_size=(2,2), strides=(2,2)),\n    mx.gluon.nn.Dense(10)\n)", "_____no_output_____" ], [ "assert isinstance(network, mx.gluon.nn.Sequential)\nassert len(network) == 5\nassert isinstance(network[0], mx.gluon.nn.Conv2D)\nassert network[0].act.name.endswith('relu')\nassert network[0].weight.shape[0] == 32\nassert isinstance(network[1], mx.gluon.nn.MaxPool2D)\nassert isinstance(network[2], mx.gluon.nn.Conv2D)\nassert network[2].act.name.endswith('relu')\nassert network[2].weight.shape[0] == 16\nassert isinstance(network[3], mx.gluon.nn.MaxPool2D)\nassert isinstance(network[4], mx.gluon.nn.Dense)\nassert network[4].act is None\nassert network[4].weight.shape[0] == 10", "_____no_output_____" ] ], [ [ "Let's initialize the parameters of the network, and show a summary of the network architecture.\n\nWith 8954 trainable parameters, this network has 30% fewer parameters than the previous network.", "_____no_output_____" ] ], [ [ "network.initialize(init=initializer)\nnetwork.summary(data)", "--------------------------------------------------------------------------------\n        Layer (type)                        Output Shape            Param #\n================================================================================\n               Input                    (1024, 1, 28, 28)                  0\n        Activation-1              <Symbol conv0_relu_fwd>                  0\n        Activation-2                   (1024, 32, 26, 26)                  0\n            Conv2D-3                   (1024, 32, 26, 26)                320\n         MaxPool2D-4                   (1024, 32, 13, 13)                  0\n        Activation-5              <Symbol conv1_relu_fwd>                  0\n        Activation-6                   (1024, 16, 11, 11)                  0\n            Conv2D-7                   (1024, 16, 11, 11)               4624\n         MaxPool2D-8                     (1024, 16, 5, 5)                  0\n             Dense-9                           (1024, 10)               4010\n================================================================================\nParameters in forward computation graph, duplicate included\n   Total params: 8954\n   Trainable params: 8954\n   Non-trainable params: 0\nShared params in forward computation graph: 0\nUnique parameters in model: 8954\n--------------------------------------------------------------------------------\n" ] ], [ [ "And finally, let's evaluate the network performance.", "_____no_output_____" ] ], [ [ "train(network, train_dataloader)\nmetric = calculate_accuracy(network, test_dataloader)\nprint(metric.get())", "100%|██████████| 469/469 [00:19<00:00, 24.18it/s]\n100%|██████████| 469/469 [00:22<00:00, 20.53it/s]" ] ], [ [ "We're only training for a single epoch here. You'd expect to get improved accuracy if training for more epochs.", "_____no_output_____" ] ]
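, [ [ "As a sketch of what a longer run could look like (the epoch count of 5 is an assumption made for illustration, not part of the assignment), the provided `train` and `calculate_accuracy` functions can simply be called in a loop:", "_____no_output_____" ] ], [ [ "# Hypothetical multi-epoch extension: repeat the provided train/evaluate\n# cycle and report accuracy after every epoch.\nn_epochs = 5  # assumed value; the assignment does not specify one\nfor epoch in range(n_epochs):\n    train(network, train_dataloader)\n    metric = calculate_accuracy(network, test_dataloader)\n    print('epoch {}: {}'.format(epoch + 1, metric.get()))", "_____no_output_____" ] ] ]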
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e71a2967a05ce03212d0895944d5667e5f1e9971
11,695
ipynb
Jupyter Notebook
00b_preprocess_MPP_HSC_overlay_correct_cell_proportions.ipynb
evafast/scrnaseq_paper
231286dc1447516f938bed8191839edb554a4fd3
[ "MIT" ]
null
null
null
00b_preprocess_MPP_HSC_overlay_correct_cell_proportions.ipynb
evafast/scrnaseq_paper
231286dc1447516f938bed8191839edb554a4fd3
[ "MIT" ]
null
null
null
00b_preprocess_MPP_HSC_overlay_correct_cell_proportions.ipynb
evafast/scrnaseq_paper
231286dc1447516f938bed8191839edb554a4fd3
[ "MIT" ]
null
null
null
33.606322
179
0.529457
[ [ [ "# 00b_preprocess_MPP_HSC_overlay_correct_cell_proportions\n\nAssemble merged object of MPPs + HSCs with correct cell proportions (from FACS)\n\nRun with this command in docker container\n\ndocker run \\\n--rm \\\n-d \\\n--name demuxEM \\\n-p 8881:8888 \\\n-e JUPYTER_ENABLE_LAB=YES \\\n-v /Users/efast/Documents/:/home/jovyan/work \\\npegasuspy_scanpy:vs1\n", "_____no_output_____" ] ], [ [ "%reset", "Once deleted, variables cannot be recovered. Proceed (y/[n])? y\n" ], [ "import numpy as np\nimport pandas as pd\nimport scanpy as sc", "_____no_output_____" ], [ "## with LT-HSCs\n\nsample_strings = ['ct', 'dmPGE2', 'GCSF', 'indo', 'pIC']\n\nfile_base = './raw_data/B_'\nfile_end = '/outs/filtered_feature_bc_matrix'\n\nfile_base_LT = './raw_data/A_'\n\nann_base = './write/demux_adata_hto_'\nann_end = '.csv'\n\nwrite_path_base = './sc_objects/demuxannotated_'\nwrite_path_end = '.h5ad'\nwrite_path_end_proportions = 'prop.h5ad'\nwrite_path_end_proportions_LT = '_LT_prop.h5ad'\n\nwrite_path_end_csv = 'counts.csv'\n\nproportions = pd.read_csv('./raw_data/cell_proportions_demux.csv', index_col= 0)\n\nfor i in range(len(sample_strings)):\n sample = sample_strings[i]\n data_file = file_base+sample+file_end # assembles the name of the datafile\n ann_file = ann_base+sample+ann_end\n \n adata = sc.read_10x_mtx(data_file, var_names='gene_symbols', cache=True)\n ann = pd.read_csv( ann_file, index_col= 0)\n\n adata.obs.index = adata.obs.index.map(lambda x: x.rstrip('-1')) # remove the '-1'\n adata.obs = pd.merge(ann, adata.obs, how='right', left_index = True, right_index = True) # merge the ann and adata.obs\n adata = adata[adata.obs['demux_type'] == 'singlet'] # filter and keep only singlets\n\n HSPC_renaming = {'CD48LSK': 'MPP3/4', 'ST': 'MPP1'} # dictionary for renaming\n adata.obs = adata.obs.replace(HSPC_renaming) #replace with new names\n \n category_counts = adata.obs.groupby(['assignment']).count()\n del category_counts.index.name\n \n # write the file to disk\n out_h5ad = write_path_base + sample + write_path_end\n out_csv = write_path_base + sample + write_path_end_csv\n \n adata.write(out_h5ad)\n category_counts.to_csv(out_csv)\n\n count_MPP34 = category_counts.loc['MPP3/4','counts']\n temp_df2 = adata.obs[adata.obs['assignment'] == 'MPP3/4'].sample(n= count_MPP34, random_state=1)\n temp_df2['select_cells'] = 1\n\n temp_df= []\n\n sample_HSPCS = ['MPP2', 'MPP', 'MPP1']\n\n for j in range(len(sample_HSPCS)):\n sample_h = sample_HSPCS[j]\n\n number = round(category_counts.loc['MPP3/4','hto_type'] * (proportions.loc[sample_h, sample]/proportions.loc['MPP3/4', sample]))\n number = number.astype(int)\n\n temp_df = adata.obs[adata.obs['assignment'] == sample_h].sample(n= number, random_state=1)\n temp_df['select_cells'] = 1\n\n temp_df2 = temp_df2.append(temp_df)\n \n # drop columns I don't need\n temp_df2 = temp_df2.drop(columns=['counts', 'hto_type', 'rna_type', 'demux_type', 'assignment'])\n\n # merge with adata.obs\n adata.obs = pd.merge(temp_df2, adata.obs, how='right', left_index = True, right_index = True)\n\n # make subselection of the adata dataframe\n adata = adata[adata.obs['select_cells'] == 1] # filter and keep only singlets\n \n # LT\n data_file_LT = file_base_LT+sample+file_end # assembles the name of the datafile\n \n adata_LT = sc.read_10x_mtx(data_file_LT, var_names='gene_symbols', cache=True)\n\n number_LT = round(category_counts.loc['MPP3/4','hto_type'] * (proportions.loc['HSC', sample]/proportions.loc['MPP3/4', sample]))\n number_LT = number_LT.astype(int)\n\n temp_df3 = 
adata_LT.obs.sample(n= number_LT, random_state=1)\n temp_df3['select_cells'] = 1\n\n # merge with adata.obs\n adata_LT.obs = pd.merge(temp_df3, adata_LT.obs, how='right', left_index = True, right_index = True)\n\n adata_LT = adata_LT[adata_LT.obs['select_cells'] == 1] # filter and keep only singlets\n\n adata_LT.obs['assignment'] = 'LT'\n \n #path out\n out_h5ad_prop = write_path_base + sample + write_path_end_proportions\n out_h5ad_prop_LT = write_path_base + sample + write_path_end_proportions_LT\n \n adata.write(out_h5ad_prop)\n adata_LT.write(out_h5ad_prop_LT)\n", "... storing 'hto_type' as categorical\n... storing 'rna_type' as categorical\n... storing 'demux_type' as categorical\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\nTrying to set attribute `.obs` of view, copying.\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\n... storing 'hto_type' as categorical\n... storing 'rna_type' as categorical\n... storing 'demux_type' as categorical\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\nTrying to set attribute `.obs` of view, copying.\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\n... storing 'hto_type' as categorical\n... storing 'rna_type' as categorical\n... storing 'demux_type' as categorical\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\nTrying to set attribute `.obs` of view, copying.\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\n... storing 'hto_type' as categorical\n... storing 'rna_type' as categorical\n... storing 'demux_type' as categorical\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\nTrying to set attribute `.obs` of view, copying.\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\n... storing 'hto_type' as categorical\n... storing 'rna_type' as categorical\n... storing 'demux_type' as categorical\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\nTrying to set attribute `.obs` of view, copying.\n... storing 'assignment' as categorical\n... storing 'feature_types' as categorical\n" ], [ "sc.logging.print_versions()\npd.show_versions()", "scanpy==1.4.5.1 anndata==0.7.1 umap==0.3.10 numpy==1.17.3 scipy==1.3.0 pandas==0.25.3 scikit-learn==0.21.3 statsmodels==0.10.0 python-igraph==0.7.1 louvain==0.6.1.post1\n\nINSTALLED VERSIONS\n------------------\ncommit : None\npython : 3.7.3.final.0\npython-bits : 64\nOS : Linux\nOS-release : 4.19.76-linuxkit\nmachine : x86_64\nprocessor : x86_64\nbyteorder : little\nLC_ALL : en_US.UTF-8\nLANG : en_US.UTF-8\nLOCALE : en_US.UTF-8\n\npandas : 0.25.3\nnumpy : 1.17.3\npytz : 2019.3\ndateutil : 2.8.1\npip : 19.3.1\nsetuptools : 41.6.0.post20191101\nCython : 0.29.14\npytest : None\nhypothesis : None\nsphinx : None\nblosc : None\nfeather : None\nxlsxwriter : 1.2.8\nlxml.etree : None\nhtml5lib : None\npymysql : None\npsycopg2 : None\njinja2 : 2.10.3\nIPython : 7.9.0\npandas_datareader: None\nbs4 : 4.8.1\nbottleneck : None\nfastparquet : None\ngcsfs : None\nlxml.etree : None\nmatplotlib : 3.1.2\nnumexpr : 2.6.9\nodfpy : None\nopenpyxl : None\npandas_gbq : None\npyarrow : 0.16.0\npytables : None\ns3fs : None\nscipy : 1.3.0\nsqlalchemy : 1.3.10\ntables : 3.6.1\nxarray : None\nxlrd : 1.2.0\nxlwt : None\nxlsxwriter : 1.2.8\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
e71a2e9e510515e3e54c8c449da5df500f54d2d7
8,488
ipynb
Jupyter Notebook
Linear models.ipynb
amueller/nyu_ml_lectures
3c5858870bd7177e1850fdd4c721af0115e6a258
[ "BSD-2-Clause" ]
21
2015-09-14T21:01:04.000Z
2020-12-27T19:10:21.000Z
Linear models.ipynb
afcarl/nyu_ml_lectures
3c5858870bd7177e1850fdd4c721af0115e6a258
[ "BSD-2-Clause" ]
null
null
null
Linear models.ipynb
afcarl/nyu_ml_lectures
3c5858870bd7177e1850fdd4c721af0115e6a258
[ "BSD-2-Clause" ]
19
2015-09-17T00:17:13.000Z
2020-02-01T01:28:59.000Z
24.113636
183
0.540528
[ [ [ "%matplotlib nbagg\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "# Linear models for regression", "_____no_output_____" ], [ "\n```\ny_pred = x_test[0] * coef_[0] + ... + x_test[n_features-1] * coef_[n_features-1] + intercept_\n```", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_regression\nfrom sklearn.cross_validation import train_test_split\n\nX, y, true_coefficient = make_regression(n_samples=80, n_features=30, n_informative=10, noise=100, coef=True, random_state=5)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=5)\nprint(X_train.shape)\nprint(y_train.shape)", "_____no_output_____" ] ], [ [ "## Linear Regression\n\n$$ \\text{min}_{w, b} \\sum_i || w^\\mathsf{T}x_i + b - y_i||^2 $$", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\nlinear_regression = LinearRegression().fit(X_train, y_train)\nprint(\"R^2 on training set: %f\" % linear_regression.score(X_train, y_train))\nprint(\"R^2 on test set: %f\" % linear_regression.score(X_test, y_test))", "_____no_output_____" ], [ "from sklearn.metrics import r2_score\nprint(r2_score(np.dot(X, true_coefficient), y))", "_____no_output_____" ], [ "plt.figure(figsize=(10, 5))\ncoefficient_sorting = np.argsort(true_coefficient)[::-1]\nplt.plot(true_coefficient[coefficient_sorting], \"o\", label=\"true\")\nplt.plot(linear_regression.coef_[coefficient_sorting], \"o\", label=\"linear regression\")\n\nplt.legend()", "_____no_output_____" ] ], [ [ "## Ridge Regression (L2 penalty)\n\n$$ \\text{min}_{w,b} \\sum_i || w^\\mathsf{T}x_i + b - y_i||^2 + \\alpha ||w||_2^2$$ ", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Ridge\nridge_models = {}\ntraining_scores = []\ntest_scores = []\n\nfor alpha in [100, 10, 1, .01]:\n ridge = Ridge(alpha=alpha).fit(X_train, y_train)\n training_scores.append(ridge.score(X_train, y_train))\n test_scores.append(ridge.score(X_test, y_test))\n ridge_models[alpha] = ridge\n\nplt.figure()\nplt.plot(training_scores, label=\"training scores\")\nplt.plot(test_scores, label=\"test scores\")\nplt.xticks(range(4), [100, 10, 1, .01])\nplt.legend(loc=\"best\")", "_____no_output_____" ], [ "plt.figure(figsize=(10, 5))\nplt.plot(true_coefficient[coefficient_sorting], \"o\", label=\"true\", c='b')\n\nfor i, alpha in enumerate([100, 10, 1, .01]):\n plt.plot(ridge_models[alpha].coef_[coefficient_sorting], \"o\", label=\"alpha = %.2f\" % alpha, c=plt.cm.summer(i / 3.))\n \nplt.legend(loc=\"best\")", "_____no_output_____" ] ], [ [ "## Lasso (L1 penalty)\n$$ \\text{min}_{w, b} \\sum_i || w^\\mathsf{T}x_i + b - y_i||^2 + \\alpha ||w||_1$$ ", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Lasso\n\nlasso_models = {}\ntraining_scores = []\ntest_scores = []\n\nfor alpha in [30, 10, 1, .01]:\n lasso = Lasso(alpha=alpha).fit(X_train, y_train)\n training_scores.append(lasso.score(X_train, y_train))\n test_scores.append(lasso.score(X_test, y_test))\n lasso_models[alpha] = lasso\nplt.figure()\nplt.plot(training_scores, label=\"training scores\")\nplt.plot(test_scores, label=\"test scores\")\nplt.xticks(range(4), [30, 10, 1, .01])\nplt.legend(loc=\"best\")", "_____no_output_____" ], [ "plt.figure(figsize=(10, 5))\nplt.plot(true_coefficient[coefficient_sorting], \"o\", label=\"true\", c='b')\n\nfor i, alpha in enumerate([30, 10, 1, .01]):\n plt.plot(lasso_models[alpha].coef_[coefficient_sorting], \"o\", label=\"alpha = %.2f\" % alpha, c=plt.cm.summer(i / 3.))\n \nplt.legend(loc=\"best\")", 
"_____no_output_____" ] ], [ [ "## Linear models for classification", "_____no_output_____" ], [ "\n```\ny_pred = x_test[0] * coef_[0] + ... + x_test[n_features-1] * coef_[n_features-1] + intercept_ > 0\n```", "_____no_output_____" ], [ "The influence of C in LinearSVC", "_____no_output_____" ] ], [ [ "from plots import plot_linear_svc_regularization\nplot_linear_svc_regularization()", "_____no_output_____" ] ], [ [ "## Multi-Class linear classification", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_blobs\nplt.figure()\nX, y = make_blobs(random_state=42)\nplt.scatter(X[:, 0], X[:, 1], c=y)", "_____no_output_____" ], [ "from sklearn.svm import LinearSVC\nlinear_svm = LinearSVC().fit(X, y)\nprint(linear_svm.coef_.shape)\nprint(linear_svm.intercept_.shape)", "_____no_output_____" ], [ "plt.figure()\nplt.scatter(X[:, 0], X[:, 1], c=y)\nline = np.linspace(-15, 15)\nfor coef, intercept in zip(linear_svm.coef_, linear_svm.intercept_):\n plt.plot(line, -(line * coef[0] + intercept) / coef[1])\nplt.ylim(-10, 15)\nplt.xlim(-10, 8)", "_____no_output_____" ] ], [ [ "# Exercises", "_____no_output_____" ], [ "* Compare Logistic regression with l1 penalty and l2 penalty by plotting the coefficients as above for the digits dataset. Classify odd vs even digits to make it a binary task.", "_____no_output_____" ] ], [ [ "y % 2", "_____no_output_____" ], [ "# %load solutions/linear_models.py", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
e71a2fad6c4bdca1ff13ff5425c16c7ec5309070
25,514
ipynb
Jupyter Notebook
data-analysis/data_analysis.ipynb
wichmann-lab/robust-detection-benchmark
288f6549e89e39a5808282c30e3c14d4f872df42
[ "MIT" ]
160
2019-07-18T03:33:16.000Z
2022-03-15T16:37:57.000Z
data-analysis/data_analysis.ipynb
wichmann-lab/robust-detection-benchmark
288f6549e89e39a5808282c30e3c14d4f872df42
[ "MIT" ]
5
2020-01-01T04:46:38.000Z
2022-03-12T09:15:17.000Z
data-analysis/data_analysis.ipynb
wichmann-lab/robust-detection-benchmark
288f6549e89e39a5808282c30e3c14d4f872df42
[ "MIT" ]
25
2019-07-18T08:38:34.000Z
2021-12-06T04:42:01.000Z
38.599092
206
0.567414
[ [ [ "# data analysis and plotting results", "_____no_output_____" ] ], [ [ "import pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom os.path import join as pjoin\nimport seaborn as sns\n%matplotlib inline\n%config InlineBackend.print_figure_kwargs = {'bbox_inches':'tight'}\n\nfrom robustness_eval import get_results, get_distortions_from_file", "_____no_output_____" ] ], [ [ "## general definitions", "_____no_output_____" ] ], [ [ "result_dir = \"../raw-data/\"\nfigures_dir = \"../figures/results/\"", "_____no_output_____" ], [ "sns.set()\nsns.set_style('ticks')\nsns.set_context('paper',rc={\"font.size\":8,\"axes.titlesize\":8,\"axes.labelsize\":8})", "_____no_output_____" ], [ "# Colour scheme\n\ncol_clean = tuple(x/255.0 for x in (50, 65, 75))\ncol_stylized = tuple(x/255.0 for x in (180, 160, 105))\ncol_combined = tuple(x/255.0 for x in (165, 30, 55))", "_____no_output_____" ], [ "# plotting parameters\n\ncombined_plot_height = 7.0\ncombined_plot_width = 6.0\n\nmarkersize = 16\nfontsize = 24\nlinewidth = 2.5\nlabelsize = 18\nlegend_fontsize=18\naxis_start = -0.2\naxis_end = 5.3\n", "_____no_output_____" ], [ "def plot_individual_results(data_A, data_B, data_C, plot_name, metric_name,\n ylim, legend_loc=1, markersize=12, linewidth=2.0,\n fontsize=15, legend_fontsize=11.3, labelsize=12.0,\n data_name_A=\"standard data\", data_name_B=\"stylized data\",\n data_name_C=\"combined data\", color_A=col_clean,\n color_B = col_stylized, color_C = col_combined):\n\n fig = plt.figure(figsize = (15.0, 20.0))\n plt.subplots_adjust(wspace = 0.3, hspace = 0.5)\n\n for i, distortion in enumerate(distortions[0:15]): # only plot first 15 distortions, not the validation distortions\n ax = plt.subplot(5,3,i+1)\n\n plt.plot(list(range(6)), data_A[i,:] * 100, 's-', zorder=2, color=color_A,\n label=data_name_A, markersize=markersize, linewidth=linewidth)\n plt.plot(list(range(6)), data_B[i,:] * 100, '^-', zorder=1, color=color_B,\n label=data_name_B, markersize=markersize, linewidth=linewidth)\n plt.plot(list(range(6)), data_C[i,:] * 100, 'o-', zorder=3, color=color_C,\n label=data_name_C, markersize=markersize, linewidth=linewidth)\n\n\n plt.title(distortion, fontsize=fontsize) \n if i >= 12: # plot xlabel only in last row\n plt.xlabel('corruption severity', fontsize=fontsize)\n if i % 3 == 0:\n plt.ylabel(metric_name, fontsize=fontsize)\n plt.axis([axis_start, axis_end, ylim[0], ylim[1]])\n if i == 0:\n legend = plt.legend(loc=legend_loc, fontsize=legend_fontsize, frameon=True, edgecolor=\"black\")\n legend.get_frame().set_linewidth(1.0)\n\n ax.tick_params(axis='both', which='major', labelsize=labelsize)\n\n\n sns.despine(trim=True, offset=5)\n\n plt.show()\n\n fig.savefig(pjoin(figures_dir, plot_name), bbox_inches='tight')\n ", "_____no_output_____" ] ], [ [ "### Pascal VOC", "_____no_output_____" ] ], [ [ "distortions = get_distortions_from_file(pjoin(result_dir, \"pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_results.pkl\"))\nvoc_results = get_results(pjoin(result_dir, \"pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_results.pkl\"), dataset='voc')\nsvoc_results = get_results(pjoin(result_dir, \"pascal_voc/faster_rcnn_r50_fpn_1x_svoc0712_results.pkl\"), dataset='voc')\nvoc_svoc_results = get_results(pjoin(result_dir, \"pascal_voc/faster_rcnn_r50_fpn_1x_vocsvoc0712_results.pkl\"), dataset='voc')", "_____no_output_____" ], [ "def set_fontsize_helper(ax, fontsize):\n \n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n 
item.set_fontsize(fontsize)", "_____no_output_____" ], [ "# Pascal VOC combined=averaged plot\n\nfig = plt.figure(figsize = (combined_plot_width, combined_plot_height))\n\nax = plt.subplot(1,1,1)\nplt.plot(list(range(6)), np.mean(voc_results[:15,:], axis=0) * 100,\n 's-', zorder=2, color=col_clean, label='standard data', markersize=markersize,\n linewidth=linewidth)\nplt.plot(list(range(6)), np.mean(svoc_results[:15,:], axis=0) * 100,\n '^-', zorder=1, color=col_stylized, label='stylized data', markersize=markersize,\n linewidth=linewidth)\nplt.plot(list(range(6)), np.mean(voc_svoc_results[:15,:], axis=0) * 100,\n 'o-', zorder=3, color=col_combined, label='combined data', markersize=markersize,\n linewidth=linewidth)\n\nplt.xlabel('corruption severity', fontsize=fontsize)\nplt.ylabel('mAP50 in %', fontsize=fontsize)\nplt.axis([axis_start, axis_end, 0, 85])\nax.tick_params(axis='both', which='major', labelsize=labelsize)\nlegend = plt.legend(loc=3, fontsize=legend_fontsize, frameon=True, edgecolor=\"black\")\nlegend.get_frame().set_linewidth(1.0)\n\nsns.despine(trim=True, offset=5)\n\nplt.show()\n\nfig.savefig(pjoin(figures_dir, 'pascal_corruption_overall.pdf'), bbox_inches='tight')", "_____no_output_____" ], [ "plot_individual_results(data_A = voc_results,\n data_B=svoc_results,\n data_C=voc_svoc_results,\n plot_name=\"pascal_corruption_individual.pdf\",\n metric_name=\"mAP50 in %\", ylim=[0, 85], legend_loc=3)", "_____no_output_____" ], [ "import csv\nfrom imagecorruptions import get_corruption_names\n\ncorruption_names = get_corruption_names('all')\n\nclean_performance = voc_results[0,0,0]\nmean_voc_results = np.mean(voc_results[:15,1:,0], axis=1)\nmean_voc_svoc_results = np.mean(voc_svoc_results[:15, 1:,0], axis=1)\n\nrpc_voc_clean = mean_voc_results / clean_performance\nrpc_voc_combined = mean_voc_svoc_results / clean_performance\n\n# load the rmse values calculated with the calc_rmse.py script\nrmse_voc = np.zeros((19, 5))\nwith open('../raw-data/pascal_voc/voc_rmse.csv') as f:\n reader = csv.DictReader(f)\n for rownum, row in enumerate(reader):\n corruption_number = corruption_names.index(row['corruption'])\n rmse_voc[corruption_number, int(row['severity']) - 1] = row['RMSE']\n \nmean_rmse_voc = np.mean(rmse_voc, axis=1)\n\nnum_noise = 3\nnum_blur = 4\nnum_weather = 4\nnum_digital = 4\nnoise = np.array([145,105,70]) / 255.\nblur = np.array([50, 110, 30]) / 255.\nweather = np.array([80, 170, 200]) / 255.\ndigital = np.array([175, 110, 150]) / 255.\n\ncolors = [noise for _ in range(num_noise)] + [blur for _ in range(num_blur)] + [weather for _ in range(num_weather)] + [digital for _ in range(num_digital)]", "_____no_output_____" ], [ "fig = plt.figure(figsize = (10.0, 4.0))\n\nax = plt.subplot(1,1,1)\n\nrmse_fontsize=14\nrmse_linewidth=2\nrmse_markersize=8\nrmse_capsize=4\n\nplt.axis([2, 12, 0, 100])\nax.tick_params(axis='both', which='major', labelsize=rmse_fontsize)\n\nplt.xlabel('RMSE', fontsize=rmse_fontsize)\nplt.ylabel('rPC[%]', fontsize=rmse_fontsize)\nx = mean_rmse_voc\ny = rpc_voc_clean * 100\nywhiskers = (rpc_voc_combined - rpc_voc_clean) * 100\nxe = x[:num_noise]\nye = y[:num_noise]\nywe = ywhiskers[:num_noise]\nlolims = np.ones(num_noise, dtype=np.bool)\nuplims = np.zeros(num_noise, dtype=np.bool)\nplt.errorbar(xe, ye, yerr=ywe, lolims=lolims, uplims=uplims, linestyle=\"None\", ecolor=noise, marker='o', mfc=noise, linewidth=rmse_linewidth, markersize=rmse_markersize, capsize=rmse_capsize)\n\nxe = x[num_noise:num_noise+num_blur]\nye = y[num_noise:num_noise+num_blur]\nywe = 
ywhiskers[num_noise:num_noise+num_blur]\nlolims = np.ones(num_blur, dtype=np.bool)\nuplims = np.zeros(num_blur, dtype=np.bool)\nplt.errorbar(xe, ye, yerr=ywe, lolims=lolims, uplims=uplims, linestyle=\"None\", ecolor=blur, marker='s', mfc=blur, linewidth=rmse_linewidth, markersize=rmse_markersize, capsize=rmse_capsize)\n\nxe = x[num_noise+num_blur:num_noise+num_blur+num_weather]\nye = y[num_noise+num_blur:num_noise+num_blur+num_weather]\nywe = ywhiskers[num_noise+num_blur:num_noise+num_blur+num_weather]\nlolims = np.ones(num_weather, dtype=np.bool)\nuplims = np.zeros(num_weather, dtype=np.bool)\nplt.errorbar(xe, ye, yerr=ywe, lolims=lolims, uplims=uplims, linestyle=\"None\", ecolor=weather, marker='D', mfc=weather, linewidth=rmse_linewidth, markersize=rmse_markersize, capsize=rmse_capsize)\n\nxe = x[num_noise+num_blur+num_weather:num_noise+num_blur+num_weather+num_digital]\nye = y[num_noise+num_blur+num_weather:num_noise+num_blur+num_weather+num_digital]\nywe = ywhiskers[num_noise+num_blur+num_weather:num_noise+num_blur+num_weather+num_digital]\nlolims = np.ones(num_digital, dtype=np.bool)\nuplims = np.zeros(num_digital, dtype=np.bool)\nplt.errorbar(xe, ye, yerr=ywe, lolims=lolims, uplims=uplims, linestyle=\"None\", ecolor=digital, marker='X', mfc=digital, linewidth=rmse_linewidth, markersize=rmse_markersize, capsize=rmse_capsize)\n\ntext_offsets = [\n (4, -4), # gaussian noise 0\n (-4, -4), # shot noise 1\n (4, -4), # impulse noise 2\n (-4, -4), # defocus blur 3\n (4, -4), # glass blur 4\n (4, -4), # motion blur 5\n (4, -4), # zoom blur 6\n (4, -4), # snow 7\n (-4, 0), # frost 8\n (4, -4), # fog 9\n (-4, -4), # brightness 10\n (-4, 0), # contrast 11\n (4, -4), # elastic transform 12\n (-4, -4), # pixelate 13\n (4, 0), # jpeg_compression 14\n]\n\nannotations = [\n 'gaussian',\n 'shot',\n 'impulse',\n 'defocus',\n 'glass',\n 'motion',\n 'zoom',\n 'snow',\n 'frost',\n 'fog',\n 'brightness',\n 'contrast',\n 'elastic',\n 'pixelate',\n 'jpeg'\n]\n\nha = [\n 'left',\n 'right',\n 'left',\n 'right',\n 'left',\n 'left',\n 'left',\n 'left',\n 'right',\n 'left',\n 'right',\n 'right',\n 'left',\n 'right',\n 'left',\n]\nax = plt.gca()\nfor c in range(15):\n ax.annotate(annotations[c], (x[c], y[c]), xytext=text_offsets[c], textcoords='offset pixels', fontsize=rmse_fontsize, ha = ha[c], color=colors[c])\n\nlegend = plt.legend(['noise', 'blur', 'weather', 'digital'], loc='lower left', fontsize=rmse_fontsize, frameon=True, edgecolor=\"black\")\nfor text, color in zip(legend.get_texts(), [noise, blur, weather, digital]):\n plt.setp(text, color=color)\nlegend.get_frame().set_linewidth(1.0)\n \nsns.despine(trim=True, offset=5)\n\nplt.show()\nfig.savefig(pjoin(figures_dir, \"rpc_vs_rmse.pdf\"), bbox_inches='tight')", "_____no_output_____" ] ], [ [ "### MS COCO", "_____no_output_____" ] ], [ [ "distortions = get_distortions_from_file(pjoin(result_dir, \"coco/faster_rcnn_r50_fpn_1x_coco_results.pkl\"))\ncoco_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_r50_fpn_1x_coco_results.pkl\"), metric='AP')\nscoco_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_r50_fpn_1x_scoco_results.pkl\"), metric='AP')\ncoco_scoco_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_r50_fpn_1x_cocoscoco_results.pkl\"), metric='AP')", "_____no_output_____" ], [ "# MS COCO combined results\n\nfig = plt.figure(figsize = (combined_plot_width, combined_plot_height))\n\nax = plt.subplot(1,1,1)\n\nplt.plot(list(range(6)), np.mean(coco_results[:15,:], axis=0) * 100,\n 's-', zorder=2, color=col_clean, label='coco', 
markersize=markersize,\n linewidth=linewidth)\nplt.plot(list(range(6)), np.mean(scoco_results[:15,:], axis=0) * 100,\n '^-', zorder=1, color=col_stylized, label='stylized coco', markersize=markersize,\n linewidth=linewidth)\nplt.plot(list(range(6)), np.mean(coco_scoco_results[:15,:], axis=0) * 100,\n 'o-', zorder=3, color=col_combined, label='combined', markersize=markersize,\n linewidth=linewidth)\n\n\nplt.xlabel('corruption severity', fontsize=fontsize)\nplt.ylabel('mAP in %', fontsize=fontsize)\nplt.axis([axis_start, axis_end, 0, 40])\nax.tick_params(axis='both', which='major', labelsize=labelsize)\n\nsns.despine(trim=True, offset=5)\n\nplt.show()\n\nfig.savefig(pjoin(figures_dir, \"coco_corruption_overall.pdf\"), bbox_inches='tight')", "_____no_output_____" ], [ "plot_individual_results(data_A = coco_results,\n data_B=scoco_results,\n data_C=coco_scoco_results,\n plot_name=\"coco_corruption_individual.pdf\",\n metric_name=\"mAP in %\", ylim=[0, 40])", "_____no_output_____" ] ], [ [ "### MS COCO Benchmark", "_____no_output_____" ] ], [ [ "metric = 'AP'\ndistortions = get_distortions_from_file(pjoin(result_dir, \"coco/faster_rcnn_r50_fpn_1x_results.pkl\"))\n\ncoco_frcnn_r50_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_r50_fpn_1x_results.pkl\"), metric=metric)\ncoco_frcnn_r101_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_r101_fpn_1x_results.pkl\"), metric=metric)\ncoco_frcnn_x101_32x4d_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_x101_32x4d_fpn_1x_results.pkl\"), metric=metric)\ncoco_frcnn_x101_64x4d_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_x101_64x4d_fpn_1x_results.pkl\"), metric=metric)\n \ncoco_mrcnn_r50_results = get_results(pjoin(result_dir, \"coco/mask_rcnn_r50_fpn_1x_results.pkl\"), metric=metric)\ncoco_crcnn_r50_results = get_results(pjoin(result_dir, \"coco/cascade_rcnn_r50_fpn_1x_results.pkl\"), metric=metric)\ncoco_cmrcnn_r50_results = get_results(pjoin(result_dir, \"coco/cascade_mask_rcnn_r50_fpn_1x_results.pkl\"), metric=metric)\ncoco_retina_r50_results = get_results(pjoin(result_dir, \"coco/retinanet_r50_fpn_1x_results.pkl\"), metric=metric)\n \ncoco_frcnn_dcn_r50_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_dconv_c3-c5_r50_fpn_1x_results.pkl\"), metric=metric)\ncoco_frcnn_dcn_x101_32x4d_results = get_results(pjoin(result_dir, \"coco/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x_results.pkl\"), metric=metric)\ncoco_mrcnn_dcn_r50_results = get_results(pjoin(result_dir, \"coco/mask_rcnn_dconv_c3-c5_r50_fpn_1x_results.pkl\"), metric=metric)\ncoco_htc_dcn_x101_64x4d_results = get_results(pjoin(result_dir, \"coco/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_results.pkl\"), metric=metric)\n\ncoco_mrcnn_r50_results_segm = get_results(pjoin(result_dir, \"coco/mask_rcnn_r50_fpn_1x_results.pkl\"), task='segm', metric=metric)\ncoco_cmrcnn_r50_results_segm = get_results(pjoin(result_dir, \"coco/cascade_mask_rcnn_r50_fpn_1x_results.pkl\"), task='segm', metric=metric)\ncoco_mrcnn_dcn_r50_results_segm = get_results(pjoin(result_dir, \"coco/mask_rcnn_dconv_c3-c5_r50_fpn_1x_results.pkl\"), task='segm', metric=metric)\ncoco_htc_dcn_x101_64x4d_results_segm = get_results(pjoin(result_dir, \"coco/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_results.pkl\"), task='segm', metric=metric)", "_____no_output_____" ], [ "result_list = [coco_frcnn_r50_results, \n coco_frcnn_r101_results, \n coco_frcnn_x101_32x4d_results, \n coco_frcnn_x101_64x4d_results,\n coco_frcnn_dcn_r50_results,\n coco_frcnn_dcn_x101_32x4d_results,\n 
coco_mrcnn_dcn_r50_results,\n               coco_htc_dcn_x101_64x4d_results,\n               coco_mrcnn_r50_results, \n               coco_crcnn_r50_results, \n               coco_cmrcnn_r50_results, \n               coco_retina_r50_results, \n               coco_mrcnn_r50_results_segm, \n               coco_cmrcnn_r50_results_segm, \n               coco_mrcnn_dcn_r50_results_segm,\n               coco_htc_dcn_x101_64x4d_results_segm]\n\nmodel_names = ['frcnn_r50', \n               'frcnn_r101', \n               'frcnn_x101_32x4d', \n               'frcnn_x101_64x4d',\n               'frcnn_dcn_r50',\n               'frcnn_dcn_x101_32x4d',\n               'mrcnn_dcn_r50',\n               'htc_dcn_x101_64x4d',\n               'mrcnn_r50', \n               'crcnn_r50', \n               'cmrcnn_r50', \n               'retinanet_r50', \n               'mrcnn_r50_segm', \n               'cmrcnn_r50_segm',\n               'mrcnn_dcn_r50_segm',\n               'htc_dcn_x101_64x4d_segm']\n\ncolors = ['black','dark red', 'red', 'orange', 'dandelion']", "_____no_output_____" ], [ "for i, results in enumerate(result_list):\n    print('{}: clean:{:.1f} corr.:{:.1f} rel:{:.1f}%'.format(model_names[i], \n                                                             results[0,0,0] * 100, \n                                                             np.mean(results[:15,1:,0]) * 100, \n                                                             np.mean(results[:15,1:,0])/results[0,0,0] * 100))\n    \n    print('')", "_____no_output_____" ], [ "# plot results for different backbones\n\n# use a different colour scheme to avoid confusion\ncol_resnet50 = col_clean\ncol_resnet101 = tuple(x/255.0 for x in (175, 110, 150))\ncol_resnext101 = tuple(x/255.0 for x in (210, 150, 0))\n\nplot_individual_results(data_A = coco_frcnn_r50_results,\n                        data_B=coco_frcnn_r101_results,\n                        data_C=coco_frcnn_x101_64x4d_results,\n                        color_A=col_resnet50,\n                        color_B=col_resnet101,\n                        color_C=col_resnext101,\n                        data_name_A=\"ResNet-50\",\n                        data_name_B=\"ResNet-101\",\n                        data_name_C=\"ResNeXt-101\",\n                        plot_name=\"coco_corruption_backbones_individual.pdf\",\n                        metric_name=\"mAP in %\", ylim=[-1, 45])", "_____no_output_____" ] ], [ [ "## Cityscapes", "_____no_output_____" ] ], [ [ "distortions = get_distortions_from_file(pjoin(result_dir, \"cityscapes/faster_rcnn_r50_fpn_1x_city_results.pkl\"))\n\ncity_results = get_results(pjoin(result_dir, \"cityscapes/faster_rcnn_r50_fpn_1x_city_results.pkl\"), metric='AP')\nscity_results = get_results(pjoin(result_dir, \"cityscapes/faster_rcnn_r50_fpn_1x_scity_results.pkl\"), metric='AP')\ncity_scity_results = get_results(pjoin(result_dir, \"cityscapes/faster_rcnn_r50_fpn_1x_cityscity_results.pkl\"), metric='AP')", "_____no_output_____" ], [ "# Cityscapes combined results\n\nfig = plt.figure(figsize = (combined_plot_width, combined_plot_height))\n\nax = plt.subplot(1,1,1)\n\nplt.plot(list(range(6)), np.mean(city_results[:15,:], axis=0) * 100,\n         's-', zorder=2, color=col_clean, label='standard data', markersize=markersize,\n         linewidth=linewidth)\nplt.plot(list(range(6)), np.mean(scity_results[:15,:], axis=0) * 100,\n         '^-', zorder=1, color=col_stylized, label='stylized data', markersize=markersize,\n         linewidth=linewidth)\nplt.plot(list(range(6)), np.mean(city_scity_results[:15,:], axis=0) * 100,\n         'o-', zorder=3, color=col_combined, label='including stylized data', markersize=markersize,\n         linewidth=linewidth)\n\n\nplt.xlabel('corruption severity', fontsize=fontsize)\nplt.ylabel('mAP in %', fontsize=fontsize)\nplt.axis([axis_start, axis_end, 0, 40])\nax.tick_params(axis='both', which='major', labelsize=labelsize)\n\nsns.despine(trim=True, offset=5)\n\nplt.show()\n\nfig.savefig(pjoin(figures_dir, \"cityscapes_corruption_overall.pdf\"), bbox_inches='tight')", "_____no_output_____" ], [ "plot_individual_results(data_A = city_results,\n                        data_B = scity_results,\n                        data_C = city_scity_results,\n                        plot_name=\"cityscapes_corruption_individual.pdf\",\n                        metric_name=\"mAP in %\", ylim=[-1, 40])", "_____no_output_____" ] ]
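, [ [ "For reference, the clean/corrupted/relative numbers printed for the benchmark above can be wrapped in a small helper. This is a sketch that assumes the `[corruption, severity, ...]` array layout returned by `get_results`, with severity index 0 holding the clean result:", "_____no_output_____" ] ], [ [ "def performance_summary(results):\n    # Sketch: clean AP, mean performance under corruption (mPC) and\n    # relative performance under corruption (rPC), assuming shape\n    # [corruption, severity, ...] with severity 0 = clean.\n    clean = results[0, 0, 0]\n    mpc = np.mean(results[:15, 1:, 0])\n    return clean, mpc, mpc / clean\n\nclean, mpc, rpc = performance_summary(city_results)\nprint('clean: {:.3f}  mPC: {:.3f}  rPC: {:.1%}'.format(clean, mpc, rpc))", "_____no_output_____" ] ] ]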
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e71a39277639a1ff4733a254e3c026022da1ba56
2,507
ipynb
Jupyter Notebook
archive/20190209-mnist-linear-buggy.ipynb
akabakcioglu/SGDDynamics
f8b7ed1be545f525b01f0ca9e42591ae38e85976
[ "MIT" ]
null
null
null
archive/20190209-mnist-linear-buggy.ipynb
akabakcioglu/SGDDynamics
f8b7ed1be545f525b01f0ca9e42591ae38e85976
[ "MIT" ]
null
null
null
archive/20190209-mnist-linear-buggy.ipynb
akabakcioglu/SGDDynamics
f8b7ed1be545f525b01f0ca9e42591ae38e85976
[ "MIT" ]
1
2019-09-10T10:45:27.000Z
2019-09-10T10:45:27.000Z
21.991228
79
0.513761
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e71a42b738a595acadca17635da6a5d03752d247
231,011
ipynb
Jupyter Notebook
Example-ConditionalGAN.ipynb
hsljc/AE-DNN
d47430b24d3320d3b22f685e7e01db638bda3673
[ "MIT" ]
2
2021-02-26T02:52:29.000Z
2021-09-27T11:59:25.000Z
Example-ConditionalGAN.ipynb
dhuseljic/ae-dnn
d47430b24d3320d3b22f685e7e01db638bda3673
[ "MIT" ]
7
2021-04-30T21:24:21.000Z
2021-08-12T13:48:15.000Z
Example-ConditionalGAN.ipynb
hsljc/ae-dnn
d47430b24d3320d3b22f685e7e01db638bda3673
[ "MIT" ]
null
null
null
1,480.839744
170,204
0.955669
[ [ [ "import torch\n\nfrom utils.gan import ConditionalDCGAN\nfrom utils.datasets import load_mnist_notmnist\nfrom utils.ood_sampling import generate_ood_hypersphere\n\nimport pylab as plt\nimport torchvision", "_____no_output_____" ], [ "train_ds, _, _, _, _ = load_mnist_notmnist()\ntrain_loader = torch.utils.data.DataLoader(train_ds, batch_size=256, shuffle=True)", "Using downloaded and verified file: /home/denis/.datasets/notMNIST/notMNIST_small.tar.gz\n" ], [ "gan = ConditionalDCGAN(n_latent=2, n_channel=1, n_classes=10)\ngan.fit(train_loader, n_epochs=20)", "Training on cuda.\n" ], [ "gan.eval().cpu()\nfontsize = 15\n\nn_samples = 300\nz = torch.randn(n_samples, 2)\nz_ood = generate_ood_hypersphere(z, z_mean=torch.zeros(len(z.T)), z_cov=torch.eye(len(z.T)), factor=3)\nz_ood = z_ood[:, :, None, None]\nlbls = torch.arange(n_samples) % 10\n\nplt.figure(figsize=(10, 5))\nplt.subplot(121)\nplt.title('Latent Space', fontsize=fontsize)\nplt.scatter(z[:, 0], z[:, 1], label='Samples from unit Gaussian')\nplt.scatter(z_ood[:, 0], z_ood[:, 1], label='Uniform Samples from Hyperellipsoid')\nplt.xlabel('$z_1$', fontsize=fontsize)\nplt.ylabel('$z_2$', fontsize=fontsize)\nplt.legend(fontsize=fontsize//1.14)\n\nplt.subplot(122)\nplt.title('Generated OOD Samples', fontsize=fontsize)\nX_ood = gan.generator(z_ood, lbls).detach()\nplt.axis('off')\nplt.imshow(torchvision.utils.make_grid(X_ood[:64]).permute(1, 2, 0), aspect='auto')\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
e71a4539cff5b9ecfb0a250a586771e0d544602a
49,058
ipynb
Jupyter Notebook
nb/04_Rigid_Bodies.ipynb
shishitao/boffi_dynamics
365f16d047fb2dbfc21a2874790f8bef563e0947
[ "MIT" ]
null
null
null
nb/04_Rigid_Bodies.ipynb
shishitao/boffi_dynamics
365f16d047fb2dbfc21a2874790f8bef563e0947
[ "MIT" ]
null
null
null
nb/04_Rigid_Bodies.ipynb
shishitao/boffi_dynamics
365f16d047fb2dbfc21a2874790f8bef563e0947
[ "MIT" ]
2
2019-06-23T12:32:39.000Z
2021-08-15T18:33:55.000Z
158.251613
37,113
0.868605
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e71a4ce6f59f9ff59566b3763d64c807fa6d9e47
165,733
ipynb
Jupyter Notebook
25_Supervised Learning with scikit-learn/01_Classification_Supervised_Learning.ipynb
mohd-faizy/DataScience-With-Python
13ebb10cf9083343056d5b782957241de1d595f9
[ "MIT" ]
5
2021-02-03T14:36:58.000Z
2022-01-01T10:29:26.000Z
25_Supervised Learning with scikit-learn/01_Classification_Supervised_Learning.ipynb
mohd-faizy/DataScience-With-Python
13ebb10cf9083343056d5b782957241de1d595f9
[ "MIT" ]
null
null
null
25_Supervised Learning with scikit-learn/01_Classification_Supervised_Learning.ipynb
mohd-faizy/DataScience-With-Python
13ebb10cf9083343056d5b782957241de1d595f9
[ "MIT" ]
3
2021-02-08T00:31:16.000Z
2022-03-17T13:52:32.000Z
98.299526
41,258
0.746942
[ [ [ "<a href=\"https://colab.research.google.com/github/mohd-faizy/CAREER-TRACK-Data-Scientist-with-Python/blob/main/01_Classification_Supervised_Learning.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "--- \r\n<strong> \r\n <h1 align='center'>Classification</h1> \r\n</strong>\r\n\r\n---", "_____no_output_____" ], [ "__k-nearest neighbors (KNN)__", "_____no_output_____" ] ], [ [ "! git clone https://github.com/mohd-faizy/CAREER-TRACK-Data-Scientist-with-Python.git", "Cloning into 'CAREER-TRACK-Data-Scientist-with-Python'...\nremote: Enumerating objects: 6, done.\u001b[K\nremote: Counting objects: 100% (6/6), done.\u001b[K\nremote: Compressing objects: 100% (6/6), done.\u001b[K\nremote: Total 2192 (delta 1), reused 2 (delta 0), pack-reused 2186\u001b[K\nReceiving objects: 100% (2192/2192), 269.45 MiB | 31.34 MiB/s, done.\nResolving deltas: 100% (779/779), done.\nChecking out files: 100% (929/929), done.\n" ], [ "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport os\r\n\r\n#plt.style.use('ggplot')\r\nsns.set_theme(style='whitegrid')\r\n%matplotlib inline", "_____no_output_____" ], [ "os.chdir('/content/CAREER-TRACK-Data-Scientist-with-Python/25_Supervised Learning with scikit-learn/_dataset')\r\ncwd = os.getcwd()\r\nprint('Curent working directory is ', cwd)", "Curent working directory is /content/CAREER-TRACK-Data-Scientist-with-Python/25_Supervised Learning with scikit-learn/_dataset\n" ], [ "ls", "automobile_miles_per_gallon.csv gm_2008_region.csv\nboston_housing.csv house-votes-84.csv\ndiabetes.csv house-votes-84_missing_data.csv\ngapminder_2008_region.csv PIMA_Indians.csv\ngapminder_all.csv white-wine.csv\ngapminder_drop_region.csv winequality-red.csv\n" ] ], [ [ "## __EDA__", "_____no_output_____" ], [ "### __approch 1__ [X]", "_____no_output_____" ] ], [ [ "dfc = pd.read_csv('house-votes-84.csv')\r\nprint(dfc.shape)\r\ndfc.head(3)", "(434, 17)\n" ], [ "dfm = pd.read_csv('house-votes-84_missing_data.csv')\r\n\r\n# Drop rows with a 'question mark' value in any column in a pandas dataframe\r\ndfm = dfm[(dfm != '?').all(axis=1)].reset_index(drop=True)\r\n# Reset index after dropping rows?\r\ndfm.drop(['index'], axis=1, inplace=True)\r\n\r\nprint(dfm.shape)\r\ndfm.head(3)", "(232, 17)\n" ] ], [ [ "Shape of the dataframe get reduced.", "_____no_output_____" ] ], [ [ "dfm.columns", "_____no_output_____" ] ], [ [ "### __approch 2__ [✓]", "_____no_output_____" ] ], [ [ "df = pd.read_csv('house-votes-84.csv', header=None)\ndf.columns = ['party', 'infants', 'water', 'budget', 'physician', 'salvador',\n 'religious', 'satellite', 'aid', 'missile', 'immigration', 'synfuels',\n 'education', 'superfund', 'crime', 'duty_free_exports', 'eaa_rsa']\ndf.replace({'?':'n'}, inplace=True)\ndf.replace({'n':0, 'y': 1}, inplace=True)\ndf.head()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 435 entries, 0 to 434\nData columns (total 17 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 party 435 non-null object\n 1 infants 435 non-null int64 \n 2 water 435 non-null int64 \n 3 budget 435 non-null int64 \n 4 physician 435 non-null int64 \n 5 salvador 435 non-null int64 \n 6 religious 435 non-null int64 \n 7 satellite 435 non-null int64 \n 8 aid 435 non-null int64 \n 9 missile 435 non-null int64 \n 10 immigration 435 non-null int64 \n 11 synfuels 435 non-null int64 \n 12 
education 435 non-null int64 \n 13 superfund 435 non-null int64 \n 14 crime 435 non-null int64 \n 15 duty_free_exports 435 non-null int64 \n 16 eaa_rsa 435 non-null int64 \ndtypes: int64(16), object(1)\nmemory usage: 57.9+ KB\n" ], [ "df.describe()", "_____no_output_____" ], [ "plt.figure(figsize=(12, 7), facecolor='lightgrey')\nsns.countplot(x='education', hue='party', data=df, palette='RdBu')\nplt.xticks([0, 1], ['No', 'Yes'])", "_____no_output_____" ], [ "plt.figure(figsize=(12, 7), facecolor='lightgrey')\nsns.countplot(x='satellite', hue='party', data=df, palette='RdBu')\nplt.xticks([0, 1], ['No', 'Yes'])", "_____no_output_____" ], [ "plt.figure(figsize=(12, 7), facecolor='lightgrey')\nsns.countplot(x='missile', hue='party', data=df, palette='RdBu')\nplt.xticks([0, 1], ['No', 'Yes'])", "_____no_output_____" ], [ "plt.figure(figsize=(12, 7), facecolor='lightgrey')\r\nsns.countplot(x='crime', hue='party', data=df, palette='RdBu')\r\nplt.xticks([0, 1], ['No', 'Yes'])", "_____no_output_____" ] ], [ [ "### __KNeighborsClassifier__", "_____no_output_____" ] ], [ [ "# Import KNeighborsClassifier from sklearn.neighbors\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\n# Create arrays for the features and the response variable\r\ny = df['party'].values\r\nX = df.drop('party', axis=1).values\r\n\r\n# Create a k-NN classifier with 6 neighbors\r\nknn = KNeighborsClassifier(n_neighbors=6)\r\n\r\n# Fit the classifier to the data\r\nknn.fit(X,y)", "_____no_output_____" ], [ "# Predict the labels for the training data X\r\ny_pred = knn.predict(X)", "_____no_output_____" ], [ "X_new = pd.DataFrame([0.696469, 0.286139, 0.226851, 0.551315, 0.719469, 0.423106, 0.980764, \r\n 0.68483, 0.480932, 0.392118, 0.343178, 0.72905, 0.438572, 0.059678,\r\n 0.398044, 0.737995]).transpose()", "_____no_output_____" ], [ "# Predict and print the label for the new data point X_new\r\nnew_prediction = knn.predict(X_new)\r\nprint(\"Prediction: {}\".format(new_prediction))", "Prediction: ['democrat']\n" ], [ "X", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "print('shape of X: ',X.shape)\r\nprint('shape of y: ',y.shape)", "shape of X: (435, 16)\nshape of y: (435,)\n" ] ], [ [ "# __The digits recognition dataset__", "_____no_output_____" ] ], [ [ "# Import necessary modules\r\nfrom sklearn import datasets\r\nimport matplotlib.pyplot as plt\r\n\r\n# Load the digits dataset: digits\r\ndigits = datasets.load_digits()\r\n\r\n# Print the keys and DESCR of the dataset\r\nprint(digits.keys())\r\nprint(digits.DESCR)\r\n\r\n# Print the shape of the images and data keys\r\nprint(digits.images.shape)\r\nprint(digits.data.shape)\r\n\r\n# Display digit 1010\r\nplt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')\r\nplt.show()", "dict_keys(['data', 'target', 'target_names', 'images', 'DESCR'])\n.. _digits_dataset:\n\nOptical recognition of handwritten digits dataset\n--------------------------------------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 5620\n :Number of Attributes: 64\n :Attribute Information: 8x8 image of integer pixels in the range 0..16.\n :Missing Attribute Values: None\n :Creator: E. 
Alpaydin (alpaydin '@' boun.edu.tr)\n :Date: July; 1998\n\nThis is a copy of the test set of the UCI ML hand-written digits datasets\nhttps://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits\n\nThe data set contains images of hand-written digits: 10 classes where\neach class refers to a digit.\n\nPreprocessing programs made available by NIST were used to extract\nnormalized bitmaps of handwritten digits from a preprinted form. From a\ntotal of 43 people, 30 contributed to the training set and different 13\nto the test set. 32x32 bitmaps are divided into nonoverlapping blocks of\n4x4 and the number of on pixels are counted in each block. This generates\nan input matrix of 8x8 where each element is an integer in the range\n0..16. This reduces dimensionality and gives invariance to small\ndistortions.\n\nFor info on NIST preprocessing routines, see M. D. Garris, J. L. Blue, G.\nT. Candela, D. L. Dimmick, J. Geist, P. J. Grother, S. A. Janet, and C.\nL. Wilson, NIST Form-Based Handprint Recognition System, NISTIR 5469,\n1994.\n\n.. topic:: References\n\n - C. Kaynak (1995) Methods of Combining Multiple Classifiers and Their\n Applications to Handwritten Digit Recognition, MSc Thesis, Institute of\n Graduate Studies in Science and Engineering, Bogazici University.\n - E. Alpaydin, C. Kaynak (1998) Cascading Classifiers, Kybernetika.\n - Ken Tang and Ponnuthurai N. Suganthan and Xi Yao and A. Kai Qin.\n Linear dimensionalityreduction using relevance weighted LDA. School of\n Electrical and Electronic Engineering Nanyang Technological University.\n 2005.\n - Claudio Gentile. A New Approximate Maximal Margin Classification\n Algorithm. NIPS. 2000.\n(1797, 8, 8)\n(1797, 64)\n" ] ], [ [ "### __Train/Test Split + Fit/Predict/Accuracy__", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Create feature and target arrays\r\nX = digits.data\r\ny = digits.target\r\n\r\n# Split into training and test set\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, \r\n random_state=42, stratify=y)\r\n\r\n# Create a k-NN classifier with 7 neighbors: knn\r\nknn = KNeighborsClassifier(n_neighbors=7)\r\n\r\n# Fit the classifier to the training data\r\nknn.fit(X_train, y_train)\r\n\r\n# Print the accuracy\r\nprint(knn.score(X_test, y_test))", "0.9833333333333333\n" ] ], [ [ "`stratify=y`**stratification **means that the `train_test_split` method returns **training** and **test subsets** that have the **same proportions** of class labels as the input dataset.", "_____no_output_____" ], [ "### __Overfitting and underfitting__", "_____no_output_____" ] ], [ [ "# Setup arrays to store train and test accuracies\r\nneighbors = np.arange(1, 9)\r\ntrain_accuracy = np.empty(len(neighbors))\r\ntest_accuracy = np.empty(len(neighbors))\r\n\r\n# Loop over different values of k\r\nfor i, k in enumerate(neighbors):\r\n # Setup a k-NN Classifier with k neighbors: knn\r\n knn = KNeighborsClassifier(n_neighbors=k)\r\n\r\n # Fit the classifier to the training data\r\n knn.fit(X_train, y_train)\r\n \r\n #Compute accuracy on the training set\r\n train_accuracy[i] = knn.score(X_train, y_train)\r\n\r\n #Compute accuracy on the testing set\r\n test_accuracy[i] = knn.score(X_test, y_test)\r\n\r\n# Generate plot\r\nplt.figure(figsize=(12, 7), facecolor='lightgrey')\r\nplt.title('k-NN: Varying Number of Neighbors')\r\nplt.plot(neighbors, test_accuracy, label = 'Testing 
Accuracy')\r\nplt.plot(neighbors, train_accuracy, label = 'Training Accuracy')\r\nplt.xlabel('Number of Neighbors')\r\nplt.ylabel('Accuracy')\r\nplt.legend()\r\nplt.show()", "_____no_output_____" ] ], [ [ "<p align='center'> \r\n <a href=\"https://twitter.com/F4izy\"> \r\n <img src=\"https://th.bing.com/th/id/OIP.FCKMemzqNplY37Jwi0Yk3AHaGl?w=233&h=207&c=7&o=5&pid=1.7\" width=50px \r\n height=50px> \r\n </a> \r\n <a href=\"https://www.linkedin.com/in/mohd-faizy/\"> \r\n <img src='https://th.bing.com/th/id/OIP.idrBN-LfvMIZl370Vb65SgHaHa?pid=Api&rs=1' width=50px height=50px> \r\n </a> \r\n</p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e71a4e1bf124ec2d28e4193a2df0a398a8d0f133
59,998
ipynb
Jupyter Notebook
CarPricePredication_HotEncoding.ipynb
Homedepot5/DataScience
b0e265696f562db101345fcca341b6fff6827b2b
[ "Apache-2.0" ]
null
null
null
CarPricePredication_HotEncoding.ipynb
Homedepot5/DataScience
b0e265696f562db101345fcca341b6fff6827b2b
[ "Apache-2.0" ]
null
null
null
CarPricePredication_HotEncoding.ipynb
Homedepot5/DataScience
b0e265696f562db101345fcca341b6fff6827b2b
[ "Apache-2.0" ]
null
null
null
37.592732
6,937
0.360679
[ [ [ "<a href=\"https://colab.research.google.com/github/Homedepot5/DataScience/blob/origin%2Ffeature%2Fdevelopment/CarPricePredication_HotEncoding.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "**CarPricePredication_HotEncoding**\nand dummies variable example **bold text** **bold text**", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nfrom google.colab import files\nimport io", "_____no_output_____" ], [ "uploaded=files.upload()", "_____no_output_____" ], [ "df=pd.read_csv(io.BytesIO(uploaded['CarPrices.csv']))\ndf", "_____no_output_____" ], [ "dummeies=pd.get_dummies(df['Car Model'])\ndummeies", "_____no_output_____" ], [ "newdf=pd.concat([df,dummeies],axis=1)\nnewdf", "_____no_output_____" ], [ "x=newdf.drop(df[['Car Model','Sell Price($)']],axis=1)\nx", "_____no_output_____" ], [ "model=linear_model.LinearRegression()\nmodel", "_____no_output_____" ], [ "model.fit(x,newdf[['Sell Price($)']])", "_____no_output_____" ], [ "model.predict([[45000,4,0,0,1]])", "_____no_output_____" ] ], [ [ "BMW PRICE", "_____no_output_____" ] ], [ [ "model.predict([[8600,7,0,1,0]])", "_____no_output_____" ], [ "model.score(x,y)", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "UsingHot label Encoder ", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import LabelEncoder\nle=LabelEncoder()", "_____no_output_____" ], [ "dfl=df\ndfl", "_____no_output_____" ], [ "dfl['Car Model']=le.fit_transform(dfl['Car Model'])\ndfl", "_____no_output_____" ], [ "x=dfl[['Car Model','Mileage','Age(yrs)']].values\nx", "_____no_output_____" ], [ "y=dfl['Sell Price($)']\ny", "_____no_output_____" ], [ "from sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nct = ColumnTransformer([('Car Model', OneHotEncoder(), [0])], remainder = 'passthrough')\n", "_____no_output_____" ], [ "x = ct.fit_transform(x)\nx", "_____no_output_____" ], [ "X = x[:,1:]\nX", "_____no_output_____" ], [ "model.fit(X,y)", "_____no_output_____" ], [ "model.predict([[1,4,0,45000]])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e71a5762a03f047faaee863d73cb83fb054c50eb
5,524
ipynb
Jupyter Notebook
colabs/trends_places_to_bigquery_via_query.ipynb
RMStanford/starthinker
0a2df38bebb9d089bf91b6df01598d11a354eed3
[ "Apache-2.0" ]
null
null
null
colabs/trends_places_to_bigquery_via_query.ipynb
RMStanford/starthinker
0a2df38bebb9d089bf91b6df01598d11a354eed3
[ "Apache-2.0" ]
null
null
null
colabs/trends_places_to_bigquery_via_query.ipynb
RMStanford/starthinker
0a2df38bebb9d089bf91b6df01598d11a354eed3
[ "Apache-2.0" ]
null
null
null
32.880952
230
0.489138
[ [ [ "#1. Install Dependencies\nFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.\n", "_____no_output_____" ] ], [ [ "!pip install git+https://github.com/google/starthinker\n", "_____no_output_____" ] ], [ [ "#2. Get Cloud Project ID\nTo run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.\n", "_____no_output_____" ] ], [ [ "CLOUD_PROJECT = 'PASTE PROJECT ID HERE'\n\nprint(\"Cloud Project Set To: %s\" % CLOUD_PROJECT)\n", "_____no_output_____" ] ], [ [ "#3. Get Client Credentials\nTo read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.\n", "_____no_output_____" ] ], [ [ "CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'\n\nprint(\"Client Credentials Set To: %s\" % CLIENT_CREDENTIALS)\n", "_____no_output_____" ] ], [ [ "#4. Enter Trends Places To BigQuery Via Query Parameters\nMove using a WOEID query.\n 1. Provide <a href='https://apps.twitter.com/' target='_blank'>Twitter credentials</a>.\n 1. Provide BigQuery WOEID source query.\n 1. Specify BigQuery dataset and table to write API call results to.\n 1. Writes: WOEID, Name, Url, Promoted_Content, Query, Tweet_Volume\n 1. Note Twitter API is rate limited to 15 requests per 15 minutes. So keep WOEID lists short.\nModify the values below for your use case, can be done multiple times, then click play.\n", "_____no_output_____" ] ], [ [ "FIELDS = {\n 'secret': '',\n 'key': '',\n 'places_dataset': '',\n 'places_query': '',\n 'places_legacy': False,\n 'destination_dataset': '',\n 'destination_table': '',\n}\n\nprint(\"Parameters Set To: %s\" % FIELDS)\n", "_____no_output_____" ] ], [ [ "#5. Execute Trends Places To BigQuery Via Query\nThis does NOT need to be modified unles you are changing the recipe, click play.\n", "_____no_output_____" ] ], [ [ "from starthinker.util.project import project\nfrom starthinker.script.parse import json_set_fields\n\nUSER_CREDENTIALS = '/content/user.json'\n\nTASKS = [\n {\n 'twitter': {\n 'auth': 'user',\n 'secret': {'field': {'name': 'secret','kind': 'string','order': 1,'default': ''}},\n 'key': {'field': {'name': 'key','kind': 'string','order': 2,'default': ''}},\n 'trends': {\n 'places': {\n 'single_cell': True,\n 'bigquery': {\n 'dataset': {'field': {'name': 'places_dataset','kind': 'string','order': 3,'default': ''}},\n 'query': {'field': {'name': 'places_query','kind': 'string','order': 4,'default': ''}},\n 'legacy': {'field': {'name': 'places_legacy','kind': 'boolean','order': 5,'default': False}}\n }\n }\n },\n 'out': {\n 'bigquery': {\n 'dataset': {'field': {'name': 'destination_dataset','kind': 'string','order': 6,'default': ''}},\n 'table': {'field': {'name': 'destination_table','kind': 'string','order': 7,'default': ''}}\n }\n }\n }\n }\n]\n\njson_set_fields(TASKS, FIELDS)\nproject.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True)\nproject.execute()\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e71a585dfcdae0efaccca626887413ee41bd8710
49,031
ipynb
Jupyter Notebook
transformers_doc/pytorch/task_summary.ipynb
nbroad1881/notebooks
8044bbce25bed20a79e5488040a41d3c32575cec
[ "Apache-2.0" ]
null
null
null
transformers_doc/pytorch/task_summary.ipynb
nbroad1881/notebooks
8044bbce25bed20a79e5488040a41d3c32575cec
[ "Apache-2.0" ]
null
null
null
transformers_doc/pytorch/task_summary.ipynb
nbroad1881/notebooks
8044bbce25bed20a79e5488040a41d3c32575cec
[ "Apache-2.0" ]
null
null
null
41.799659
617
0.635149
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e71a60e0127c3b0cf2024651580bf9815f952823
69,451
ipynb
Jupyter Notebook
notebooks/ChangeCounter.ipynb
HGUISEL/debuggingbook
6dc18c90dafd4cdff577fec8ba485eb9650ec4bf
[ "MIT" ]
107
2020-09-27T13:33:26.000Z
2022-03-21T10:45:04.000Z
notebooks/ChangeCounter.ipynb
HGUISEL/debuggingbook
6dc18c90dafd4cdff577fec8ba485eb9650ec4bf
[ "MIT" ]
26
2020-10-23T14:43:04.000Z
2022-03-03T15:06:52.000Z
notebooks/ChangeCounter.ipynb
HGUISEL/debuggingbook
6dc18c90dafd4cdff577fec8ba485eb9650ec4bf
[ "MIT" ]
20
2021-03-04T11:49:09.000Z
2022-03-23T06:16:36.000Z
28.301141
468
0.559243
[ [ [ "# Where the Bugs are\n\nEvery time a bug is fixed, developers leave a trace – in the _version database_ when they commit the fix, or in the _bug database_ when they close the bug. In this chapter, we learn how to _mine these repositories_ for past changes and bugs, and how to _map_ them to individual modules and functions, highlighting those project components that have seen most changes and fixes over time.", "_____no_output_____" ] ], [ [ "from bookutils import YouTubeVideo\nYouTubeVideo(\"Aifq0JOc1Jc\")", "_____no_output_____" ] ], [ [ "**Prerequisites**\n\n* You should have read [the chapter on tracking bugs](Tracking.ipynb).", "_____no_output_____" ] ], [ [ "import bookutils", "_____no_output_____" ], [ "import Tracking", "_____no_output_____" ] ], [ [ "## Synopsis\n<!-- Automatically generated. Do not edit. -->\n\nTo [use the code provided in this chapter](Importing.ipynb), write\n\n```python\n>>> from debuggingbook.ChangeCounter import <identifier>\n```\n\nand then make use of the following features.\n\n\nThis chapter provides two classes `ChangeCounter` and `FineChangeCounter` that allow to mine and visualize the distribution of changes in a given `git` repository.\n\n`ChangeCounter` is initialized as\n\n```python\nchange_counter = ChangeCounter(repository)\n```\nwhere `repository` is either \n\n* a _directory_ containing a `git` clone (i.e., it contains a `.git` directory)\n* the URL of a `git` repository.\n\nAdditional arguments are being passed to the underlying `Repository` class from the [PyDriller](https://pydriller.readthedocs.io/) Python package. A `filter` keyword argument, if given, is a predicate that takes a modification (from PyDriller) and returns True if it should be included.\n\nIn a change counter, all elements in the repository are represented as _nodes_ – tuples $(f_1, f_2, ..., f_n)$ that denote a _hierarchy_: Each $f_i$ is a directory holding $f_{i+1}$, with $f_n$ being the actual file.\n\nA `change_counter` provides a number of attributes. `changes` is a mapping of nodes to the number of changes in that node:\n\n```python\n>>> change_counter.changes.get(('README.md',), None)\n13\n```\nThe `messages` attribute holds all commit messages related to that node:\n\n```python\n>>> change_counter.messages.get(('README.md',), None)\n['Fix: corrected rule for rendered notebooks (#24)\\nNew: strip out any <iframe> tags\\nNew: when rendering .md files, replace videos by proper image',\n 'Doc update',\n 'Doc update',\n 'New: show badges at top of GitHub project page',\n 'More badges',\n 'Fix: bad links in CI badges',\n 'New: prefer Unicode arrows over LaTeX ones',\n 'Updated README.md',\n 'Update',\n 'Doc update',\n 'Doc update',\n 'Doc update',\n 'Doc update']\n```\nThe `sizes` attribute holds the (last) size of the respective element:\n\n```python\n>>> change_counter.sizes.get(('README.md',), None)\n13025\n```\n`FineChangeCounter` acts like `ChangeCounter`, but also retrieves statistics for elements _within_ the respective files; it has been tested for C, Python, and Jupyter Notebooks and should provide sufficient results for programming languages with similar syntax.\n\nThe `map()` method of `ChangeCounter` and `FineChangeCounter` produces an interactive tree map that allows to explore the elements of a repository. 
The redder (darker) a rectangle, the more changes it has seen; the larger a rectangle, the larger its size in bytes.\n\n```python\n>>> fine_change_counter.map()\n```\n![](PICS/ChangeCounter-synopsis-1.png)\n\nThe included classes offer several methods that can be overridden in subclasses to customize what to mine and how to visualize it. See the chapter for details.\n\nHere are all the classes defined in this chapter:\n\n![](PICS/ChangeCounter-synopsis-2.svg)\n\n", "_____no_output_____" ], [ "## Mining Change Histories\n\nThe history of any software project is a history of change. Any nontrivial project thus comes with a _version database_ to organize and track changes; and possibly also with an [issue database](Tracking.ipynb) to organize and track issues.\n\nOver time, these databases hold plenty of information about the project: _Who changed what, when, and why?_ This information can be _mined_ from existing databases and _analyzed_ to answer questions such as\n\n* Which parts in my project were most frequently or recently changed?\n* How many files does the average change touch?\n* Where in my project were the most bugs fixed?", "_____no_output_____" ], [ "To answer such questions, we can _mine_ change and bug histories for past changes and fixes. This involves digging through version databases such as `git` and [issue trackers such as RedMine or Bugzilla](Tracking.ipynb) and extracting all their information. Fortunately for us, there is ready-made infrastructure for some of this. ", "_____no_output_____" ], [ "## Mining with PyDriller", "_____no_output_____" ], [ "[PyDriller](https://pydriller.readthedocs.io/) is a Python package for mining change histories. Its `Repository` class takes a `git` version repository and provides access to all the individual changes (\"modifications\"), together with committers, affected files, commit messages, and more.", "_____no_output_____" ] ], [ [ "from pydriller import Repository # https://pydriller.readthedocs.io/", "_____no_output_____" ], [ "from pydriller.domain.commit import Commit", "_____no_output_____" ], [ "from pydriller.domain.commit import ModifiedFile", "_____no_output_____" ] ], [ [ "To use `Repository`, we need to pass it \n* the URL of a `git` repository; or\n* the directory name where a cloned `git` repository can be found.\n\nIn general, cloning a `git` repository locally (with `git clone URL`) and then analyzing it locally will be faster and require fewer network resources.", "_____no_output_____" ], [ "Let us apply `Repository` to the repository of this book. 
The function `current_repo()` returns the directory in which a `.git` subdirectory is stored – that is, the root of a cloned `git` repository.", "_____no_output_____" ] ], [ [ "import os", "_____no_output_____" ], [ "# ignore\nfrom typing import Callable, Optional, Type, Tuple, Any\nfrom typing import Dict, Union, Set, List", "_____no_output_____" ], [ "def current_repo() -> Optional[str]:\n path = os.getcwd()\n while True:\n if os.path.exists(os.path.join(path, '.git')):\n return os.path.normpath(path)\n\n # Go one level up\n new_path = os.path.normpath(os.path.join(path, '..'))\n if new_path != path:\n path = new_path\n else:\n return None\n\n return None", "_____no_output_____" ], [ "current_repo()", "_____no_output_____" ] ], [ [ "This gives us a repository miner for the book:", "_____no_output_____" ] ], [ [ "from datetime import datetime", "_____no_output_____" ], [ "book_miner = Repository(current_repo(), to=datetime(2020, 10, 1))", "_____no_output_____" ] ], [ [ "The `to` argument limits the range of time we want to look at.", "_____no_output_____" ], [ "You can also specify a URL instead, but this will access the repository via the network and generally be much slower.", "_____no_output_____" ] ], [ [ "DEBUGGINGBOOK_REMOTE_REPO = 'https://github.com/uds-se/debuggingbook.git'\n# book_miner = Repository(DEBUGGINGBOOK_REMOTE_REPO)", "_____no_output_____" ], [ "# ignore\nif 'CI' in os.environ:\n # The CI git clone is shallow, so access full repo remotely\n book_miner = Repository(DEBUGGINGBOOK_REMOTE_REPO,\n to=datetime(2020, 10, 1))", "_____no_output_____" ] ], [ [ "`traverse_commits()` is a generator that returns one commit after another. Let us fetch the very first commit made to the book:", "_____no_output_____" ] ], [ [ "book_commits = book_miner.traverse_commits()\nbook_first_commit = next(book_commits)", "_____no_output_____" ] ], [ [ "Each commit has a number of attributes telling us more about the commit.", "_____no_output_____" ] ], [ [ "[attr for attr in dir(book_first_commit) if not attr.startswith('_')]", "_____no_output_____" ] ], [ [ "For instance, the `msg` attribute lets us know about the commit message:", "_____no_output_____" ] ], [ [ "book_first_commit.msg", "_____no_output_____" ] ], [ [ "whereas the `author` attribute gets us the name and email of the person who made the commit:", "_____no_output_____" ] ], [ [ "[attr for attr in dir(book_first_commit.author) if not attr.startswith('_')]", "_____no_output_____" ], [ "book_first_commit.author.name, book_first_commit.author.email", "_____no_output_____" ] ], [ [ "A commit consists of multiple _modifications_ to possibly multiple files. 
The commit `modified_files` attribute returns a list of modifications.", "_____no_output_____" ] ], [ [ "book_first_commit.modified_files", "_____no_output_____" ] ], [ [ "For each modification, we can retrieve the files involved as well as several statistics:", "_____no_output_____" ] ], [ [ "[attr for attr in dir(book_first_commit.modified_files[0]) if not attr.startswith('_')]", "_____no_output_____" ] ], [ [ "Let us see which file was created with this modification:", "_____no_output_____" ] ], [ [ "book_first_commit.modified_files[0].new_path", "_____no_output_____" ] ], [ [ "The `source_code` attribute holds the entire file contents after the modification.", "_____no_output_____" ] ], [ [ "print(book_first_commit.modified_files[0].source_code)", "_____no_output_____" ] ], [ [ "We see that the `debuggingbook` project started with a very simple commit, namely the addition of an (almost empty) `README.md` file.", "_____no_output_____" ], [ "The attribute `source_code_before` holds the previous source code. We see that it is `None` – the file was just created.", "_____no_output_____" ] ], [ [ "print(book_first_commit.modified_files[0].source_code_before)", "_____no_output_____" ] ], [ [ "Let us have a look at the _second_ commit. We see that it is much more substantial already.", "_____no_output_____" ] ], [ [ "book_second_commit = next(book_commits)", "_____no_output_____" ], [ "[m.new_path for m in book_second_commit.modified_files]", "_____no_output_____" ] ], [ [ "We fetch the modification for the `README.md` file:", "_____no_output_____" ] ], [ [ "readme_modification = [m for m in book_second_commit.modified_files if m.new_path == 'README.md'][0]", "_____no_output_____" ] ], [ [ "The `source_code_before` attribute holds the previous version (which we already have seen):", "_____no_output_____" ] ], [ [ "print(readme_modification.source_code_before)", "_____no_output_____" ] ], [ [ "The `source_code` attribute holds the new version – now a complete \"README\" file. (Compare this first version to the [current README text](index.ipynb).)", "_____no_output_____" ] ], [ [ "print(readme_modification.source_code[:400])", "_____no_output_____" ] ], [ [ "The `diff` attribute holds the differences between the old and the new version.", "_____no_output_____" ] ], [ [ "print(readme_modification.diff[:100])", "_____no_output_____" ] ], [ [ "The `diff_parsed` attribute even lists added and deleted lines:", "_____no_output_____" ] ], [ [ "readme_modification.diff_parsed['added'][:10]", "_____no_output_____" ] ], [ [ "With all this information, we can track all commits and modifications and establish statistics over which files were changed (and possibly even fixed) most. This is what we will do in the next section.", "_____no_output_____" ] ], [ [ "# ignore\ndel book_miner # Save a bit of memory", "_____no_output_____" ] ], [ [ "## Counting Changes\n\nWe start with a simple `ChangeCounter` class that, given a repository, counts for each file how frequently it was changed.", "_____no_output_____" ], [ "We represent file names as _nodes_ – a tuple $(f_1, f_2, ..., f_n)$ that denotes a _hierarchy_: Each $f_i$ is a directory holding $f_{i+1}$, with $f_n$ being the actual file. 
Here is what this notebook looks like as a node:", "_____no_output_____" ] ], [ [ "tuple('debuggingbook/notebooks/ChangeCounter.ipynb'.split('/'))", "_____no_output_____" ], [ "Node = Tuple", "_____no_output_____" ] ], [ [ "The constructor takes the repository to be analyzed and sets the internal counters.", "_____no_output_____" ] ], [ [ "from collections import defaultdict", "_____no_output_____" ], [ "import warnings", "_____no_output_____" ], [ "from git.exc import GitCommandError # type: ignore", "_____no_output_____" ], [ "class ChangeCounter:\n \"\"\"Count the number of changes for a repository.\"\"\"\n\n def __init__(self, repo: str, *, \n filter: Optional[Callable[[Commit], bool]] = None, \n log: bool = False, \n **kwargs: Any) -> None:\n \"\"\"\n Constructor.\n `repo` is a git repository (as URL or directory).\n `filter` is a predicate that takes a modification and returns True \n if it should be considered (default: consider all).\n `log` turns on logging if set.\n `kwargs` are passed to the `Repository()` constructor.\n \"\"\"\n self.repo = repo\n self.log = log\n\n if filter is None:\n def filter(m: ModifiedFile) -> bool:\n return True\n assert filter is not None\n\n self.filter = filter\n\n # A node is a tuple (f_1, f_2, f_3, ..., f_n) denoting\n # a folder f_1 holding a folder f_2 ... holding a file f_n.\n\n # Mapping node -> # of changes\n self.changes: Dict[Node, int] = defaultdict(int)\n\n # Mapping node -> list of commit messages\n self.messages: Dict[Node, List[str]] = defaultdict(list)\n\n # Mapping node -> last size seen\n self.sizes: Dict[Node, Union[int, float]] = {}\n\n self.mine(**kwargs)", "_____no_output_____" ] ], [ [ "The method `mine()` does all the heavy lifting of mining. It retrieves all commits and all modifications from the repository, passing the modifications through the `update_stats()` method.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def mine(self, **kwargs: Any) -> None:\n \"\"\"Gather data from repository. To be extended in subclasses.\"\"\"\n miner = Repository(self.repo, **kwargs)\n\n for commit in miner.traverse_commits():\n try:\n self.mine_commit(commit)\n except GitCommandError as err:\n # Warn about failing git commands, but continue\n warnings.warn(str(err))\n\n def mine_commit(self, commit: Commit) -> None:\n for m in commit.modified_files:\n m.committer = commit.committer\n m.committer_date = commit.committer_date\n m.msg = commit.msg\n\n if self.include(m):\n self.update_stats(m)", "_____no_output_____" ] ], [ [ "The `include()` method lets us filter modifications. 
For simplicity, we copy the most relevant attributes of the commit over to the modification, such that the filter can access them, too.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def include(self, m: ModifiedFile) -> bool:\n \"\"\"\n Return True if the modification `m` should be included\n (default: the `filter` predicate given to the constructor).\n To be overloaded in subclasses.\n \"\"\"\n return self.filter(m)", "_____no_output_____" ] ], [ [ "For each modification, `update_stats()` computes the corresponding node and then invokes `update_size()`, `update_changes()`, and `update_elems()` on it.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def update_stats(self, m: ModifiedFile) -> None:\n \"\"\"\n Update counters with modification `m`.\n Can be extended in subclasses.\n \"\"\"\n if not m.new_path:\n return\n\n node = tuple(m.new_path.split('/'))\n\n self.update_size(node, len(m.source_code) if m.source_code else 0)\n self.update_changes(node, m.msg)\n\n self.update_elems(node, m)", "_____no_output_____" ] ], [ [ "`update_size()` simply saves the last size of the item being modified. Since we progress from first to last commit, this reflects the size of the newest version.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def update_size(self, node: Tuple, size: int) -> None:\n \"\"\"\n Update counters for `node` with `size`.\n Can be extended in subclasses.\n \"\"\"\n self.sizes[node] = size", "_____no_output_____" ] ], [ [ "`update_changes()` increases the counter `changes` for the given node `node`, and adds the current commit message `commit_msg` to its list. This makes\n\n* `sizes` a mapping of nodes to their size\n* `changes` a mapping of nodes to the number of changes they have seen\n* `messages` a mapping of nodes to the list of commit messages that have affected them.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def update_changes(self, node: Tuple, commit_msg: str) -> None:\n \"\"\"\n Update stats for `node` changed with `commit_msg`.\n Can be extended in subclasses.\n \"\"\"\n self.changes[node] += 1\n\n self.messages[node].append(commit_msg)", "_____no_output_____" ] ], [ [ "The `update_elems()` method is reserved for later use, when we go and count fine-grained changes.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def update_elems(self, node: Tuple, m: ModifiedFile) -> None:\n \"\"\"\n Update counters for subelements of `node` with modification `m`.\n To be defined in subclasses.\n \"\"\"\n pass", "_____no_output_____" ] ], [ [ "Let us put `ChangeCounter` into action – on the current (debuggingbook) repository.", "_____no_output_____" ] ], [ [ "DEBUGGINGBOOK_REPO = current_repo()", "_____no_output_____" ], [ "DEBUGGINGBOOK_REPO", "_____no_output_____" ] ], [ [ "The function `debuggingbook_change_counter` instantiates a `ChangeCounter` class (or any subclass) with the debuggingbook repository, mining all the counters as listed above. 
Since mining _all_ history takes quite some time, its parameter `start_date` lets us set a starting date (default: March 1, 2021); changes before that date will be ignored.", "_____no_output_____" ] ], [ [ "DEBUGGINGBOOK_START_DATE: datetime = datetime(2021, 3, 1)", "_____no_output_____" ], [ "NUM_WORKERS = 4 # Number of threads to be run in parallel", "_____no_output_____" ], [ "def debuggingbook_change_counter(\n cls: Type,\n start_date: datetime = DEBUGGINGBOOK_START_DATE) -> Any:\n \"\"\"\n Instantiate a ChangeCounter (sub)class `cls` with the debuggingbook repo.\n Only mines changes after `start_date` (default: DEBUGGINGBOOK_START_DATE)\n \"\"\"\n\n def filter(m: ModifiedFile) -> bool:\n \"\"\"\n Do not include\n * the `docs/` directory; it only holds generated Web pages\n * the `notebooks/shared/` package; this is infrastructure\n * the `synopsis` pictures; these are all generated\n \"\"\"\n return (m.new_path and\n not m.new_path.startswith('docs/') and\n not m.new_path.startswith('notebooks/shared/') and\n '-synopsis-' not in m.new_path)\n\n return cls(DEBUGGINGBOOK_REPO,\n filter=filter,\n since=start_date,\n num_workers=NUM_WORKERS)", "_____no_output_____" ] ], [ [ "Let us set `change_counter` to this `ChangeCounter` instance. This can take a few minutes.", "_____no_output_____" ] ], [ [ "from Timer import Timer", "_____no_output_____" ], [ "with Timer() as t:\n change_counter = debuggingbook_change_counter(ChangeCounter)\n\nt.elapsed_time()", "_____no_output_____" ] ], [ [ "The attribute `changes` of our `ChangeCounter` now is a mapping of nodes to the respective number of changes. Here are the first 10 entries:", "_____no_output_____" ] ], [ [ "list(change_counter.changes.keys())[:10]", "_____no_output_____" ] ], [ [ "This is the number of changes to the `Chapters.makefile` file, which lists the book chapters:", "_____no_output_____" ] ], [ [ "change_counter.changes.get(('Chapters.makefile',), None)", "_____no_output_____" ] ], [ [ "The `messages` attribute holds all the messages:", "_____no_output_____" ] ], [ [ "change_counter.messages.get(('Chapters.makefile',), None)", "_____no_output_____" ], [ "for node in change_counter.changes:\n assert len(change_counter.messages[node]) == change_counter.changes[node]", "_____no_output_____" ] ], [ [ "The `sizes` attribute holds the final size:", "_____no_output_____" ] ], [ [ "change_counter.sizes.get(('Chapters.makefile',), None)", "_____no_output_____" ] ], [ [ "## Visualizing Past Changes", "_____no_output_____" ], [ "To explore the number of changes across all project files, we visualize them as a _tree map_. A tree map visualizes hierarchical data using nested rectangles. In our visualization, each directory is shown as a rectangle containing smaller rectangles. The _size_ of a rectangle is relative to the size of the element it represents (in bytes); and the _color_ of a rectangle is relative to the number of changes it has seen.", "_____no_output_____" ], [ "We use the [easyplotly](https://github.com/mwouts/easyplotly) package to easily create a treemap.", "_____no_output_____" ] ], [ [ "import easyplotly as ep\nimport plotly.graph_objects as go", "_____no_output_____" ], [ "import math", "_____no_output_____" ] ], [ [ "The method `map_node_sizes()` returns a size for the node – any number will do. 
By default, we use a logarithmic scale, such that smaller files are not totally visually eclipsed by larger files.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def map_node_sizes(self,scale: str = 'log') -> \\\n Dict[Node, Union[int, float]]:\n \"\"\"\n Return a mapping of nodes to sizes.\n Can be overloaded in subclasses.\n \"\"\"\n\n if scale == 'log':\n # Default: use log scale\n return {node: math.log(size+1) \n for node, size in self.sizes.items()}\n\n elif scale == 'sqrt':\n # Alternative: use sqrt size\n return {node: math.sqrt(size)\n for node, size in self.sizes.items()}\n\n elif scale == 'abs':\n # Alternative: use absolute size\n return self.sizes\n\n else:\n raise ValueError(f\"Unknown scale: {scale}; \"\n f\"use one of [log, sqrt, abs]\")", "_____no_output_____" ] ], [ [ "The method `map_node_color()` returns a color for the node – again, as a number. The smallest and largest numbers returned indicate beginning and end in the given color scale, respectively.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def map_node_color(self, node: Node) -> Optional[int]:\n \"\"\"\n Return a color of the node, as a number.\n Can be overloaded in subclasses.\n \"\"\"\n return self.changes.get(node)", "_____no_output_____" ] ], [ [ "The method `map_node_text()` shows a text to be displayed in the rectangle; we set this to the number of changes.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def map_node_text(self, node: Node) -> Optional[str]:\n \"\"\"\n Return the text to be shown for the node (default: #changes).\n Can be overloaded in subclasses.\n \"\"\"\n change = self.changes.get(node)\n return str(change) if change is not None else None", "_____no_output_____" ] ], [ [ "The methods `map_hoverinfo()` and `map_colorscale()` set additional map parameters. For details, see the [easyplotly](https://github.com/mwouts/easyplotly) documentation.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def map_hoverinfo(self) -> str:\n \"\"\"\n Return the text to be shown when hovering over a node.\n To be overloaded in subclasses.\n \"\"\"\n return 'label+text'\n\n def map_colorscale(self) -> str:\n \"\"\"\n Return the colorscale for the map. To be overloaded in subclasses.\n \"\"\"\n return 'YlOrRd'", "_____no_output_____" ] ], [ [ "With all this, the `map()` function creates a tree map of the repository, using the [easyplotly](https://github.com/mwouts/easyplotly) `Treemap` constructor.", "_____no_output_____" ] ], [ [ "class ChangeCounter(ChangeCounter):\n def map(self) -> go.Figure:\n \"\"\"Produce an interactive tree map of the repository.\"\"\"\n treemap = ep.Treemap(\n self.map_node_sizes(),\n text=self.map_node_text,\n hoverinfo=self.map_hoverinfo(),\n marker_colors=self.map_node_color,\n marker_colorscale=self.map_colorscale(),\n root_label=self.repo,\n branchvalues='total'\n )\n\n fig = go.Figure(treemap)\n fig.update_layout(margin=dict(l=0, r=0, t=30, b=0))\n\n return fig", "_____no_output_____" ] ], [ [ "This is what the tree map for `debuggingbook` looks like. 
\n\n* Click on any rectangle to enlarge it.\n* Click outside of the rectangle to return to a wider view.\n* Hover over a rectangle to get further information.", "_____no_output_____" ] ], [ [ "change_counter = debuggingbook_change_counter(ChangeCounter)", "_____no_output_____" ], [ "change_counter.map()", "_____no_output_____" ] ], [ [ "We can easily identify the most frequently changed files:", "_____no_output_____" ] ], [ [ "sorted(change_counter.changes.items(), key=lambda kv: kv[1], reverse=True)[:4]", "_____no_output_____" ], [ "# ignore\nall_notebooks = [node for node in change_counter.changes.keys()\n if len(node) == 2 and node[1].endswith('.ipynb')]\nall_notebooks.sort(key=lambda node: change_counter.changes[node],\n reverse=True)", "_____no_output_____" ], [ "from bookutils import quiz", "_____no_output_____" ], [ "quiz(\"Which two notebooks have seen the most changes over time?\",\n [\n f\"`{all_notebooks[i][1].split('.')[0]}`\"\n for i in [0, 3, 1, 2]\n if i < len(all_notebooks)\n ]\n , '[1234 % 3, 3702 / 1234]')", "_____no_output_____" ] ], [ [ "Indeed, these two are the two most frequently recently changed notebooks:", "_____no_output_____" ] ], [ [ "[notebook[1].split('.')[0] for notebook in all_notebooks[:2]]", "_____no_output_____" ] ], [ [ "## Counting Past Fixes\n\nKnowing which files have been changed most is useful in debugging, because any change increases the chance to introduce a new bug. Even more important, however, is the question of how frequently a file was _fixed_ in the past, as this is an important indicator for its bug-proneness.", "_____no_output_____" ], [ "(One may think that fixing several bugs _reduces_ the number of bugs, but unfortunately, a file which has seen several fixes in the past is likely to see fixes in the future, too. This is because the bug-proneness of a software component very much depends on the requirements it has to fulfill, and if these requirements are unclear, complex, or frequently change, this translates into many fixes.)", "_____no_output_____" ], [ "How can we tell _changes_ from _fixes_? \n\n* One indicator is _commit messages_:\n If they refer to \"bugs\" or \"fixes\", then the change is a fix.\n* Another indicator is _bug numbers_:\n If a commit message contains an issue number from an associated issue database, then we can make use of the issue referred to.\n * The issue database may provide us with additional information about the bug, such as its severity, how many people it was assigned to, how long it took to fix it, and more.\n* A final indicator is _time_:\n If a developer first committed a change and in the same time frame marked an issue as \"resolved\", then it is likely that the two refer to each other.\n\nThe way these two are linked very much depends on the project – and the discipline of developers as it comes to change messages. _Branches_ and _merges_ bring additional challenges.", "_____no_output_____" ], [ "For the `debuggingbook` project, identifying fixes is easy. The discipline is that if a change fixes a bug, it is prefixed with `Fix:`. 
We can use this to introduce a `FixCounter` class specific to our `debuggingbook` project.", "_____no_output_____" ] ], [ [ "class FixCounter(ChangeCounter):\n \"\"\"\n Count the fixes for files in the repository.\n Fixes are all commits whose message starts with the word 'Fix: '\n \"\"\"\n\n def include(self, m: ModifiedFile) -> bool:\n \"\"\"Include all modifications whose commit messages start with 'Fix:'\"\"\"\n return super().include(m) and m and m.msg.startswith(\"Fix:\")", "_____no_output_____" ] ], [ [ "As a twist to our default `ChangeCounter` class, we include the \"fix\" messages in the tree map rectangles.", "_____no_output_____" ] ], [ [ "class FixCounter(FixCounter):\n def map_node_text(self, node: Node) -> str:\n return \"<br>\".join(self.messages.get(node, []))\n\n def map_hoverinfo(self) -> str:\n return 'label'", "_____no_output_____" ] ], [ [ "This is the tree map showing fixes. We see that \n* only those components that actually have seen a fix are shown; and\n* the fix distribution differs from the change distribution.", "_____no_output_____" ] ], [ [ "fix_counter = debuggingbook_change_counter(FixCounter)", "_____no_output_____" ], [ "fix_counter.map()", "_____no_output_____" ] ], [ [ "If you want to take a look at the _entire_ fix history, pass an early `start_date` argument to `debuggingbook_change_counter()`. This takes a few extra minutes, so we make it optional:", "_____no_output_____" ] ], [ [ "# fix_counter = debuggingbook_change_counter(\n# FixCounter,\n# start_date=datetime(1999, 1, 1)\n# )\n# fix_counter.map()", "_____no_output_____" ] ], [ [ "## Counting Fine-Grained Changes\n\nIn programming projects, individual files typically consist of _smaller units_ such as functions, classes, and methods. We want to determine which of these _units_ are frequently changed (and fixed). For this, we need to _break down_ individual files into smaller parts, and then determine which of these parts would be affected by a change.", "_____no_output_____" ], [ "### Mapping Elements to Locations\n\nOur first task is to devise a simple means of splitting a (programming) file into smaller parts, each with its own location. First, we need to know what kind of content a file contains. To this end, we use the Python [magic](https://github.com/ahupp/python-magic) package. 
(The \"magic\" in the name does not refer to some \"magic\" functionality, but to the practice of having files start with \"magic\" bytes that indicate their type.)", "_____no_output_____" ] ], [ [ "import magic", "_____no_output_____" ] ], [ [ "The `magic` package easily guesses that a file contains C code:", "_____no_output_____" ] ], [ [ "magic.from_buffer('''\n#include <stdio.h>\n\nint main(int argc, char *argv[]) {\n printf(\"Hello, world!\\n\")\n}\n''')", "_____no_output_____" ] ], [ [ "It also works well for Python code:", "_____no_output_____" ] ], [ [ "magic.from_buffer('''\ndef foo():\n print(\"Hello, world!\")\n''')", "_____no_output_____" ] ], [ [ "Jupyter Notebooks, however, are identified as `JSON` data or `SGML` documents:", "_____no_output_____" ] ], [ [ "magic.from_buffer(open(os.path.join(current_repo(), # type: ignore\n 'notebooks',\n 'Assertions.ipynb')).read())", "_____no_output_____" ] ], [ [ "We define a set of _delimiters_ for these languages which use _regular expressions_ to identify\n* the _language_ (matching the `magic` output)\n* the _beginning of a unit_, and\n* the _end_ of a unit,\n\nFor Python, for instance, any line starting with `def` or `class` denotes the start of some unit; any line starting with something else denotes the end of a unit. For Jupyter, the delimiters do the same, yet encoded into JSON. The definitions for C are likely to work for a wide range of languages that all use `{` and `}` to delimit units.", "_____no_output_____" ] ], [ [ "import re", "_____no_output_____" ], [ "# ignore\nfrom typing import Pattern", "_____no_output_____" ], [ "DELIMITERS: List[Tuple[Pattern, Pattern, Pattern]] = [\n (\n # Python\n re.compile(r'^python.*'),\n\n # Beginning of element\n re.compile(r'^(async\\s+)?(def|class)\\s+(?P<name>\\w+)\\W.*'),\n\n # End of element\n re.compile(r'^[^#\\s]')\n ),\n (\n # Jupyter Notebooks\n re.compile(r'^(json|exported sgml|jupyter).*'),\n re.compile(r'^\\s+\"(async\\s+)?(def|class)\\s+(?P<name>\\w+)\\W'),\n re.compile(r'^(\\s+\"[^#\\s\\\\]|\\s+\\])')\n ),\n (\n # C source code (actually, any { }-delimited language)\n re.compile(r'^(c |c\\+\\+|c#|java|perl|php).*'),\n re.compile(r'^[^\\s].*\\s+(?P<name>\\w+)\\s*[({].*'),\n re.compile(r'^[}]')\n )\n]", "_____no_output_____" ] ], [ [ "The function `rxdelim()` returns suitable delimiters for a given content, using `DELIMITERS`.", "_____no_output_____" ] ], [ [ "def rxdelim(content: str) -> Tuple[Optional[Pattern], Optional[Pattern]]:\n \"\"\"\n Return suitable begin and end delimiters for the content `content`.\n If no matching delimiters are found, return `None, None`.\n \"\"\"\n tp = magic.from_buffer(content).lower()\n for rxtp, rxbegin, rxend in DELIMITERS:\n if rxtp.match(tp):\n return rxbegin, rxend\n\n return None, None", "_____no_output_____" ] ], [ [ "The function `elem_mapping()` returns a list of the individual elements as found in the file, indexed by line numbers (starting with 1).", "_____no_output_____" ] ], [ [ "Mapping = List[Optional[str]]", "_____no_output_____" ], [ "def elem_mapping(content: str, log: bool = False) -> Mapping:\n \"\"\"Return a list of the elements in `content`, indexed by line number.\"\"\"\n rxbegin, rxend = rxdelim(content)\n if rxbegin is None:\n return []\n if rxend is None:\n return []\n\n mapping: List[Optional[str]] = [None]\n current_elem = None\n lineno = 0\n\n for line in content.split('\\n'):\n lineno += 1\n\n match = rxbegin.match(line)\n if match:\n current_elem = match.group('name')\n elif rxend.match(line):\n current_elem = 
None\n\n mapping.append(current_elem)\n\n if log:\n print(f\"{lineno:3} {str(current_elem):15} {line}\")\n\n return mapping", "_____no_output_____" ] ], [ [ "Here is an example of how `elem_mapping()` works. During execution (with `log` set to `True`), we already see the elements associated with individual line numbers.", "_____no_output_____" ] ], [ [ "some_c_source = \"\"\"\n#include <stdio.h>\n\nint foo(int x) {\n return x;\n}\n\nstruct bar {\n int x, y;\n}\n\nint main(int argc, char *argv[]) {\n return foo(argc);\n}\n\n\"\"\"\nsome_c_mapping = elem_mapping(some_c_source, log=True)", "_____no_output_____" ] ], [ [ "In the actual mapping, we can access the individual units for any line number:", "_____no_output_____" ] ], [ [ "some_c_mapping[1], some_c_mapping[8]", "_____no_output_____" ] ], [ [ "Here's how this works for Python:", "_____no_output_____" ] ], [ [ "some_python_source = \"\"\"\ndef foo(x):\n return x\n\nclass bar(blue):\n x = 25\n def f(x):\n return 26\n\ndef main(argc):\n return foo(argc)\n\n\"\"\"\nsome_python_mapping = elem_mapping(some_python_source, log=True)", "_____no_output_____" ], [ "# some_jupyter_source = open(\"Slicer.ipynb\").read()\n# some_jupyter_mapping = elem_mapping(some_jupyter_source, log=False)", "_____no_output_____" ] ], [ [ "### Determining Changed Elements\n\nUsing a mapping from `elem_mapping()`, we can determine which elements are affected by a change. The `changed_elems_by_mapping()` function returns the set of affected elements.", "_____no_output_____" ] ], [ [ "def changed_elems_by_mapping(mapping: Mapping, start: int, length: int = 0) -> Set[str]:\n \"\"\"\n Within `mapping`, return the set of elements affected by a change\n starting in line `start` and extending over `length` additional lines.\n \"\"\"\n elems = set()\n for line in range(start, start + length + 1):\n if line < len(mapping) and mapping[line]:\n elem = mapping[line]\n assert elem is not None\n elems.add(elem)\n\n return elems", "_____no_output_____" ] ], [ [ "Here's an example of `changed_elems_by_mapping()`, applied to the Python content, above:", "_____no_output_____" ] ], [ [ "changed_elems_by_mapping(some_python_mapping, start=2, length=4)", "_____no_output_____" ] ], [ [ "The function `elem_size()` returns the size of an element (say, a function).", "_____no_output_____" ] ], [ [ "def elem_size(elem: str, source: str) -> int:\n \"\"\"Within `source`, return the size of `elem`\"\"\"\n source_lines = [''] + source.split('\\n')\n size = 0\n mapping = elem_mapping(source)\n\n for line_no in range(len(mapping)):\n if mapping[line_no] == elem or mapping[line_no] is elem:\n size += len(source_lines[line_no] + '\\n')\n\n return size", "_____no_output_____" ], [ "elem_size('foo', some_python_source)", "_____no_output_____" ], [ "assert sum(elem_size(name, some_python_source) \n for name in ['foo', 'bar', 'main']) == len(some_python_source)", "_____no_output_____" ] ], [ [ "Given an old version and a new version of a (text) file, we can use the `diff_match_patch` module to determine differences, and from these the affected lines:", "_____no_output_____" ] ], [ [ "from ChangeDebugger import diff # minor dependency", "_____no_output_____" ], [ "from diff_match_patch import diff_match_patch", "_____no_output_____" ], [ "def changed_elems(old_source: str, new_source: str) -> Set[str]:\n \"\"\"Determine the elements affected by the change from `old_source` to `new_source`\"\"\"\n patches = diff(old_source, new_source)\n\n old_mapping = elem_mapping(old_source)\n new_mapping = 
elem_mapping(new_source)\n\n elems = set()\n\n for patch in patches:\n old_start_line = patch.start1 + 1\n new_start_line = patch.start2 + 1\n\n for (op, data) in patch.diffs:\n length = data.count('\\n')\n\n if op == diff_match_patch.DIFF_INSERT:\n elems |= changed_elems_by_mapping(old_mapping, old_start_line)\n elems |= changed_elems_by_mapping(new_mapping, new_start_line, length)\n elif op == diff_match_patch.DIFF_DELETE:\n elems |= changed_elems_by_mapping(old_mapping, old_start_line, length)\n elems |= changed_elems_by_mapping(new_mapping, new_start_line)\n\n old_start_line += length\n new_start_line += length\n\n return elems", "_____no_output_____" ] ], [ [ "Here is how `changed_elems()` works. We define a \"new\" version of `some_python_source`:", "_____no_output_____" ] ], [ [ "some_new_python_source = \"\"\"\ndef foo(y):\n return y\n\nclass qux(blue):\n x = 25\n def f(x):\n return 26\n\ndef main(argc):\n return foo(argc)\n\n\"\"\"", "_____no_output_____" ], [ "changed_elems(some_python_source, some_new_python_source)", "_____no_output_____" ] ], [ [ "Note that the list of changed elements includes added as well as deleted elements.", "_____no_output_____" ], [ "### Putting it all Together\n\nWe introduce a class `FineChangeCounter` that, like `ChangeCounter`, counts changes for individual files; however, `FineChangeCounter` adds additional nodes for all elements affected by the change. For a file consisting of multiple elements, this has the same effect as if the file were a directory, and the elements were all contained as individual files in this directory.", "_____no_output_____" ] ], [ [ "class FineChangeCounter(ChangeCounter):\n \"\"\"Count the changes for files in the repository and their elements\"\"\"\n\n def update_elems(self, node: Node, m: ModifiedFile) -> None:\n old_source = m.source_code_before if m.source_code_before else \"\"\n new_source = m.source_code if m.source_code else \"\"\n\n for elem in changed_elems(old_source, new_source):\n elem_node = node + (elem,)\n\n self.update_size(elem_node, elem_size(elem, new_source))\n self.update_changes(elem_node, m.msg)", "_____no_output_____" ] ], [ [ "Retrieving fine-grained changes takes a bit more time, since all files have to be parsed...", "_____no_output_____" ] ], [ [ "with Timer() as t:\n fine_change_counter = debuggingbook_change_counter(FineChangeCounter)\n\nt.elapsed_time()", "_____no_output_____" ] ], [ [ "... but the result is very much worth it. 
We can now zoom into individual files and compare the change counts for the individual functions.", "_____no_output_____" ] ], [ [ "fine_change_counter.map()", "_____no_output_____" ] ], [ [ "Like before, we can access the most frequently changed elements. This is the most frequently changed item in the book:", "_____no_output_____" ] ], [ [ "elem_nodes = [node for node in fine_change_counter.changes.keys()\n if len(node) == 3 and node[1].endswith('.ipynb')]\nelem_nodes.sort(key=lambda node: fine_change_counter.changes[node],\n reverse=True)\n[(node, fine_change_counter.changes[node]) for node in elem_nodes[:1]]", "_____no_output_____" ], [ "from bookutils import quiz", "_____no_output_____" ], [ "quiz(\"Which is the _second_ most changed element?\",\n [\n f\"`{elem_nodes[i][2]}` in `{elem_nodes[i][1].split('.ipynb')[0]}`\"\n for i in [3, 1, 2, 0]\n if i < len(elem_nodes)\n ], '1975308642 // 987654321')", "_____no_output_____" ] ], [ [ "Indeed, here come the top five most frequently changed elements:", "_____no_output_____" ] ], [ [ "[(node, fine_change_counter.changes[node]) for node in elem_nodes[:5]]", "_____no_output_____" ] ], [ [ "Now it is time to apply these tools to your own projects. Which are the most frequently changed (and fixed) elements? Why is that so? What can you do to improve things? All these are consequences of debugging – helping you have fewer bugs in the future!", "_____no_output_____" ], [ "## Synopsis", "_____no_output_____" ], [ "This chapter provides two classes `ChangeCounter` and `FineChangeCounter` that allow mining and visualizing the distribution of changes in a given `git` repository.", "_____no_output_____" ], [ "`ChangeCounter` is initialized as\n\n```python\nchange_counter = ChangeCounter(repository)\n```\nwhere `repository` is either \n\n* a _directory_ containing a `git` clone (i.e., it contains a `.git` directory)\n* the URL of a `git` repository.\n\nAdditional arguments are passed to the underlying `Repository` class from the [PyDriller](https://pydriller.readthedocs.io/) Python package. A `filter` keyword argument, if given, is a predicate that takes a modification (from PyDriller) and returns True if it should be included.", "_____no_output_____" ], [ "In a change counter, all elements in the repository are represented as _nodes_ – tuples $(f_1, f_2, ..., f_n)$ that denote a _hierarchy_: Each $f_i$ is a directory holding $f_{i+1}$, with $f_n$ being the actual file.\n\nA `change_counter` provides a number of attributes. `changes` is a mapping of nodes to the number of changes in that node:", "_____no_output_____" ] ], [ [ "change_counter.changes.get(('README.md',), None)", "_____no_output_____" ] ], [ [ "The `messages` attribute holds all commit messages related to that node:", "_____no_output_____" ] ], [ [ "change_counter.messages.get(('README.md',), None)", "_____no_output_____" ] ], [ [ "The `sizes` attribute holds the (last) size of the respective element:", "_____no_output_____" ] ], [ [ "change_counter.sizes.get(('README.md',), None)", "_____no_output_____" ] ], [ [ "`FineChangeCounter` acts like `ChangeCounter`, but also retrieves statistics for elements _within_ the respective files; it has been tested for C, Python, and Jupyter Notebooks and should provide sufficient results for programming languages with similar syntax.", "_____no_output_____" ], [ "The `map()` method of `ChangeCounter` and `FineChangeCounter` produces an interactive tree map that allows exploring the elements of a repository. 
The redder (darker) a rectangle, the more changes it has seen; the larger a rectangle, the larger its size in bytes.", "_____no_output_____" ] ], [ [ "fine_change_counter.map()", "_____no_output_____" ] ], [ [ "The included classes offer several methods that can be overridden in subclasses to customize what to mine and how to visualize it. See the chapter for details.", "_____no_output_____" ], [ "Here are all the classes defined in this chapter:", "_____no_output_____" ] ], [ [ "# ignore\nfrom ClassDiagram import display_class_hierarchy", "_____no_output_____" ], [ "# ignore\ndisplay_class_hierarchy([FineChangeCounter, FixCounter],\n public_methods=[\n ChangeCounter.__init__,\n ChangeCounter.map\n ],\n project='debuggingbook')", "_____no_output_____" ] ], [ [ "## Lessons Learned\n\n* We can easily _mine_ past changes and map these to individual files and elements\n* This information can be helpful in guiding the debugging and development process\n* Counting _fixes_ needs to be customized to the conventions used in the project at hand", "_____no_output_____" ], [ "## Background\n\nBefore you venture out building your own development analytics, you may want to check out what's already there and available. This [overview at Livable Software](https://livablesoftware.com/tools-mine-analyze-github-git-software-data/) gives a great overview of platforms and tools for mining development data. One of the most ambitious ones is [GrimoireLab](https://chaoss.github.io/grimoirelab/), set to mine data from pretty much _any_ open source repository.", "_____no_output_____" ], [ "## Exercises\n", "_____no_output_____" ], [ "### Exercise 1: Fine-Grained Fixes\n\nConstruct a class `FineFixCounter` that combines fine-grained counting from `FineChangeCounter` with the fix-counting abilities from `FixCounter`. Visualize the treemap.", "_____no_output_____" ], [ "**Solution.** We can use multiple inheritance to combine both. We inherit from `FixCounter` first such that we can see the fix messages.", "_____no_output_____" ] ], [ [ "class FineFixCounter(FixCounter, FineChangeCounter):\n pass", "_____no_output_____" ] ], [ [ "Try things out for yourself:", "_____no_output_____" ] ], [ [ "# fine_fix_counter = debuggingbook_change_counter(FineFixCounter, start_date=datetime(1999, 1, 1))\n# fine_fix_counter.map()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e71a65314ec924a8c53ca5b014fcdf7d3cef299f
10,664
ipynb
Jupyter Notebook
notebooks/Training.ipynb
DOsinga/reinforced_artificial_life
35efd9a64ac52ee470b219e277eecd2c5a722bfb
[ "Apache-2.0" ]
1
2019-12-05T11:40:52.000Z
2019-12-05T11:40:52.000Z
notebooks/Training.ipynb
DOsinga/reinforced_artificial_life
35efd9a64ac52ee470b219e277eecd2c5a722bfb
[ "Apache-2.0" ]
19
2018-10-27T20:41:24.000Z
2019-02-21T17:49:20.000Z
notebooks/Training.ipynb
DOsinga/reinforced_artificial_life
35efd9a64ac52ee470b219e277eecd2c5a722bfb
[ "Apache-2.0" ]
2
2018-10-28T18:52:48.000Z
2020-08-20T15:06:35.000Z
24.126697
99
0.428451
[ [ [ "import sys\nif not '..' in sys.path:\n sys.path.append('..')\nfrom simplegrid.cow import Action\nimport numpy as np\nimport random\n\nfrom unittest.mock import MagicMock\nfrom shared.experiment_settings import ExperimentSettings\nfrom simplegrid.deep_cow import DeepCow\nfrom simplegrid.dqn_agent import DQNAgent\nfrom simplegrid.world import World as World, MapFeature", "_____no_output_____" ], [ "settings = ExperimentSettings('')\nsettings.world_size = 5\nsettings.start_num_creatures = 0\nsettings.layers = [12]\nDeepCow.agent = None\ndeepcow = DeepCow(x=2, y=2, energy=100, settings=settings)\nworld = World(settings, MagicMock())", "_____no_output_____" ], [ "def training_record(world, cow, grass_fraction=0.25, water_fraction=0.10):\n world.reset(MagicMock(), grass_fraction=grass_fraction, water_fraction=water_fraction)\n cow.x = 2\n cow.y = 2\n world.add_new_creature(cow)\n observation = world.get_observation(cow)\n action = cow.step(observation)\n new_creature, reward, done = world.process_action(cow, action)\n if done:\n next_state = None\n else:\n next_observation = world.get_observation(cow)\n next_state = cow.to_internal_state(next_observation)\n return cow.state, cow.action_idx, reward, next_state\n\ntraining_record(world, deepcow)", "_____no_output_____" ], [ "def run_scenario(scenario, world, cow):\n environment = MapFeature.text_scene_to_environment(scenario)\n world.cells = environment\n cow.x = 2\n cow.y = 2\n world.add_new_creature(cow)\n observation = world.get_observation(cow)\n state = cow.to_internal_state(observation)\n act_values = cow.agent.predict(state)\n action_index = np.argmax(act_values[0])\n return [(Action(action_index + 1), act_values[0][action_index])\n for action_index in np.argsort(act_values[0])]\n\nrun_scenario('.....\\n'\n '.....\\n'\n '.....\\n'\n '.....\\n'\n '..~..\\n',\n world, deepcow\n )", "_____no_output_____" ], [ "def score(world, deepcow):\n score = 0\n for i in range(1000):\n state, action, reward, next_state = training_record(world, deepcow)\n if next_state is None:\n score -= 10\n else:\n score += reward\n return score / 1000\n\nscore(world, deepcow)", "_____no_output_____" ], [ "records = [training_record(world, deepcow) for _ in range(200000)]\nrecords[0]", "_____no_output_____" ], [ "agent = deepcow.agent\nfor rec in records:\n agent.remember(*rec)", "_____no_output_____" ], [ "for epoch in range(10):\n for __ in range(len(records) // agent.batch_size):\n loss = agent.replay()\n print(epoch, loss)", "0 0.1081507563086537\n1 0.04180942660896109\n2 0.03738071905293813\n3 0.05667696514477333\n4 0.06424152853433043\n5 0.07367683884222062\n6 0.03925972802098843\n7 0.07053790832093607\n8 0.04638871860612806\n9 0.055363988130314595\n" ], [ "score(world, deepcow)", "_____no_output_____" ], [ "run_scenario('.....\\n'\n '.....\\n'\n '.....\\n'\n '..~..\\n'\n '.....\\n',\n world, deepcow\n )", "_____no_output_____" ], [ "run_scenario('.....\\n'\n '.....\\n'\n '.....\\n'\n '.....\\n'\n '..#..\\n',\n world, deepcow\n )", "_____no_output_____" ], [ "run_scenario('#....\\n'\n '.....\\n'\n '.....\\n'\n '.....\\n'\n '.....\\n',\n world, deepcow\n )", "_____no_output_____" ], [ "run_scenario('.....\\n'\n '.....\\n'\n '#~...\\n'\n '.....\\n'\n '.....\\n',\n world, deepcow\n )", "_____no_output_____" ], [ "run_scenario('.....\\n'\n '#~...\\n'\n '.....\\n'\n '.....\\n'\n '.....\\n',\n world, deepcow\n )", "_____no_output_____" ], [ "run_scenario('.....\\n'\n '.#~..\\n'\n '.....\\n'\n '.....\\n'\n '.....\\n',\n world, deepcow\n )", "_____no_output_____" ] 
] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e71a67527a4465d8571a5c19dd9d879b7a54a734
386,304
ipynb
Jupyter Notebook
Tissue_DNA-FISH/20201201-single_fov_P-brain_before_clearing_IgH.ipynb
shiwei23/Chromatin_Analysis_Scripts
909b9b81de8fcf04dd4c39ac21a84864ce2003ff
[ "MIT" ]
null
null
null
Tissue_DNA-FISH/20201201-single_fov_P-brain_before_clearing_IgH.ipynb
shiwei23/Chromatin_Analysis_Scripts
909b9b81de8fcf04dd4c39ac21a84864ce2003ff
[ "MIT" ]
null
null
null
Tissue_DNA-FISH/20201201-single_fov_P-brain_before_clearing_IgH.ipynb
shiwei23/Chromatin_Analysis_Scripts
909b9b81de8fcf04dd4c39ac21a84864ce2003ff
[ "MIT" ]
null
null
null
220.36737
118,237
0.889401
[ [ [ "%run \"E:\\Users\\puzheng\\Documents\\Startup_py3.py\"\nsys.path.append(r\"E:\\Users\\puzheng\\Documents\")\n\nimport ImageAnalysis3 as ia\n%matplotlib notebook\n\nfrom ImageAnalysis3 import *\nprint(os.getpid())", "31900\n" ] ], [ [ "# 0. required packages for h5py", "_____no_output_____" ] ], [ [ "import h5py\nfrom ImageAnalysis3.classes import _allowed_kwds\nimport ast", "_____no_output_____" ] ], [ [ "# 1. Create field-of-view class", "_____no_output_____" ] ], [ [ "reload(ia)\nreload(classes)\nreload(classes.batch_functions)\nreload(classes.field_of_view)\nreload(io_tools.load)\n\nreload(visual_tools)\nreload(ia.correction_tools)\nreload(ia.correction_tools.alignment)\nreload(ia.spot_tools.matching)\nreload(ia.segmentation_tools.chromosome)\nreload(ia.spot_tools.fitting)\n\nfov_param = {'data_folder':r'\\\\10.245.74.158\\Chromatin_NAS_5\\20201127-NOAcr_CTP-08_E14_brain_no_clearing\\Before_clearing',\n 'save_folder':r'D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing',\n #'save_folder':r'D:\\Pu_Temp\\202009_IgH_proB_DMSO_2color',\n 'experiment_type': 'DNA',\n 'num_threads': 12,\n 'correction_folder':r'\\\\10.245.74.158\\Chromatin_NAS_0\\Corrections\\20201204-Corrections_3color_50',\n 'shared_parameters':{\n 'single_im_size':[50,2048,2048],\n 'corr_channels':['750','647','561'],\n 'num_empty_frames': 0, \n 'corr_hot_pixel':True,\n 'corr_Z_shift':False,\n 'min_num_seeds':500,\n 'max_num_seeds': 2500,\n 'spot_seeding_th':150,\n 'normalize_intensity_local':False,\n 'normalize_intensity_background':False,\n }, \n }", "_____no_output_____" ], [ "sel_fov_id = 6\nfov = classes.field_of_view.Field_of_View(fov_param, _fov_id=sel_fov_id,\n _color_info_kwargs={\n '_color_filename':'Color_Usage',\n }, \n _prioritize_saved_attrs=False,\n )", "Get Folder Names: (ia.get_img_info.get_folders)\n- Number of folders: 8\n- Number of field of views: 60\n- Importing csv file: \\\\10.245.74.158\\Chromatin_NAS_5\\20201127-NOAcr_CTP-08_E14_brain_no_clearing\\Before_clearing\\Analysis\\Color_Usage.csv\n- header: ['Hyb', '750', '647', '561', '488', '405']\n-- Hyb H0R0 exists in this data\n-- DAPI exists in hyb: H0R0\n- 8 folders are found according to color-usage annotation.\n+ loading fov_info from file: D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing\\Conv_zscan_06.hdf5\n++ base attributes loaded:['cand_chrom_coords', 'chrom_coords', 'chrom_im', 'ref_im'] in 3.619s.\n+ loading correction from file: D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing\\Conv_zscan_06.hdf5\n++ load bleed correction profile directly from savefile.\n++ load chromatic correction profile directly from savefile.\n++ load chromatic_constants correction profile directly from savefile.\n++ load illumination correction profile directly from savefile.\n+ loading segmentation from file: D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing\\Conv_zscan_06.hdf5\n++ base attributes loaded:[] in 0.001s.\n-- saving fov_info to file: D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing\\Conv_zscan_06.hdf5\n++ base attributes saved:['analysis_folder', 'annotated_folders', 'bead_channel_index', 'cand_chrom_coords', 'channels', 'chrom_coords', 'chrom_im', 'color_dic', 'color_filename', 'color_format', 'correction_folder', 'dapi_channel_index', 'data_folder', 'drift', 'drift_filename', 'drift_folder', 'experiment_folder', 'folders', 'fov_id', 'fov_name', 'map_folder', 'num_threads', 'ref_filename', 'ref_id', 'ref_im', 'rotation', 'save_filename', 'save_folder', 'segmentation_dim', 'segmentation_folder', 'shared_parameters', 'use_dapi'] in 
11.283s.\n" ] ], [ [ "## 2. Process image into candidate spots", "_____no_output_____" ] ], [ [ "reload(io_tools.load)\nreload(spot_tools.fitting)\nreload(correction_tools.chromatic)\nreload(classes.batch_functions)\n\n# process image into spots\nid_list, spot_list = fov._process_image_to_spots('unique', \n #_sel_ids=np.arange(41,47),\n _load_common_reference=True,\n _load_with_multiple=False,\n _save_images=True,\n _warp_images=False, \n _overwrite_drift=False,\n _overwrite_image=False,\n _overwrite_spot=False,\n _verbose=True)", "-- No folder selected, allow processing all 8 folders\n-- checking unique, region:[45 46] in 0.004s.\n-- checking unique, region:[41 42 43] in 0.004s.\n-- checking unique, region:[66 67 68] in 0.004s.\n-- checking unique, region:[69 70 71] in 0.005s.\n-- checking unique, region:[72 73 74] in 0.004s.\n-- checking unique, region:[75 76 77] in 0.004s.\n-- checking unique, region:[78 79 80] in 0.004s.\n-- checking unique, region:[81 82 83] in 0.004s.\n+ Start multi-processing of pre-processing for 8 images with 12 threads\n++ processed unique ids: [41 42 43 45 46 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83] in 634.93s.\n" ] ], [ [ "# 3. Find chromosomes", "_____no_output_____" ], [ "## 3.1 load chromosome image", "_____no_output_____" ] ], [ [ "chrom_im = fov._load_chromosome_image(_type='forward', _overwrite=False)", "-- choose chrom images from folder: \\.\n- correct the whole fov for image: \\\\10.245.74.158\\Chromatin_NAS_5\\20201127-NOAcr_CTP-08_E14_brain_no_clearing\\Before_clearing\\H0R0\\Conv_zscan_06.dax\n-- loading illumination correction profile from file:\n\t 750 illumination_correction_750_2048x2048.npy\n-- loading chromatic correction profile from file:\n\t 750 chromatic_correction_750_647_50_2048_2048.npy\n\t 647 None\n\t 561 chromatic_correction_561_647_50_2048_2048.npy\n-- loading image from file:\\\\10.245.74.158\\Chromatin_NAS_5\\20201127-NOAcr_CTP-08_E14_brain_no_clearing\\Before_clearing\\H0R0\\Conv_zscan_06.dax in 14.716s\n-- removing hot pixels for channels:['750'] in 9.967s\n-- illumination correction for channels: 750, in 2.286s\n-- warp image with chromatic correction for channels: ['750'] and drift:[0. 0. 0.] 750, in 132.617s\n-- finish correction in 221.216s\n-- chromosome image has drift: [0. 0. 0.]\n-- saving fov_info to file: D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing\\Conv_zscan_06.hdf5\n++ base attributes saved:['chrom_im'] in 5.629s.\n" ], [ "# visualize chromsome image:\nvisual_tools.imshow_mark_3d_v2([fov.chrom_im])", "_____no_output_____" ] ], [ [ "## 3.2 find candidate chromosomes", "_____no_output_____" ] ], [ [ "chrom_coords = fov._find_candidate_chromosomes_by_segmentation(_filt_size=4,\n _binary_per_th=99.7, \n _morphology_size=2,\n _overwrite=True)", "-- adjust seed image with filter size=4\n-- binarize image with threshold: 99.7%\n-- erosion and dialation with size=2.\n-- find close objects.\n-- random walk segmentation, beta=10.\n" ] ], [ [ "## 3.3 select among candidate chromosomes", "_____no_output_____" ] ], [ [ "chrom_coords = fov._select_chromosome_by_candidate_spots(_good_chr_loss_th=0.5,\n _cand_spot_intensity_th=0.5,\n _save=True, \n _overwrite=True)", "+ loading unique from file: D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing\\Conv_zscan_06.hdf5\n++ finish loading unique in 0.042s. 
\n- start select from 929 chromosomes with loss threshold=0.5\n-- remove chr id 1, percentage of lost rounds:0.913.\n-- remove chr id 375, percentage of lost rounds:0.739.\n-- remove chr id 554, percentage of lost rounds:0.696.\n-- remove chr id 712, percentage of lost rounds:0.609.\n-- remove chr id 55, percentage of lost rounds:0.565.\n-- remove chr id 348, percentage of lost rounds:0.565.\n-- remove chr id 755, percentage of lost rounds:0.565.\n-- remove chr id 836, percentage of lost rounds:0.565.\n-- remove chr id 33, percentage of lost rounds:0.522.\n-- remove chr id 598, percentage of lost rounds:0.522.\n-- remove chr id 772, percentage of lost rounds:0.522.\n-- 918 chromosomes are kept.\n-- saving fov_info to file: D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing\\Conv_zscan_06.hdf5\n++ base attributes saved:['chrom_coords'] in 0.027s.\n" ] ], [ [ "### visualize chromosomes selections", "_____no_output_____" ] ], [ [ "%matplotlib notebook\n%matplotlib notebook\n## visualize\ncoord_dict = {'coords':[np.flipud(_coord) for _coord in fov.chrom_coords],\n 'class_ids':list(np.zeros(len(fov.chrom_coords),dtype=np.int)),\n }\n\nvisual_tools.imshow_mark_3d_v2([fov.chrom_im], \n given_dic=coord_dict,\n save_file=None,\n )\n", "_____no_output_____" ] ], [ [ "## select spots based on chromosomes", "_____no_output_____" ] ], [ [ "fov._load_from_file('unique')", "+ loading unique from file: D:\\Pu_Temp\\20201127_IgH_P-brain\\before_clearing\\Conv_zscan_06.hdf5\n++ finish loading unique in 0.016s. \n" ], [ "intensity_th = 0.5\nfrom ImageAnalysis3.spot_tools.picking import assign_spots_to_chromosomes\n\nkept_spots_list = []\nfor _spots in fov.unique_spots_list:\n kept_spots_list.append(_spots[_spots[:,0] > intensity_th])\n# finalize candidate spots\ncand_chr_spots_list = [[] for _ct in fov.chrom_coords]\nfor _spots in kept_spots_list:\n _cands_list = assign_spots_to_chromosomes(_spots, fov.chrom_coords)\n for _i, _cands in enumerate(_cands_list):\n cand_chr_spots_list[_i].append(_cands)\nprint(f\"kept chromosomes: {len(fov.chrom_coords)}\")", "kept chromosomes: 918\n" ], [ "reload(spot_tools.picking)\nfrom ImageAnalysis3.spot_tools.picking import convert_spots_to_hzxys\n\ndna_cand_hzxys_list = [convert_spots_to_hzxys(_spots, fov.shared_parameters['distance_zxy'])\n for _spots in cand_chr_spots_list]\ndna_reg_ids = fov.unique_ids", "_____no_output_____" ], [ "# select_hzxys close to the chromosome center\ndist_th = 3000 # upper limit is 5000nm\nsel_dna_cand_hzxys_list = []\nfor _cand_hzxys, _chrom_coord in zip(dna_cand_hzxys_list, fov.chrom_coords):\n _sel_cands_list = []\n \n for _cands in _cand_hzxys:\n if len(_cands) == 0:\n _sel_cands_list.append([])\n else:\n _dists = np.linalg.norm(_cands[:,1:4] - _chrom_coord*np.array([200,108,108]), axis=1)\n _sel_cands_list.append(_cands[_dists < dist_th])\n \n # append\n sel_dna_cand_hzxys_list.append(_sel_cands_list)", "_____no_output_____" ] ], [ [ "### EM pick spots", "_____no_output_____" ] ], [ [ "reload(ia.spot_tools.picking)\n# load functions\nfrom ImageAnalysis3.spot_tools.picking import Pick_spots_by_intensity, EM_pick_scores_in_population, generate_reference_from_population,evaluate_differences\n\n%matplotlib inline\nniter= 10\nnkeep = len(sel_dna_cand_hzxys_list)\nnum_threads = 12\n# initialize\ninit_dna_hzxys = Pick_spots_by_intensity(sel_dna_cand_hzxys_list[:nkeep])\n# set save list\nsel_dna_hzxys_list, sel_dna_scores_list, all_dna_scores_list = [init_dna_hzxys], [], []\nfor _iter in range(niter):\n print(f\"- iter:{_iter}\")\n # generate 
reference\n ref_ct_dists, ref_local_dists, ref_ints = generate_reference_from_population(\n sel_dna_hzxys_list[-1], dna_reg_ids, \n sel_dna_hzxys_list[-1][:nkeep], dna_reg_ids,\n num_threads=num_threads,\n collapse_regions=True,\n )\n plt.figure(figsize=(4,2))\n plt.hist(np.ravel(ref_ints), bins=np.arange(0,20,0.5))\n plt.figure(figsize=(4,2))\n plt.hist(np.ravel(ref_ct_dists), bins=np.arange(0,5000,100))\n plt.figure(figsize=(4,2))\n plt.hist(np.ravel(ref_local_dists), bins=np.arange(0,5000,100))\n plt.show()\n # scoring\n sel_hzxys, sel_scores, all_scores = EM_pick_scores_in_population(\n sel_dna_cand_hzxys_list[:nkeep], dna_reg_ids, sel_dna_hzxys_list[-1], \n ref_ct_dists, ref_local_dists, ref_ints,\n sel_dna_hzxys_list[-1], dna_reg_ids, num_threads=num_threads,\n )\n update_rate = evaluate_differences(sel_hzxys, sel_dna_hzxys_list[-1])\n print(f\"-- region kept: {update_rate:.4f}\")\n sel_dna_hzxys_list.append(sel_hzxys)\n sel_dna_scores_list.append(sel_scores)\n all_dna_scores_list.append(all_scores)\n \n if update_rate > 0.99:\n break", "- iter:0\n- generate reference metrics from picked chrs.\n-- multiprocessing process references with 12 threads, in 9.704s\n-- collapse all regions into 1d.\n" ], [ "from scipy.spatial.distance import pdist, squareform\nsel_iter = -1\n\nfinal_dna_hzxys_list = []\ndistmap_list = []\nscore_th = np.exp(-6)\nbad_spot_percentage = 0.6\nfor _hzxys, _scores in zip(sel_dna_hzxys_list[sel_iter], sel_dna_scores_list[sel_iter]):\n _kept_hzxys = np.array(_hzxys).copy()\n _kept_hzxys[_scores < score_th] = np.nan\n if np.mean(np.isnan(_kept_hzxys).sum(1)>0)<bad_spot_percentage:\n final_dna_hzxys_list.append(_kept_hzxys)\n distmap_list.append(squareform(pdist(_kept_hzxys[:,1:4])))\n\ndistmap_list = np.array(distmap_list)\nmedian_distmap = np.nanmedian(distmap_list, axis=0)", "_____no_output_____" ], [ "loss_rates = np.mean(np.sum(np.isnan(final_dna_hzxys_list), axis=2)>0, axis=0)\nfig, ax = plt.subplots(figsize=(4,2),dpi=200)\nax.plot(loss_rates, '.-')\n#ax.set_xticks(np.arange(0,150,20))\nplt.show()", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(4,3),dpi=200)\nax = ia.figure_tools.distmap.plot_distance_map(median_distmap,\n #median_distmap[kept_inds][:,kept_inds], \n color_limits=[0,800],\n ax=ax,\n ticks=np.arange(0,150,20), \n figure_dpi=200)\nax.set_title(f\"before clearing, n={len(distmap_list)}\", fontsize=7.5)\nplt.gcf().subplots_adjust(bottom=0.1)\nplt.show()", "_____no_output_____" ], [ "plt.figure()\nfor _reg_id in range(3):\n plt.hist(fov.unique_spots_list[_reg_id][:,0], bins=np.arange(0,4000,40), \n label=f\"{_reg_id}\", alpha=0.5)\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "plt.figure()\nfor _reg_id in range(3):\n plt.hist(fov.unique_spots_list[_reg_id][:,4], bins=np.arange(0,4000,40), \n label=f\"{_reg_id}\", alpha=0.5)\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "sel_ids = []\nfor _fd, _infos in fov.color_dic.items():\n for _info in _infos[:2]:\n if _info[0] == 'u':\n sel_ids.append(int(_info[1:]))\nsel_ids = np.array(sel_ids)", "_____no_output_____" ], [ "np.sort(sel_ids)", "_____no_output_____" ], [ "sel_inds = np.sort([np.where(_id==fov.unique_ids)[0][0] for _id in sel_ids])", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(4,3),dpi=200)\nax = ia.figure_tools.distmap.plot_distance_map(#median_distmap,\n median_distmap[sel_inds][:,sel_inds], \n color_limits=[0,800],\n ax=ax,\n ticks=np.arange(0,150,20), \n figure_dpi=200)\nax.set_title(f\"before clearing, n={len(distmap_list)}\", 
fontsize=7.5)\nplt.gcf().subplots_adjust(bottom=0.1)\nplt.show()", "_____no_output_____" ], [ "fov.unique_spots_list[0].shape", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e71a6a67859e6d9f6967cbe9a37d83fd318060fe
86,283
ipynb
Jupyter Notebook
notebooks_en/3_Lead_in_Lipstick.ipynb
UMD-ENMA165/EngComp2_takeoff
8a2a06b8e91be39ba96b2995d2d2c423ab0802fa
[ "BSD-3-Clause" ]
8
2018-11-19T18:32:43.000Z
2021-11-11T21:43:38.000Z
notebooks_en/3_Lead_in_Lipstick.ipynb
UMD-ENMA165/EngComp2_takeoff
8a2a06b8e91be39ba96b2995d2d2c423ab0802fa
[ "BSD-3-Clause" ]
4
2018-04-16T18:56:38.000Z
2021-06-01T22:17:24.000Z
notebooks_en/3_Lead_in_Lipstick.ipynb
UMD-ENMA165/EngComp2_takeoff
8a2a06b8e91be39ba96b2995d2d2c423ab0802fa
[ "BSD-3-Clause" ]
12
2018-04-15T16:04:21.000Z
2022-03-16T20:39:11.000Z
94.608553
15,106
0.794479
[ [ [ "###### Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2017 L.A. Barba, N.C. Clementi", "_____no_output_____" ], [ "# Lead in Lipstick\n\nAfter completing [Lesson 1](http://go.gwu.edu/engcomp2lesson1) and [Lesson 2](http://go.gwu.edu/engcomp2lesson2) of \"Take off with stats,\" Module 2 of our course in _Engineering Computations_, here we'll work out a full example of what you can do with all that you've learned.\n\nThis example is based on the lecture by Prof. Kristin Sainani at Stanford, [\"Exploring real data: lead in lipstick,\"](https://youtu.be/nlKIT-_b2jU) of her online course [\"Statistics in Medicine,\"](https://lagunita.stanford.edu/courses/Medicine/MedStats-SP/SelfPaced/about). We followed along her narration, searched online for the sources she cited and the data from the FDA studies, and worked out the descriptive statistics using Python. We hope you'll enjoy it!", "_____no_output_____" ], [ "## In the news\n\nIn 2007, some alarming reports appeared in the media: a US consumer-rights group had tested 33 brand-name lipsticks, and found that 61% had detectable lead levels of 0.03 to 0.65 parts per million (ppm). A full one-third of the lipsticks tested exceeded the lead level set by the US Food and Drug Administration (FDA) as the limit for candy: 0.1 ppm. Here are some media reports:\n\n* Reuters published on Oct. 12, 2007: [Lipsticks contain lead, consumer group says](https://www.reuters.com/article/us-lipstick-lead/lipsticks-contain-lead-consumer-group-says-idUSN1140964520071012)—it quotes a doctor as saying: \"Lead builds up in the body over time and lead-containing lipstick applied several times a day, every day, can add up to significant exposure levels.\"\n* CTV.ca News published [FDA to examine claim of lead levels in lipstick](http://www.ctvnews.ca/fda-to-examine-claim-of-lead-levels-in-lipstick-1.259946)—it quoted one member of the Campaign for Safe Cosmetics as saying: \"We want the companies to immediately re-formulate their products to get the lead out and ultimately, really we need to change the laws and force these companies to be accountable to women's health.\"\n* The New York Times was more measured in [The Claim: Some Red Lipstick Brands Contain High Lead Levels](http://www.nytimes.com/2007/11/13/health/13real.html) (Nov. 13, 2007), concluding: \"Studies have found that lead in lipstick is not a cause for concern, but research is continuing.\"\n\nThe FDA did carry out new studies in 2009 and 2012 to try to determine if lead content was a concern for lipstick users. These new studies generated some new scary headlines!\n\n* On the Washington Post: [400 lipsticks found to contain lead, FDA says](https://www.washingtonpost.com/business/economy/400-lipstick-brands-contain-lead-fda-says/2012/02/14/gIQAhOyeDR_story.html?utm_term=.e3622592e0e7)—the FDA is quoted as stating \"We do not consider the lead levels we found in the lipsticks to be a safety concern…\"\n* In Time Magazine: [What’s in Your Lipstick? FDA Finds Lead in 400 Shades](http://healthland.time.com/2012/02/15/whats-in-your-lipstick-fda-finds-lead-in-400-shades/)—a campaigner is quoted as saying: \"We want to see the FDA recommend a limit based on the lowest level a company can achieve, like candy manufacturers are required.\"\n\nShould lipstick users be concerned? 
Let's fact-check those scary headlines using our stats chops with Python!", "_____no_output_____" ], [ "## The FDA studies\n\nWe located a web page of the US Food and Drug Administration, titled [Limiting Lead in Lipstick and Other Cosmetics](https://www.fda.gov/cosmetics/productsingredients/products/ucm137224.htm#reference1), that describes their efforts to assess the safety concerns from lead impurities in cosmetics. The web page includes data tables for the initial study in 2009, with 22 lipsticks, and the expanded study in 2012, with 400 lipsticks. \n\nWe copied these tables from the web page and created CSV files with the data. If you have a clone of all our lesson files, you already have the data. But if you downloaded this notebook on its own, you may need to get the data separately. See the Note below.\n\nLet's begin by loading our Python libraries for data analysis: `numpy`, `pandas` and `pyplot`. We'll also load the `rcParams` module for setting Matplotlib's plotting parameters, and set the font family and size to serif 16 points.", "_____no_output_____" ] ], [ [ "import numpy\nimport pandas\nfrom matplotlib import pyplot\n%matplotlib inline\n\n#set font styles\npyplot.rc('font', family='serif', size=16)", "_____no_output_____" ] ], [ [ "##### Note:\n\nWe'll be reading the data from CSV files using `pandas`. If you don't have the data files locally, change the code in the cell below to read the data from the files hosted in our repository:\n\n```Python\nURL = 'http://go.gwu.edu/engcomp2data3a'\nleadlips2009 = pandas.read_csv(URL)\n```", "_____no_output_____" ] ], [ [ "# Load the FDA 2009 data set using pandas, and assign it to a dataframe\nleadlips2009 = pandas.read_csv(\"../data/FDA2009-lipstickdata.csv\")", "_____no_output_____" ] ], [ [ "As always, we take a quick peek at the data, now saved in a `pandas` dataframe named `leadlips2009`, and then we get a view of its distribution by plotting a histograms.", "_____no_output_____" ] ], [ [ "leadlips2009[0:5]", "_____no_output_____" ] ], [ [ "Let's see a histogram of the data column containing the lead content. It's easy with `pandas`, because we can use the column label as a plot argument.", "_____no_output_____" ] ], [ [ "leadlips2009.hist(column='Pb ppm', bins=4, edgecolor='white')\npyplot.title('Lead levels in lipstick, n=22 (2009) \\n');", "_____no_output_____" ] ], [ [ "Above, we used the built-in plotting capability of `pandas`. Just for kicks, let's get the same plot but using `pyplot` directly. To do that, remember that we need the data in a NumPy array, for which we use the `Series.values` method.", "_____no_output_____" ] ], [ [ "lead2009 = leadlips2009['Pb ppm'].values", "_____no_output_____" ], [ "pyplot.figure(figsize=(6,4))\npyplot.hist(lead2009, bins=4, color='#3498db', histtype='bar', edgecolor='white') \npyplot.title('Lead levels in lipstick, n=22 (2009) \\n')\npyplot.xlabel('ppm')\npyplot.ylabel('Count');", "_____no_output_____" ] ], [ [ "The histograms look the same, except for style. If you are following along with Sainani's lecture, however, you'll note some differences. 
We confirm that the data is the same by getting the descriptive statistics shown 4-min into the video:", "_____no_output_____" ] ], [ [ "print('The mean value is {:.2f}'.format(leadlips2009['Pb ppm'].mean()))\nprint('The median is {:.2f}'.format(leadlips2009['Pb ppm'].median()))\nprint('The standard deviation is {:.2f}'.format(leadlips2009['Pb ppm'].std()))\nprint('The maximum value is {:.2f}'.format(leadlips2009['Pb ppm'].max()))", "The mean value is 1.07\nThe median is 0.73\nThe standard deviation is 0.96\nThe maximum value is 3.06\n" ] ], [ [ "All of these match the statistics shown in the video. We do see some slight differences in the percentile values, however. Check them out:", "_____no_output_____" ] ], [ [ "print('The 99 percentile is {:.2f}'.format(leadlips2009['Pb ppm'].quantile(.99)))\nprint('The 95 percentile is {:.2f}'.format(leadlips2009['Pb ppm'].quantile(.95)))\nprint('The 90 percentile is {:.2f}'.format(leadlips2009['Pb ppm'].quantile(.90)))\nprint('The 75 percentile is {:.2f}'.format(leadlips2009['Pb ppm'].quantile(.75)))", "The 99 percentile is 3.06\nThe 95 percentile is 3.02\nThe 90 percentile is 2.37\nThe 75 percentile is 1.69\n" ] ], [ [ "##### Challenge question\n\nDespite the small difference in some percentile values from those shown on the video, we do think this is the same data that Sainani uses in her example. Look carefully at the histograms: can you explain the differences? (Play around with the plots here as much as you need to explain it.)", "_____no_output_____" ], [ "Let's load the data for the extended study in 2012. \n\n##### Note:\nIf you don't have the data files locally, change the code in the cell below to read the data from the files hosted in our repository:\n\n```Python\nURL = 'http://go.gwu.edu/engcomp2data3b'\nleadlips2012 = pandas.read_csv(URL)\n```", "_____no_output_____" ] ], [ [ "# Load the FDA 2012 data set using pandas, and assign it to a dataframe\nleadlips2012 = pandas.read_csv(\"../data/FDA2012-lipstickdata.csv\")", "_____no_output_____" ] ], [ [ "Take a quick peek at the first few rows of the dataframe we just created, and then make a histogram of the column containing the lead values (notice that it has a different label than the previous dataframe).", "_____no_output_____" ] ], [ [ "leadlips2012[0:5]", "_____no_output_____" ], [ "leadlips2012.hist(column='Lead (ppm)', bins=10, edgecolor='white')\npyplot.title('Lead levels in lipstick, n=400 (2012) \\n');", "_____no_output_____" ] ], [ [ "Now, let's get the descriptive statistics for this data set, and confirm that they match with those shown in Dr. Sainani's video. ", "_____no_output_____" ] ], [ [ "print('The mean value is {:.2f}'.format(leadlips2012['Lead (ppm)'].mean()))\nprint('The median is {:.2f}'.format(leadlips2012['Lead (ppm)'].median()))\nprint('The standard deviation is {:.2f}'.format(leadlips2012['Lead (ppm)'].std()))\nprint('The maximum value is {:.2f}'.format(leadlips2012['Lead (ppm)'].max()))", "The mean value is 1.11\nThe median is 0.89\nThe standard deviation is 0.97\nThe maximum value is 7.19\n" ] ], [ [ "The mean value, median, and standard deviation did not change much between the 2009 and 2012 studies, even though the earlier study only tested 22 samples. As Prof. Sainani points out, this goes to show that you can begin to describe a feature even with modest sample sizes.\n\nThe maximum value in the second study was a lot higher: 7.19 compared to 3.06. 
The reason for seeing this higher maximum value in the later study is that, for a _right skewed_ distribution like this one, there are infrequent occurrences of a higher concentration of lead. These start to be detected with larger sample sizes.\n\nNext, we compute a few percentiles (noticing slight differences with the values shown by Sainani).", "_____no_output_____" ] ], [ [ "print('The 99 percentile is {:.2f}'.format(leadlips2012['Lead (ppm)'].quantile(.99)))\nprint('The 95 percentile is {:.2f}'.format(leadlips2012['Lead (ppm)'].quantile(.95)))\nprint('The 90 percentile is {:.2f}'.format(leadlips2012['Lead (ppm)'].quantile(.90)))\nprint('The 75 percentile is {:.2f}'.format(leadlips2012['Lead (ppm)'].quantile(.75)))", "The 99 percentile is 4.89\nThe 95 percentile is 2.74\nThe 90 percentile is 2.22\nThe 75 percentile is 1.49\n" ] ], [ [ "In the previous lesson, you learned to make box plots using `pyplot`, which requires extracting the values of the data series of interest into NumPy arrays. It turns out, `pandas` can make box plots directly with a column of the dataframe. ", "_____no_output_____" ] ], [ [ "leadlips2012.boxplot(column='Lead (ppm)', figsize=(6,8))\npyplot.title('Lead levels in lipstick, n=400 (2012) \\n');", "_____no_output_____" ] ], [ [ "The box plot also indicates a right skewed distribution, and shows a number of outliers on the high end of the range: some lipsticks have an especially high level of lead.", "_____no_output_____" ], [ "## Lead exposure from lipstick\n\nA European study of exposure to various cosmetic products [Ref. 2] offers some useful statistics about lipstick use. In figure 6, the paper shows a histogram of lipstick applied by the participants in the study. The distribution is right skewed: most users apply a moderate amount of lipstick daily, but there are a few heavy users in the tail of the distribution. The number of participants was 30,000, and the summary statistics are:\n\n* mean value = 24.61 mg/day,\n* median = 17.11 mg/day,\n* minimum = 0.13 mg/day,\n* maximum = 217.53 mg/day\n* 95th percentile = 72.51 mg/day\n\nProf. Sainani suggests the following exercise: suppose that users ingest half of the lipstick they apply daily—seems like a conservative estimate, given that some lipstick will end up on cups, napkins, and (as Sainani amusingly points out) other people. We'd like to calculate:\n\n1. the typical lead exposure from lipstick, using the medians\n2. the highest daily lead exposure from lipstick, using the maxima\n\nFrom the 2012 FDA study of lead in lipstick: the median is 0.89 ppm (µg/g) and the maximum is 7.19 ppm. From the European study on exposure to cosmetics, the median daily usage of lipstick is 17.11 mg, and the maximum is 217.53. 
Now… keep your units straight!\n\n$ 1 \mu\text{g} = 10^{-3} \text{mg} = 10^{-6} \text{g}$", "_____no_output_____" ] ], [ [ "# Typical user: 0.89 µg/g * 17.11 mg/day (divide by 1000 to get µg)\nprint('The typical daily exposure to lead from lipstick is {:.4f} µg/day.'.format(0.89 *17.11/1000))\nprint('Half of this amount is ingested: {:.4f} µg/day.'.format(0.89 *17.11/1000/2))", "The typical daily exposure to lead from lipstick is 0.0152 µg/day.\nHalf of this amount is ingested: 0.0076 µg/day.\n" ], [ "# Maximum usage: 7.19 µg/g * 217.53 mg/day / 1000 to get µg\nprint('The maximum daily exposure to lead from lipstick is {:.2f} µg/day.'.format(7.19 *217.53/1000))\nprint('Half of this amount is ingested: {:.2f} µg/day.'.format(7.19 *217.53/1000/2))", "The maximum daily exposure to lead from lipstick is 1.56 µg/day.\nHalf of this amount is ingested: 0.78 µg/day.\n" ] ], [ [ "The maximum daily exposure is 100 times larger than the typical exposure, based on the median. Note that this maximum occurs for one user out of 30,000 (the size of the study sample), and one lipstick out of 400—so it's a chance of one in 12 million!", "_____no_output_____" ], [ "## Is this bad?\n\nThe US Food and Drug Administration provides a recommended _maximum_ lead level of 0.1 ppm in candy to be consumed by small children [3]. But most food products are well below the maximum. \nFor example, the FDA data on 40 samples of milk chocolate in the years 1991–2002 showed a mean lead level of 0.025 ppm [4]. That's of course much lower than the concentration of lead in lipstick, but the _consumption_ of chocolate is much higher! Forbes reported that the average American eats about 9.5 lbs (4.3 kg) of chocolate each year [5].", "_____no_output_____" ] ], [ [ "print('The average American consumes {:.1f} grams of chocolate per day.'.format(4.3*1000/365))\nprint('This amounts to {:.2f} µg of lead exposure from chocolate (mean of FDA data).'.format(4.3*1000/365*0.025))", "The average American consumes 11.8 grams of chocolate per day.\nThis amounts to 0.29 µg of lead exposure from chocolate (mean of FDA data).\n" ] ], [ [ "Compared to the median exposure to lead from lipstick of 0.0076 µg per day, the exposure from chocolate is almost 40 times higher!\n\nClearly the consumer group that generated all those headlines was scaremongering. And now you have the tools to fact-check many of those scary health-related \"fake news\" stories.", "_____no_output_____" ], [ "## References\n\n1. [Limiting Lead in Lipstick and Other Cosmetics](https://www.fda.gov/cosmetics/productsingredients/products/ucm137224.htm#reference1), US Food and Drug Administration.\n2. European consumer exposure to cosmetic products, a framework for conducting population exposure assessments (2007). Hall, B., et al., _Food and Chemical Toxicology_ **45**(11): 2097-2108. [Available on PubMed.](https://www.ncbi.nlm.nih.gov/pubmed/17683841)\n3. US FDA Guidance for Industry: [Lead in Candy Likely To Be Consumed Frequently by Small Children: Recommended Maximum Level and Enforcement Policy](https://www.fda.gov/Food/GuidanceRegulation/GuidanceDocumentsRegulatoryInformation/ucm077904.htm) (2005, revised 2006).\n4. US FDA [Supporting Document for Recommended Maximum Level for Lead in Candy Likely To Be Consumed Frequently by Small Children](https://www.fda.gov/food/foodborneillnesscontaminants/metals/ucm172050.htm#lead) (2006).\n5. 
[The World's Biggest Chocolate Consumers](https://www.forbes.com/sites/niallmccarthy/2015/07/22/the-worlds-biggest-chocolate-consumers-infographic/#5399969b4484), Forbes, July 22nd 2015.", "_____no_output_____" ], [ "### Recommended viewing\n\nThis lesson was based on the following lecture from [\"Statistics in Medicine,\"](https://lagunita.stanford.edu/courses/Medicine/MedStats-SP/SelfPaced/about), a free course on Stanford Online by Prof. Kristin Sainani:\n* [Exploring real data: lead in lipstick](https://youtu.be/nlKIT-_b2jU)", "_____no_output_____" ] ], [ [ "# Execute this cell to load the notebook's style sheet, then ignore it\nfrom IPython.core.display import HTML\ncss_file = '../style/custom.css'\nHTML(open(css_file, \"r\").read())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
e71a6ed1b73efed9e8d70a623004b28994b58b79
8,483
ipynb
Jupyter Notebook
sandbox/RipplesInTheCosmos.ipynb
gnizq64/DESI-HighSchool
f9a8430b536acda8d6720792e982d603dc289a58
[ "BSD-3-Clause" ]
null
null
null
sandbox/RipplesInTheCosmos.ipynb
gnizq64/DESI-HighSchool
f9a8430b536acda8d6720792e982d603dc289a58
[ "BSD-3-Clause" ]
null
null
null
sandbox/RipplesInTheCosmos.ipynb
gnizq64/DESI-HighSchool
f9a8430b536acda8d6720792e982d603dc289a58
[ "BSD-3-Clause" ]
null
null
null
59.739437
5,045
0.799835
[ [ [ "import numpy as np\nimport astropy.io.fits as fits\n\nfrom IPython.display import YouTubeVideo", "_____no_output_____" ] ], [ [ "# Ripples in the Cosmos", "_____no_output_____" ], [ "This notebook will eventually explain how we take the observed angular positions of galaxies and their redshift and extract new conclusions about the nature of Dark Energy with _Baryon Acoustic Oscillations_ and _Redshift-Space Distortions_.", "_____no_output_____" ], [ "For now, it'll simply motivate that we can use the observed color of each galaxies to get a rough estimate of their distance from us. With this, we can build 3D maps of the distribution of matter in the Universe. \n\nDESI will be capable of estimating the distance much more precisely than from colors alone, and therefore tell us much more about Dark Energy. But until DESI spends many more nights observing, we'll have to make use of the color-derived map, which looks something like this! ", "_____no_output_____" ] ], [ [ "YouTubeVideo('mlASReWhAjA')", "_____no_output_____" ] ], [ [ "To find out more about recreating this with Blender, check out legacy/tools/Blender.ipynb.", "_____no_output_____" ], [ "To be continued.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e71a7d6dbc16fd4720078aefee9e2d9a4e686f91
7,239
ipynb
Jupyter Notebook
visualisation/sales_data.ipynb
himanshu324/Data-Science
91b0abd237e685c6b2c9c6bb672b38ef13c7ae0b
[ "MIT" ]
null
null
null
visualisation/sales_data.ipynb
himanshu324/Data-Science
91b0abd237e685c6b2c9c6bb672b38ef13c7ae0b
[ "MIT" ]
null
null
null
visualisation/sales_data.ipynb
himanshu324/Data-Science
91b0abd237e685c6b2c9c6bb672b38ef13c7ae0b
[ "MIT" ]
null
null
null
84.174419
5,692
0.872911
[ [ [ "import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n%matplotlib inline ", "_____no_output_____" ], [ "sales = pd.read_csv('sales_data.csv',parse_dates=['Date'])", "_____no_output_____" ], [ "sales['Unit_Cost'].plot(kind='box',vert=False,figsize=(14,6))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e71a829bb3585ac957df37d634217fdb601dc273
4,853
ipynb
Jupyter Notebook
docs/howto/convert-model-serving.ipynb
yonittanenbaum/mlrun
fb3274abb7f5c620f510e32076a65c617092d222
[ "Apache-2.0" ]
null
null
null
docs/howto/convert-model-serving.ipynb
yonittanenbaum/mlrun
fb3274abb7f5c620f510e32076a65c617092d222
[ "Apache-2.0" ]
null
null
null
docs/howto/convert-model-serving.ipynb
yonittanenbaum/mlrun
fb3274abb7f5c620f510e32076a65c617092d222
[ "Apache-2.0" ]
null
null
null
23.673171
123
0.508964
[ [ [ "# Model Serving Function", "_____no_output_____" ] ], [ [ "import mlrun", "_____no_output_____" ], [ "%nuclio cmd -c pip install lightgbm", "_____no_output_____" ], [ "%nuclio config spec.build.baseImage = \"mlrun/mlrun\"\n%nuclio config kind = \"serving\"", "%nuclio: setting spec.build.baseImage to 'mlrun/mlrun'\n%nuclio: setting kind to 'serving'\n" ], [ "import numpy as np\nfrom cloudpickle import load\n\nclass LGBMModel(mlrun.serving.V2ModelServer):\n \n def load(self):\n model_file, extra_data = self.get_model('.pkl')\n self.model = load(open(model_file, 'rb'))\n\n def predict(self, body):\n try:\n feats = np.asarray(body['inputs'])\n result = self.model.predict(feats)\n return result.tolist()\n except Exception as e:\n raise Exception(\"Failed to predict %s\" % e)", "_____no_output_____" ], [ "# nuclio: end-code", "_____no_output_____" ] ], [ [ "## Deploy and Test The Function", "_____no_output_____" ] ], [ [ "models_path = 'https://s3.wasabisys.com/iguazio/models/lightgbm/SampleModel.pkl'", "_____no_output_____" ], [ "fn = mlrun.code_to_function('lightgbm-serving',\n description=\"LightGBM Serving\",\n categories=['serving', 'ml'],\n labels={'author': 'edmondg', 'framework': 'lightgbm'})\nfn.spec.default_class = 'LGBMModel'", "_____no_output_____" ], [ "fn.add_model('nyc-taxi-server', model_path=models_path)", "_____no_output_____" ], [ "# deploy the function\nfn.apply(mlrun.platforms.auto_mount())\naddress = fn.deploy()", "> 2021-01-28 10:56:00,391 [info] Starting remote function deploy\n2021-01-28 10:56:00 (info) Deploying function\n2021-01-28 10:56:00 (info) Building\n2021-01-28 10:56:00 (info) Staging files and preparing base images\n2021-01-28 10:56:00 (info) Building processor image\n2021-01-28 10:56:01 (info) Build complete\n2021-01-28 10:56:09 (info) Function deploy complete\n> 2021-01-28 10:56:10,399 [info] function deployed, address=default-tenant.app.jinkwubtllaf.iguazio-cd1.com:32169\n" ], [ "# test the function\nmy_data = '''{\"inputs\":[[5.1, 3.5, 1.4, 3, 5.1, 3.5, 1.4, 0.2, 5.1, 3.5, 1.4, 0.2, 5.1, 3.5, 1.4, 0.2]]}'''\nfn.invoke('/v2/models/nyc-taxi-server/predict', my_data)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e71a8bc347211c8716a7f7440c75ab48fca131cc
28,841
ipynb
Jupyter Notebook
01_Getting_&_Knowing_Your_Data/Occupation/Exercise_with_Solution.ipynb
martgn/pandas_exercises
596adaf7bc7c17c009b9f26c9e8156fdf8328259
[ "BSD-3-Clause" ]
null
null
null
01_Getting_&_Knowing_Your_Data/Occupation/Exercise_with_Solution.ipynb
martgn/pandas_exercises
596adaf7bc7c17c009b9f26c9e8156fdf8328259
[ "BSD-3-Clause" ]
null
null
null
01_Getting_&_Knowing_Your_Data/Occupation/Exercise_with_Solution.ipynb
martgn/pandas_exercises
596adaf7bc7c17c009b9f26c9e8156fdf8328259
[ "BSD-3-Clause" ]
null
null
null
25.704991
197
0.352519
[ [ [ "# Ex3 - Getting and Knowing your Data\n\nCheck out [Occupation Exercises Video Tutorial](https://www.youtube.com/watch?v=W8AB5s-L3Rw&list=PLgJhDSE2ZLxaY_DigHeiIDC1cD09rXgJv&index=4) to watch a data scientist go through the exercises", "_____no_output_____" ], [ "This time we are going to pull data directly from the internet.\nSpecial thanks to: https://github.com/justmarkham for sharing the dataset and materials.\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user). ", "_____no_output_____" ], [ "### Step 3. Assign it to a variable called users and use the 'user_id' as index", "_____no_output_____" ] ], [ [ "users = pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user', \n sep='|', index_col='user_id')", "_____no_output_____" ] ], [ [ "### Step 4. See the first 25 entries", "_____no_output_____" ] ], [ [ "users.head(25)", "_____no_output_____" ] ], [ [ "### Step 5. See the last 10 entries", "_____no_output_____" ] ], [ [ "users.tail(10)", "_____no_output_____" ] ], [ [ "### Step 6. What is the number of observations in the dataset?", "_____no_output_____" ] ], [ [ "users.shape[0]", "_____no_output_____" ] ], [ [ "### Step 7. What is the number of columns in the dataset?", "_____no_output_____" ] ], [ [ "users.shape[1]", "_____no_output_____" ] ], [ [ "### Step 8. Print the name of all the columns.", "_____no_output_____" ] ], [ [ "users.columns", "_____no_output_____" ] ], [ [ "### Step 9. How is the dataset indexed?", "_____no_output_____" ] ], [ [ "# \"the index\" (aka \"the labels\")\nusers.index", "_____no_output_____" ] ], [ [ "### Step 10. What is the data type of each column?", "_____no_output_____" ] ], [ [ "users.dtypes", "_____no_output_____" ] ], [ [ "### Step 11. Print only the occupation column", "_____no_output_____" ] ], [ [ "users.occupation\n\n#or\n\nusers['occupation']", "_____no_output_____" ] ], [ [ "### Step 12. How many different occupations are in this dataset?", "_____no_output_____" ] ], [ [ "users.occupation.nunique()\n#or by using value_counts() which returns the count of unique elements\n#users.occupation.value_counts().count()", "_____no_output_____" ] ], [ [ "### Step 13. What is the most frequent occupation?", "_____no_output_____" ] ], [ [ "#Because \"most\" is asked\nusers.occupation.value_counts().head(1).index[0]\n\n#or\n#to have the top 5\n\n# users.occupation.value_counts().head()", "_____no_output_____" ] ], [ [ "### Step 14. Summarize the DataFrame.", "_____no_output_____" ] ], [ [ "users.describe() #Notice: by default, only the numeric columns are returned. ", "_____no_output_____" ] ], [ [ "### Step 15. Summarize all the columns", "_____no_output_____" ] ], [ [ "users.describe(include = \"all\") #Notice: By default, only the numeric columns are returned.", "_____no_output_____" ] ], [ [ "### Step 16. Summarize only the occupation column", "_____no_output_____" ] ], [ [ "users.occupation.describe()", "_____no_output_____" ] ], [ [ "### Step 17. What is the mean age of users?", "_____no_output_____" ] ], [ [ "round(users.age.mean())", "_____no_output_____" ] ], [ [ "### Step 18. What is the age with least occurrence?", "_____no_output_____" ] ], [ [ "users.age.value_counts().tail() #7, 10, 11, 66 and 73 years -> only 1 occurrence", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e71a9210b126fbfaede23681bfe29d959b9a8b52
6,485
ipynb
Jupyter Notebook
Chapter_3/Tutorial_3_Coding.ipynb
wooihaw/ECE3066_2022
8d6e8848507958ae341d86d040b317d77ad21701
[ "CC0-1.0" ]
null
null
null
Chapter_3/Tutorial_3_Coding.ipynb
wooihaw/ECE3066_2022
8d6e8848507958ae341d86d040b317d77ad21701
[ "CC0-1.0" ]
null
null
null
Chapter_3/Tutorial_3_Coding.ipynb
wooihaw/ECE3066_2022
8d6e8848507958ae341d86d040b317d77ad21701
[ "CC0-1.0" ]
null
null
null
23.929889
243
0.56808
[ [ [ "# Initialization\n%matplotlib inline\nfrom warnings import filterwarnings\nfilterwarnings('ignore')", "_____no_output_____" ] ], [ [ "## Exercise 1\n### Regression\n#### Build a regression model to estimate the weight based on height", "_____no_output_____" ] ], [ [ "from pandas import read_csv, get_dummies\nfrom sklearn.model_selection import train_test_split as split\nfrom sklearn.linear_model import LinearRegression\n\ndf = read_csv('data/genders_heights_weights.csv')\nX1 = df.values[:, 1].reshape(-1, 1)\ny1 = df.values[:, 2]\nX1_train, X1_test, y1_train, y1_test = split(X1, y1, random_state=42)", "_____no_output_____" ] ], [ [ "To do: \n- Build a linear regression model (name it as lnr1) to estimate weight using height\n- Evaluate the model's performance with R2 score", "_____no_output_____" ] ], [ [ "h = eval(input('Enter height: '))\nprint(f'Estimated weight is: {lnr1.predict([[h]])}kg')", "_____no_output_____" ] ], [ [ "To do: Build another linear regression model to estimate the weight based on height and gender", "_____no_output_____" ] ], [ [ "h = eval(input('Enter height: '))\ng = eval(input('Enter gender: '))\nprint(f'Estimated weight is: {lnr2.predict([[h, g]])}kg')", "_____no_output_____" ] ], [ [ "## Exercise 2 \n### Classification\n#### Compare 7 classification models for handwritten digit recognition", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import train_test_split as split, KFold, cross_val_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.decomposition import PCA\n\nX, y = load_digits(return_X_y=True)\n\n# View one of the image\nplt.imshow(X[0, :].reshape(8, 8), cmap='gray')\nplt.axis(False)\nplt.title(f'Digit: {y[0]}')\nplt.show()", "_____no_output_____" ] ], [ [ "To do: \n- Check the number of features in the dataset", "_____no_output_____" ], [ "To do:\n- Use spot-checking technique to compare 7 classification models", "_____no_output_____" ], [ "To do: \n- Use Univariate Selection to select 20 best features. Evaluate the performance of the best model above on these features using 5-fold cross validation.", "_____no_output_____" ], [ "To do: \n- Use Principle Component Analysis (PCA) to reduce the dimensionality of the original features to 10. 
Evaluate the performance of k-Nearest Neighbors, Logistic Regression and Decision Tree on these features using 5-fold cross validation.", "_____no_output_____" ], [ "## Exercise 3\n### Clustering\n#### Construct a model to group the truck drivers", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\n# Read dataset\ndf = pd.read_csv('data/drivers.csv', sep='\\t')\n\n# Store features (Column 1 & 2)\nX = df.iloc[:, 1:].values\n\n# Apply k-mean clustering with 2 clusters\nkm = KMeans(n_clusters=2).fit(X)\n\nplt.scatter(X[:, 0], X[:, 1], c=km.labels_)\nplt.show()", "_____no_output_____" ] ], [ [ "To do: To group the truck drivers into 4 clusters and label the data samples according to their cluster", "_____no_output_____" ], [ "To do: Add labels to df and save it as a CSV file called 'drivers_labelled.csv'", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e71a93bb58f575d8ac1f4e34df357ee3b601705d
55,810
ipynb
Jupyter Notebook
Session 3/Code/simple-linear-regresion.ipynb
jakariamd/Data-Warehousing-and-Data-Mining-Sessional
edcf4339d63092f5e72d50e68155744f1f34a1f6
[ "MIT" ]
6
2020-08-13T15:16:35.000Z
2021-09-28T14:09:33.000Z
Session 3/Code/simple-linear-regresion.ipynb
jakariamd/Data-Warehousing-and-Data-Mining-Sessional
edcf4339d63092f5e72d50e68155744f1f34a1f6
[ "MIT" ]
null
null
null
Session 3/Code/simple-linear-regresion.ipynb
jakariamd/Data-Warehousing-and-Data-Mining-Sessional
edcf4339d63092f5e72d50e68155744f1f34a1f6
[ "MIT" ]
8
2020-08-25T21:36:37.000Z
2021-01-02T17:58:53.000Z
55,810
55,810
0.935478
[ [ [ "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (20.0, 10.0)\n\n#Reading Data\ndata = pd.read_csv('../input/headbrain/headbrain.csv')\nprint(data.shape)\ndata.head()", "(237, 4)\n" ], [ "#collecting X and Y\nX = data['Head Size(cm^3)'].values\nY = data['Brain Weight(grams)'].values\n\n#Mean X and Y\nmean_x = np.mean(X)\nmean_y = np.mean(Y)\n\n#Total number of values\nn = len(X)\n\n#Using the formula to calculate b1 and b2\nnumer = 0\ndenom = 0\nfor i in range(n):\n numer += (X[i] - mean_x) * (Y[i] - mean_y)\n denom += (X[i] - mean_x) ** 2\n\n\n# y = m*x + c\nm = numer/denom\nc = mean_y - (m * mean_x)\n\n#Print coefficient\nprint(m, c)", "0.26342933948939945 325.57342104944223\n" ], [ "#Plotting Values and Regression Line\n\nmax_x = np.max(X) + 100\nmin_x = np.min(X) - 100\n\n#Calculating Line values x and y\nx = np.linspace(min_x, max_x, 1000)\ny = m * x + c\n\n#Plotting Line\nplt.plot(x, y, color='g', label='Regression Line')\n#plotting Scatter Points\nplt.scatter(X, Y, c='r', label='Scatter Plot')\n\nplt.xlabel('Head Size in cm3')\nplt.ylabel('Brain Weight in grams')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "distActual = 0\ndistPredicted = 0\n\nfor i in range(n):\n y_pred = m * X[i] + c\n distPredicted += (y_pred - mean_y)**2\n distActual += (Y[i] - mean_y)**2\n\nr2 = (distPredicted/distActual)\nprint(r2)", "0.6393117199570001\n" ], [ "from sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\n\n#convert X to single col\nX = X.reshape((n,1))\n\n#Creating model\nreg = LinearRegression()\n#Fitting training data\nreg = reg.fit(X, Y)\n#Y prediction\nY_pred = reg.predict(X)\n\n#Calculating RMSE and R2 score\nr2Score = r2_score(Y, Y_pred)\n\nprint(r2Score)\n", "0.639311719957\n0.639311719957\n" ], [ "print(reg.coef_)\nprint(reg.intercept_)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]