hexsha stringlengths 40-40 | size int64 6-14.9M | ext stringclasses 1 value | lang stringclasses 1 value | max_stars_repo_path stringlengths 6-260 | max_stars_repo_name stringlengths 6-119 | max_stars_repo_head_hexsha stringlengths 40-41 | max_stars_repo_licenses sequence | max_stars_count int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24-24 ⌀ | max_issues_repo_path stringlengths 6-260 | max_issues_repo_name stringlengths 6-119 | max_issues_repo_head_hexsha stringlengths 40-41 | max_issues_repo_licenses sequence | max_issues_count int64 1-67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24-24 ⌀ | max_forks_repo_path stringlengths 6-260 | max_forks_repo_name stringlengths 6-119 | max_forks_repo_head_hexsha stringlengths 40-41 | max_forks_repo_licenses sequence | max_forks_count int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24-24 ⌀ | avg_line_length float64 2-1.04M | max_line_length int64 2-11.2M | alphanum_fraction float64 0-1 | cells sequence | cell_types sequence | cell_type_groups sequence |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d048fd62850aac097b162f7ccb058602a7b909a2 | 5,759 | ipynb | Jupyter Notebook | exercise-1/exercise.ipynb | ajm1813/ch4-python-intro | 823d29f257ef8fafeb441ba88b07d405ed9a94c3 | [
"MIT"
] | null | null | null | exercise-1/exercise.ipynb | ajm1813/ch4-python-intro | 823d29f257ef8fafeb441ba88b07d405ed9a94c3 | [
"MIT"
] | null | null | null | exercise-1/exercise.ipynb | ajm1813/ch4-python-intro | 823d29f257ef8fafeb441ba88b07d405ed9a94c3 | [
"MIT"
] | null | null | null | 21.569288 | 195 | 0.542455 | [
[
[
"# Exercise 1\nAdd the specified code for each code cell, running the cells _in order_.",
"_____no_output_____"
],
[
"Create a variable `food` that stores your favorite kind of food. Print or output the variable.",
"_____no_output_____"
]
],
[
[
"food = \"pizza\"",
"_____no_output_____"
]
],
[
[
"Create a variable `restaurant` that stores your favorite place to eat that kind of food.",
"_____no_output_____"
]
],
[
[
"restaurant = \"Delfinos pizza\"",
"_____no_output_____"
]
],
[
[
"Print the message `\"I'm going to RESTAURANT for some FOOD\"`, replacing the restaurant and food with your variables.",
"_____no_output_____"
]
],
[
[
"print (\"I'm going to \" + restaurant + \" for some \" + food)",
"I'm going to Delfinos pizza for some pizza\n"
]
],
[
[
"Create a variable `num_friends` equal to the number of friends you would like to eat with.",
"_____no_output_____"
]
],
[
[
"num_friends = 5",
"_____no_output_____"
]
],
[
[
"Print a message `\"I'm going with X friends\"`, replacing the X with the number of friends. ",
"_____no_output_____"
]
],
[
[
"print (\"I'm going with \" + str(num_friends) + \" friends \")",
"I'm going with 5 friends \n"
]
],
[
[
"Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`.",
"_____no_output_____"
]
],
[
[
"meal_price = 35.90",
"_____no_output_____"
]
],
[
[
"Update (re-assign) the `meal_price` variable so it includes a 15% tip—that is, so the price is 15% higher. Output the variable.",
"_____no_output_____"
]
],
[
[
"meal_price = meal_price * 1.15",
"_____no_output_____"
]
],
[
[
"Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable",
"_____no_output_____"
]
],
[
[
"total_cost = meal_price * num_friends",
"_____no_output_____"
]
],
[
[
"Create a variable `budget` representing your spending budget for a night out.",
"_____no_output_____"
]
],
[
[
"budget = 500",
"_____no_output_____"
]
],
[
[
"Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.\n- Be carefully that you only invite whole people!",
"_____no_output_____"
]
],
[
[
"max_friends = int (budget/meal_price)",
"_____no_output_____"
]
],
[
[
"Bonus: Create a variable `chorus` that is the string `\"FOOD time!\"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable.",
"_____no_output_____"
]
],
[
[
"print (\"food time!\\n \" * 5)",
"food time!\n food time!\n food time!\n food time!\n food time!\n \n"
]
],
[
[
"Modify the above cell so that each `\"FOOD time!\"` is on a separate line (_hint_: use a newline character!), then rerun the cell.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d04905aa9b9177ebc2ca338bcfd52698f1a32cdf | 43,529 | ipynb | Jupyter Notebook | ADSDB/Optimization-Costs.ipynb | MiguelHeCa/miri-notes | 8e9256011c5eecf427c5e3761ff70db998adfd72 | [
"MIT"
] | null | null | null | ADSDB/Optimization-Costs.ipynb | MiguelHeCa/miri-notes | 8e9256011c5eecf427c5e3761ff70db998adfd72 | [
"MIT"
] | null | null | null | ADSDB/Optimization-Costs.ipynb | MiguelHeCa/miri-notes | 8e9256011c5eecf427c5e3761ff70db998adfd72 | [
"MIT"
] | null | null | null | 27.070274 | 151 | 0.443704 | [
[
[
"# OPTIMIZATION PHASES\n\n### List of variables\n\n<table>\n <thead>\n <tr>\n <th style=\"width: 10%\">Variable</th>\n <th style=\"width: 45%\">Description</th>\n <th style=\"width: 30%\">Comment</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>$B$</td>\n <td>Number of full blocks/pages that need the records</td>\n <td>$\\lceil \\frac{|T|}{R} \\rceil$; $B \\ll |T|$</td>\n </tr>\n <tr>\n <td>$R$</td>\n <td>Number of records per block/page</td>\n <td></td>\n </tr>\n <tr>\n <td>$|T|$</td>\n <td>Cardinality. Number of tuples of a table</td>\n <td>Size of table</td>\n </tr>\n <tr>\n <td>$D$</td>\n <td>Time to access (read or write) a disk block</td>\n <td>Approximately 0'010 seconds</td>\n </tr>\n <tr>\n <td>$C$</td>\n <td>Time for the CPU to process a record</td>\n <td>Approximately 10<sup>-9</sup></td>\n </tr>\n <tr>\n <td>$d$</td>\n <td>Tree order</td>\n <td>Usually greater than 100</td>\n </tr>\n <tr>\n <td>$h$</td>\n <td>Tree depth minus 1</td>\n <td>$\\lceil \\log_u |T| \\rceil - 1$</td>\n </tr>\n <tr>\n <td>$v$</td>\n <td>Number of different values in a search</td>\n <td></td>\n </tr>\n <tr>\n <td>$u$</td>\n <td>$\\%load \\cdot 2d$</td>\n <td></td>\n </tr>\n <tr>\n <td>$k$</td>\n <td>Number of repetitions of every value in the search</td>\n <td></td>\n </tr>\n <tr>\n <td>ndist(A)</td>\n <td>Number of different values for attribute A</td>\n <td>Obtained from DB catalog</td>\n </tr>\n <tr>\n <td>max</td>\n <td>Maximum value of an attribute</td>\n <td>Obtained from DB catalog</td>\n </tr>\n <tr>\n <td>min</td>\n <td>Minimum value of an attribute</td>\n <td>Obtained from DB catalog</td>\n </tr>\n <tr>\n <td>$H$</td>\n <td>Time to evaluate the hash function</td>\n <td></td>\n </tr>\n <tr>\n <td>$M$</td>\n <td>Memory pages for a join/sorting algorithm</td>\n <td></td>\n </tr>\n <tr>\n <td>bits</td>\n <td>Bits per index block</td>\n <td></td>\n </tr>\n <tr>\n <td></td>\n <td>Domain Cardinality</td>\n <td>Maximum number of different values</td>\n </tr>\n </tbody>\n</table>\n\n### List of variables for intermediate results\n\nRecord length. $\\sum$ attribute length<sub>i</sub> (+ control information)\n$$|R|$$\n\nNumber of records per block\n$$R_R = \\lfloor \\frac{B}{|R|} \\rfloor$$\n\nNumber of blocks per table\n$$B_R = \\lceil \\frac{|R|}{R_R} \\rceil$$\n\n\n### Cardinalities estimation\n\nSelectivity Factor. % of tuples in the output regarding the input. ~0: very selective ~1: not very selective\n$$\\mathrm{SF}$$\n\nOutput of cardinality estimation\n$$|O| = \\mathrm{SF} \\cdot |R|$$ or $$|O| = \\mathrm{SF} \\cdot |R1| \\cdot |R2|$$ \n\nSelection\n$$|\\mathrm{selection}(R)| = \\mathrm{SF} \\cdot |R|$$\n\nJoin\n$$|\\mathrm{join}(R, S)| = \\mathrm{SF} \\cdot |R| \\cdot |S|$$\n\nUnions with repetitions\n$$|\\mathrm{union}(R, S)| = |R| + |S|$$\n\nUnions without repetitions\n$$|\\mathrm{union}(R, S)| = |R| + |S| - |\\mathrm{union}(R, S)|$$\n\nDifference (anti-join)\n$$|\\mathrm{difference}(R, S)| = |R| - |\\mathrm{union}(R, S)|$$\n",
"_____no_output_____"
],
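The derived quantities above ($B$, $u$, $h$) appear in every cost formula that follows. As a quick illustration only (not part of the original notebook), here is a minimal Python sketch that computes them from made-up statistics; the values |T| = 100,000, R = 20 and d = 75 are assumptions chosen to resemble the worked example further down.

```python
import math

# Illustrative statistics (assumptions, not real catalog values)
T = 100_000    # |T|: number of tuples in the table
R = 20         # records per block/page
d = 75         # tree order
load = 2 / 3   # assumed tree load

B = math.ceil(T / R)                # blocks needed to store the table
u = load * 2 * d                    # useful entries per index node
h = math.ceil(math.log(T, u)) - 1   # tree depth minus 1

print(f"B = {B}, u = {u:.0f}, h = {h}")  # B = 5000, u = 100, h = 2
```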
[
"## Optimization phases\n\n**Asumptions**\n\n* Materialized views\n* Focus on Disk access time\n* Physical address of the record\n* Only consider cases\n 1. No index\n 2. Unordered B-Tree with addresses (B<sup>+</sup>)\n 3. Unordered Hash with addresses\n 4. Orderered B-Tree with addresses (Clustered)\n\n\n### Unordered B-tree with addresses (B<sup>+</sup>)\n\n**Assumptions**\n\n* In every tree node **2d** addresses fit\n* Tree load 66% (2/3)\n\n### Orderered B-Tree with addresses (Clustered)\n\n**Assumptions**\n\n* Tree load 66% (2/3) (index and table blocks)\n\n### Unordered Hash with addresses\n\n**Assumptions**\n\n* No blocks for excess\n* The same number of entries fit in a bucket block as in a tree block\n* Bucket blocks at 80% (4/5)",
"_____no_output_____"
],
[
"## Space\n\n\n**No index**\n\n$$B$$\n\n**B<sup>+</sup>**\n\n$$\\sum_1^{h+1} \\lceil \\frac{|T|}{u^i} \\rceil + B$$\n\n**Clustered**\n\n$$\\sum_1^{h+1} \\lceil \\frac{|T|}{u^i} \\rceil + \\lceil 1.5B \\rceil$$\n\n**Hash**\n\n$$1 + \\lceil 1.25(\\frac{|T|}{2d}) \\rceil + B$$\n",
"_____no_output_____"
],
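To make the space formulas concrete, the short sketch below evaluates all four of them for the same invented statistics as before; it is only an illustration of how the sums are computed, not output from the original notebook.

```python
import math

# Same invented statistics as before (assumptions for illustration only)
T, R, d, load = 100_000, 20, 75, 2 / 3
B = math.ceil(T / R)
u = load * 2 * d
h = math.ceil(math.log(T, u)) - 1

# Index blocks: sum over the h+1 index levels of ceil(|T| / u^i)
index_blocks = sum(math.ceil(T / u ** i) for i in range(1, h + 2))

space_no_index = B
space_b_plus = index_blocks + B
space_clustered = index_blocks + math.ceil(1.5 * B)
space_hash = 1 + math.ceil(1.25 * T / (2 * d)) + B

print(space_no_index, space_b_plus, space_clustered, space_hash)
```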
[
"Example:\n\n$$\\mathrm{Lvl_1} = \\frac{|T|}{u}$$\n\n$$\\mathrm{Lvl_2} = \\frac{|T|}{u^2}$$\n\n$$\\mathrm{Lvl_3} = \\frac{|T|}{u^3}$$",
"_____no_output_____"
],
[
"## Access paths\n\n### Table scan\n\nThe whole table\n\n<div style=\"text-align: right\"> $u = \\frac{2}{3} \\cdot 2d$ </div>\n\n**No index**\n\n$$B \\cdot D$$\n\n**B<sup>+</sup>**\n\n<span style=\"color:orange\">Only useful for sort</span>\n\n$$\\lceil \\frac{|T|}{u} \\rceil \\cdot D + |T| \\cdot D$$\n\n**Clustered**\n\n$$\\lceil 1.5B \\rceil \\cdot D$$\n\n**Hash**\n\n<span style=\"color:red\">Useless</span>\n\n$$\\lceil 1.25(\\frac{|T|}{2d}) \\rceil \\cdot D + |T| \\cdot D $$\n\n",
"_____no_output_____"
],
[
"### Search one tuple\n\nEquality of unique attribute\n\n<div style=\"text-align: right\">\n $u = \\frac{2}{3} \\cdot 2d$ <br>\n $h = \\lceil \\log_u |T| \\rceil - 1$\n</div>\n\n**No index**\n\n$$0.5B \\cdot D$$\n\n**B<sup>+</sup>**\n\n$$h \\cdot D + D$$\n\n**Clustered**\n\n$$h \\cdot D + D$$\n\n**Hash**\n\n$$H + D + D$$\n",
"_____no_output_____"
],
[
"### Search several tuples\n\nInterval\n\nNo unique attribute\n\n<div style=\"text-align: right\">\n $u = \\frac{2}{3} \\cdot 2d$ <br>\n $h = \\lceil \\log_u |T| \\rceil - 1$ <br>\n $|O|$: cardinality of Output <br>\n $v$: value in range <br>\n $k$: repetitions per value <br>\n</div>\n\n**No index**\n\n$$B \\cdot D$$\n\n**B<sup>+</sup>**\n\n$$h \\cdot D + \\frac{|O| - 1}{u} \\cdot D + |O| \\cdot D$$\n\n**Clustered**\n\n$$h \\cdot D + D + 1.5 \\left( \\frac{|O|-1}{R} \\right) \\cdot D$$\n\n**Hash**\n\n$$v = 1: 1 \\cdot (H + D + k \\cdot D) = H + D + k \\cdot D$$\n\n$$v > 1: v \\cdot (H + D + k \\cdot D)$$\n\n$$v \\;\\mathrm{is\\;unknown}: \\mathrm{Useless}$$\n\n",
"_____no_output_____"
],
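As a worked illustration of the "search several tuples" costs above (all inputs are invented, so only the mechanics matter, not the resulting numbers):

```python
import math

# Invented inputs for illustration only
T, R, d, D, load = 100_000, 20, 75, 1, 2 / 3
B = math.ceil(T / R)
u = load * 2 * d
h = math.ceil(math.log(T, u)) - 1
O = 500        # |O|: estimated output cardinality
v, k = 10, 50  # v different values, each repeated k times (v * k = |O|)
H = 0          # hash-function evaluation time, negligible next to disk access

cost_no_index = B * D
cost_b_plus = h * D + ((O - 1) / u) * D + O * D
cost_clustered = h * D + D + 1.5 * ((O - 1) / R) * D
cost_hash = v * (H + D + k * D)

print(cost_no_index, cost_b_plus, cost_clustered, cost_hash)
```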
[
"### Statistics in Oracle\n\nDBA is responsible for the statistics.\n\n`ANALYZE [TABLE|INDEX|CLUSTER] <name> [COMPUTE|ESTIMATE] STATISTICS;`\n\n```sql\nANALYZE TABLE departments COMPUTE STATISTICS; \nANALYZE TABLE employees COMPUTE STATISTICS;\n```\n\n`DBMS_STATS.GATHER_TABLE_STATS( <esquema>, <table> );`\n\n```sql\nDBMS_STATS.GATHER_TABLE_STATS(\"username\", \"departments\");\nDBMS_STATS.GATHER_TABLE_STATS(\"username\", \"employees\");\n```\n\nKinds of statistics\n\n| Relations | Attributes |\n|:--|:--|\n| Cardinality | Length |\n| Number of blocks | Domain cardinality |\n| Average length of records | Number of existing different values |\n| | Maximum value |\n| | Minimum value |\n\nMain hypothesis in most DBMS\n* Uniform distribution of values for each attribute\n* Independence of attributes",
"_____no_output_____"
],
[
"## Selectivity Factor of a Selection\n\nAssuming equi-probability of values\n\n`WHERE A = c`\n$$\\mathrm{SF}(A = c) = \\frac{1}{\\mathrm{ndist}(A)}$$\n\nAssuming uniform distribution and $A \\in [\\min, \\max]$ \n\n`WHERE A > c`\n\n$$\n\\mathrm{SF}(A > c) = \\frac{\\max - c}{\\max - \\min} =\n\\begin{cases}\n0 & \\quad \\text{if}\\; c \\geq \\max \\\\\n1 & \\quad \\text{if}\\; c < \\min\n\\end{cases}\n$$\n\n`WHERE A < c`\n\n$$\n\\mathrm{SF}(A < c) = \\frac{c - \\min}{\\max - \\min} =\n\\begin{cases}\n0 & \\quad \\text{if}\\; c \\leq \\min \\\\\n1 & \\quad \\text{if}\\; c > \\max\n\\end{cases}\n$$\n\n\nAssuming $\\text{ndist}(A)$ is big enough\n\n`WHERE A <= c`\n$$\n\\mathrm{SF}(A \\leq c) = \\mathrm{SF}(A < c)\n$$\n\n`WHERE A >= c`\n$$\n\\mathrm{SF}(A \\geq c) = \\mathrm{SF}(A > c)\n$$\n\nAssuming P and Q statistically **independent**\n\n`WHERE P AND Q`\n$$\n\\text{SF}(P \\;\\text{AND}\\; Q) = \\text{SF}(P) \\cdot \\text{SF}(Q)\n$$\n\n`WHERE P OR Q`\n$$\n\\text{SF}(P \\;\\text{OR}\\; Q) = \\text{SF}(P) + \\text{SF}(Q) - \\text{SF}(P) \\cdot \\text{SF}(Q)\n$$\n\n`WHERE NOT P`\n$$\n\\text{SF}(\\text{NOT}\\;P) = 1 - \\text{SF}(P)\n$$\n\n`WHERE A IN (c1, c2, ... , cn)`\n$$\n\\text{SF}(A \\in (c_1, c_2, \\dots, c_n)) = \\min(1, \\frac{n}{\\mathrm{ndist}(A)})\n$$\n\n`WHERE A BETWEEN (c1, c2)`\n$$\n\\text{SF}(c_1 \\leq A \\leq c_2) = \\frac{\\min(c_2, \\max)-\\max(c_1, \\min)}{\\max - \\min}\n$$",
"_____no_output_____"
],
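The selection formulas above translate almost one-to-one into code. The helpers below are a sketch (not from the original notebook); they only rely on the catalog statistics mentioned in the text (ndist, min, max), and the final call reuses the quantity/region numbers that appear later in the worked example.

```python
# Sketch of the selection selectivity factors defined above
def sf_equality(ndist):
    return 1 / ndist

def sf_greater_than(c, min_v, max_v):
    if c >= max_v:
        return 0.0
    if c < min_v:
        return 1.0
    return (max_v - c) / (max_v - min_v)

def sf_and(sf_p, sf_q):          # assumes P and Q are independent
    return sf_p * sf_q

def sf_or(sf_p, sf_q):
    return sf_p + sf_q - sf_p * sf_q

def sf_in(n, ndist):
    return min(1.0, n / ndist)

def sf_between(c1, c2, min_v, max_v):
    return (min(c2, max_v) - max(c1, min_v)) / (max_v - min_v)

# e.g. WHERE quantity > 100 AND region = 'Priorat'
# (quantity in [10, 500], ndist(region) = 30, as in the example below)
print(sf_and(sf_greater_than(100, 10, 500), sf_equality(30)))
```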
[
"### Selectivity Factor of a Join\n\nFor $R[A \\theta B]S$\n\nToo difficult to approximate this general case. Usually, the required statistics are not available because it\nwould be too expensive to maintain them.\n\nResults depend on operator:\n\n$$\n\\text{SF}(R[A\\times B]S) = 1\n$$\n\n$$\n\\text{SF}(R[A \\neq B]S) = 1 \n$$\n\n$$\n\\text{SF}(R[A=B]S) = \\frac{1}{|R|} \n\\begin{cases}\nS_B & \\quad \\text{is not null} \\\\\nS_B & \\quad \\text{FK to } R_A \\\\\nR_A & \\quad \\text{PK}\n\\end{cases}\n$$\n\nIf there is no FK\n\n$$\n\\text{SF}(R[A=B]S) = \\frac{1}{\\max(\\text{ndist}(A), \\text{ndist}(B))}\n$$\n\n$$\n\\text{SF}(R[A<B]S) = {^1/_2}\n$$\n$$\n\\text{SF}(R[A \\leq B]S) = {^1/_2}\n$$",
"_____no_output_____"
],
[
"## Phases of physiscal optimization\n\n1. Alternatives generation\n2. Intermediate results estimation\n3. Cost estimation for each algorithm\n4. Choose the best option",
"_____no_output_____"
],
[
"## Example\n\n```sql\nSELECT \n DISTINCT w.strength\nFROM\n wines w, producers p, vintages v\nWHERE\n v.wineId = w.wineId\n AND\n p.prodId = v.prodId\n AND\n p.region = \"Priorat\"\n AND\n v.quantity > 100;\n```\n\n\n\nTables have the following structures\n\nProducers\n* Clustered by `prodId`\n* B<sup>+</sup> by `region`\n\nWines\n* Clustered by `wineId`\n\nVintages\n* Clustered by `wineId` and `prodId`\n\nStatistics:\nTables (extra space due to being clustered needs to be added)\n\n$$\n\\begin{matrix}\n|P| = 10000 & |W| = 5000 & |V| = 100000 \\\\\nR_p = 12 & R_w = 10 & R_v = 20 \\\\\nB_p = 834 & B_w = 500 & B_v = 5000\n\\end{matrix}\n$$\n\nAttributes\n\nprodId, wineId and strength: $|R_R| = 5$ bytes\n\n$\\text{ndist(region)} = 30$\n\n$\\min(\\text{quantity}) = 10$ $\\max(\\text{quantity}) = 500$\n\n$\\text{ndist(strength)} = 10$\n\n\n**System Parameters**\n\n$B = 500$ bytes per intermediate disk block\n\n$D = 1$\n\n$C = 0$\n\n$d = 75$\n\nDBMS:\n\n* Block Nested Loops (6 Memory pages, $M = 4$)\n* Row Nested Loops\n* Sort Match (with 3 memory pages for sorting, $M = 2$)\n",
"_____no_output_____"
]
],
[
[
"import math\n\nc_P, c_W, c_V = 10000, 5000, 100000\nR_p, R_w, R_v = 12, 10, 20\nB_p, B_w, B_v = math.ceil(c_P / R_p), math.ceil(c_W / R_w), math.ceil(c_V / R_v)\n\nprint(\"Cardinality of {}: {}, Records: {}, number of Full Blocks: {}\".format('P', c_P, R_p, B_p))\nprint(\"Cardinality of {}: {}, Records: {}, number of Full Blocks: {}\".format('W', c_W, R_w, B_w))\nprint(\"Cardinality of {}: {}, Records: {}, number of Full Blocks: {}\".format('V', c_V, R_v, B_v))",
"Cardinality of P: 10000, Records: 12, number of Full Blocks: 834\nCardinality of W: 5000, Records: 10, number of Full Blocks: 500\nCardinality of V: 100000, Records: 20, number of Full Blocks: 5000\n"
]
],
[
[
"### Phase 1. Alternatives generation\n\n```sql\nSELECT \n DISTINCT w.strength\nFROM\n wines w, producers p, vintages v\nWHERE\n v.wineId = w.wineId AND p.prodId = v.prodId \n AND p.region = \"Priorat\"\n AND v.quantity > 100;\n```\n\nChange selection and join arrangement\n\n",
"_____no_output_____"
],
[
"### Phase 2. Intermediate results estimation\n\n```sql\nSELECT \n DISTINCT w.strength\nFROM\n wines w, producers p, vintages v\nWHERE\n v.wineId = w.wineId AND p.prodId = v.prodId \n AND p.region = \"Priorat\"\n AND v.quantity > 100;\n```\n\n**PT1 and PT2**\n\n**Selection over V: V'**\n\n\n\nRecord length of prodId and wineId:\n$$|R_{V'}| = 5 + 5 = 10$$\n\nSelectivity factor of selection:\n$$\n\\mathrm{SF}(A > c) = \\frac{\\max - c}{\\max - \\min}\n$$\n\nWhere $c = 100$ and the query specifies `v.quantity > 100`, then:\n\n$$\n\\text{SF}(\\text{quantity} > 100) = \\frac{500 - 100}{500 - 10} = 0.81632\n$$\n\nOutput cardinality of V':\n$$\n|O| = \\text{SF} \\cdot |R|\n$$\n\n$$\n|V'| = \\text{SF}(\\text{quantity} > 100) \\cdot |V| = 0.81632 \\cdot 100000 = 81632\n$$\n\nNumber of records per block:\n$$\nR_{V'} = \\lfloor \\frac{B}{|R_{V'}|} \\rfloor = \\lfloor \\frac{500}{10} \\rfloor = 50\n$$\n\nNumber of blocks needed for V':\n$$\nB_{V'} = \\lceil \\frac{|V'|}{R_{V'}} \\rceil = \\lceil \\frac{81632}{50} \\rceil = 1633\n$$",
"_____no_output_____"
]
],
[
[
"c = 100\nmin_v = 10\nmax_v = 500\nSF_v_prime = (max_v - c) / (max_v - min_v)\nprint(\"Selectivity factor of V': {} \\n\".format(SF_v_prime))\n\nC_v_prime = math.floor(SF_v_prime * c_V)\nprint(\"Cardinality output of V': {} \\n\".format(C_v_prime))\n\nR_v_len = 5 + 5\nB = 500\nR_v_prime = math.floor(B / R_v_len)\nprint(\"V' number of records per block : {} \\n\".format(R_v_prime))\n\nB_v_prime = math.ceil(C_v_prime / R_v_prime)\nprint(\"Blocks needed for V': {} \\n\".format(B_v_prime))",
"Selectivity factor of V': 0.8163265306122449 \n\nCardinality output of V': 81632 \n\nV' number of records per block : 50 \n\nBlocks needed for V': 1633 \n\n"
]
],
[
[
"**Selection over P: P'**\n\n\n\nRecord length of prodId:\n$$|R_{P'}| = 5$$\n\nSelectivity factor of selection:\n$$\n\\mathrm{SF}(A = c) = \\frac{1}{\\text{ndist}(A)}\n$$\n\nWhere $c = 'Priorat'$ and the query specifies `p.region = 'Priorat'`, then:\n\n$$\n\\text{SF}(\\text{region} = \\text{Priorat}) = \\frac{1}{30} = 0.033333\n$$\n\nOutput cardinality of P':\n$$\n|O| = \\text{SF} \\cdot |R|\n$$\n\n$$\n|P'| = \\text{SF}(\\text{region} = \\text{Priorat}) \\cdot |P| = 0.03333 \\cdot 10000 = 333\n$$\n\nNumber of records per block:\n$$\nR_{P'} = \\lfloor \\frac{B}{|R_{P'}|} \\rfloor = \\lfloor \\frac{500}{5} \\rfloor = 100\n$$\n\nNumber of blocks needed for P':\n$$\nB_{P'} = \\lceil \\frac{|P'|}{R_{P'}} \\rceil = \\lceil \\frac{333}{100} \\rceil = 4\n$$",
"_____no_output_____"
]
],
[
[
"ndist_region = 30\nSF_p_prime = 1 / ndist_region\nprint(\"Selectivity factor of P': {} \\n\".format(SF_p_prime))\n\nC_p_prime = math.floor(SF_p_prime * c_P)\nprint(\"Cardinality output of P': {} \\n\".format(C_p_prime))\n\nR_p_len = 5\nB = 500\nR_p_prime = math.floor(B / R_p_len)\nprint(\"P' number of records per block : {} \\n\".format(R_p_prime))\n\nB_p_prime = math.ceil(C_p_prime / R_p_prime)\nprint(\"Blocks needed for P': {} \\n\".format(B_p_prime))",
"Selectivity factor of P': 0.03333333333333333 \n\nCardinality output of P': 333 \n\nP' number of records per block : 100 \n\nBlocks needed for P': 4 \n\n"
]
],
[
[
"**PT1**\n\n**Join between W and V': WV'**\n\n\n\nRecord length of `strength` and `prodId`:\n$$\n|R_{WV'}| = 5 + 5\n$$\n\nSelectivity factor\n\n$$\n\\text{SF}_{WV'} = \\frac{1}{|W|} = \\frac{1}{5000} = 0.0002\n$$\n\nCardinality ouput of WV'\n$$\n|WV'| = SF_{WV'} \\cdot |W| \\cdot |V'| = \\frac{1}{5000} \\cdot 5000 \\cdot 81632 = 81632\n$$\n\nNumber of rows per block for WV':\n$$\nR_{WV'} = \\lfloor \\frac{B}{|R_{WV'}|} \\rfloor = \\lfloor \\frac{500}{5} \\rfloor = 50\n$$\n\nNumber of blocks used for WV':\n\n$$\nB_{WV'} = \\lceil \\frac{|WV'|}{R_{WV'}} \\rceil = \\lceil \\frac{81632}{50} \\rceil = 1633\n$$",
"_____no_output_____"
]
],
[
[
"SF_wv_prime = 1 / c_W\nprint(\"Selectivity factor of WV': {} \\n\".format(SF_wv_prime))\n\nC_wv_prime = math.floor(SF_wv_prime * c_W * C_v_prime)\nprint(\"Cardinality output of WV': {} \\n\".format(C_wv_prime))\n\nR_wv_prime_len = 5 + 5\nB = 500\nR_wv_prime = math.floor(B / R_wv_prime_len)\nprint(\"WV' number of records per block : {} \\n\".format(R_wv_prime))\n\nB_wv_prime = math.ceil(C_wv_prime / R_wv_prime)\nprint(\"Blocks needed for WV': {} \\n\".format(B_wv_prime))",
"Selectivity factor of WV': 0.0002 \n\nCardinality output of WV': 81632 \n\nWV' number of records per block : 50 \n\nBlocks needed for WV': 1633 \n\n"
]
],
[
[
"**Join between WV' and P': WV'P'**\n\n\n\nRecord length for `strength`:\n$$\n|R_{WV'P'}| = 5\n$$\n\nSelectivity Factor, assuming quantity and region independent\n$$\n\\text{SF(WV'} \\cdot \\text{P')} = \\frac{1}{|P'|} \\cdot \\frac{1}{ndist(\\text{region})} = \\frac{1}{333 \\cdot 30} = 10^{-4}\n$$\n\nCardinality output\n$$\n|WV'P'| = SF_{WV'P'} \\cdot |WV'| \\cdot |P'| = 10^{-4} \\cdot 81632 \\cdot 333 = 2721\n$$\n\nRecords per block\n$$\nR_{WV'P'} = \\lfloor \\frac{B}{|R_{WV'P'}|} \\rfloor = \\lfloor \\frac{500}{5} \\rfloor = 100\n$$\n\nBlocks for WV'P'\n$$\nB_{WV'P'} = \\lceil \\frac{|WV'P'|}{R_{WV'P'}} \\rceil = \\lceil \\frac{1234}{100} \\rceil = 28\n$$",
"_____no_output_____"
]
],
[
[
"SF_wvp_prime = (1 / C_p_prime) * (1 / ndist_region)\nprint(\"Selectivity factor of WV'P': {} \\n\".format(SF_wvp_prime))\n\nC_wvp_prime = math.floor(SF_wvp_prime * C_wv_prime * C_p_prime)\nprint(\"Cardinality output of WV'P': {} \\n\".format(C_wvp_prime))\n\nR_wvp_prime_len = 5\nB = 500\nR_wvp_prime = math.floor(B / R_wvp_prime_len)\nprint(\"WV'P' number of records per block : {} \\n\".format(R_wvp_prime))\n\nB_wvp_prime = math.ceil(C_wvp_prime / R_wvp_prime)\nprint(\"Blocks needed for WV'P': {} \\n\".format(B_wvp_prime))",
"Selectivity factor of WV'P': 0.00010010010010010009 \n\nCardinality output of WV'P': 2721 \n\nWV'P' number of records per block : 100 \n\nBlocks needed for WV'P': 28 \n\n"
]
],
[
[
"**PT2**\n\n\n\n**Join V' and P': V'P'**\n\nAssuming independence of variables\n\nRecord length for `wineId`\n$$\n|R_{V'P'}| = 5\n$$\n\nSelectivity factor\n$$\n\\text{SF}_{V'P'} = \\frac{1}{ndist(\\text{region})} \\cdot \\frac{1}{|P'|} = \\frac{1}{30} \\cdot \\frac{1}{333} = 10^{-4}\n$$\n\nOutput cardinality\n$$\n|V'P'| = \\text{SF}_{V'P'} \\cdot |V'| \\cdot |P'| = 10^{-4} \\cdot 81632 \\cdot 333 = 2721\n$$\n\nNumber of records per blocks\n$$\nR_{V'P'} = \\lfloor \\frac{B}{R_{V'P'}} \\rfloor = \\lfloor \\frac{500}{R_{5}} \\rfloor = 100\n$$\n\nBlocks needed for V'P'\n$$\nB_{V'P'} = \\lceil \\frac{|V'P'|}{R_{V'P'}} \\rceil = \\lceil \\frac{2721}{100} \\rceil = 28\n$$",
"_____no_output_____"
]
],
[
[
"ndist_region = 30\nSF_vp_prime = (1 / ndist_region) * (1 / C_p_prime)\nprint(\"Selectivity factor of V'P': {} \\n\".format(SF_vp_prime))\n\nC_vp_prime = math.floor(SF_vp_prime * C_v_prime * C_p_prime)\nprint(\"Cardinality output of V'P': {} \\n\".format(C_vp_prime))\n\nR_vp_len = 5\nB = 500\nR_vp_prime = math.floor(B / R_vp_len)\nprint(\"V'P' number of records per block : {} \\n\".format(R_vp_prime))\n\nB_vp_prime = math.ceil(C_vp_prime / R_vp_prime)\nprint(\"Blocks needed for V'P': {} \\n\".format(B_vp_prime))",
"Selectivity factor of V'P': 0.00010010010010010009 \n\nCardinality output of V'P': 2721 \n\nV'P' number of records per block : 100 \n\nBlocks needed for V'P': 28 \n\n"
]
],
[
[
"**Join between W and V'P': WV'P'**\n\n\n\nRecord length for WV'P'\n$$\n|R_{WV'P'}| = 5\n$$\n\nSelectivity Factor for WV'P'\n$$\n\\text{SF} = \\frac{1}{|W|} = \\frac{1}{5000} = 0.0002\n$$\n\nCardinality Output\n$$\n|WV'P'| = SF \\cdot |W| \\cdot |V'P'| = 10^{-4} \\cdot 5000 \\cdot 2721 = 2721\n$$\n\nNumber of records per block\n$$\nR_{WV'P'} = \\lfloor \\frac{B}{|R_{WV'P'}|} \\rfloor = \\lfloor \\frac{500}{5} \\rfloor = 100\n$$\n\nBlocks needes for WV'P'\n$$\nB_{WV'P'} = \\lceil \\frac{|WV'P'|}{R_{WV'P'}} \\rceil = \\lceil \\frac{2721}{100} \\rceil = 28\n$$",
"_____no_output_____"
]
],
[
[
"SF_wv_pr_p_pr = 1 / c_W\nprint(\"Selectivity factor of WV'P': {} \\n\".format(SF_wv_pr_p_pr))\n\nC_wv_pr_p_pr = math.floor(SF_wv_pr_p_pr * c_W * C_vp_prime)\nprint(\"Cardinality output of WV'P': {} \\n\".format(C_wv_pr_p_pr))\n\nR_wv_pr_p_pr_len = 5\nB = 500\nR_wv_pr_p_pr = math.floor(B / R_wv_pr_p_pr_len)\nprint(\"WV'P' number of records per block : {} \\n\".format(R_wv_pr_p_pr))\n\nB_wv_pr_p_pr = math.ceil(C_wv_pr_p_pr / R_wv_pr_p_pr)\nprint(\"Blocks needed for WV'P': {} \\n\".format(B_wv_pr_p_pr))",
"Selectivity factor of WV'P': 0.0002 \n\nCardinality output of WV'P': 2721 \n\nWV'P' number of records per block : 100 \n\nBlocks needed for WV'P': 28 \n\n"
]
],
[
[
"**PT1/PT2**\n\n**Final result = O**\n\nRecord length\n$$\n|R_O| = 5\n$$\n\nOutput cardinality\n$$\n|O| = \\text{ndist}(\\text{strength}) = 100\n$$\n\nNumber of records\n$$\nR_O = \\lfloor \\frac{B}{|R_O|} \\rfloor = \\lfloor \\frac{500}{5} \\rfloor = 100\n$$\n\nBlocks needed\n$$\nB_O = \\lceil \\frac{|O|}{R_O} \\rceil = \\lceil \\frac{100}{100} \\rceil = 1\n$$",
"_____no_output_____"
]
],
[
[
"ndist_strength = 100\nC_o = ndist_strength\nprint(\"Cardinality output of O: {} \\n\".format(C_o))\n\nR_o_len = 5\nB = 500\nR_o = math.floor(B / R_o_len)\nprint(\"O number of records per block : {} \\n\".format(R_o))\n\nB_o = math.ceil(C_o / R_o)\nprint(\"Blocks needed for O: {} \\n\".format(B_o))",
"Cardinality output of O: 100 \n\nO number of records per block : 100 \n\nBlocks needed for O: 1 \n\n"
]
],
[
[
"**Map result**\n\n",
"_____no_output_____"
],
[
"### Phase 3. Cost estimation for each algorithm\n\nRecall:\n\n$$\nu = \\frac{2}{3} \\cdot 2(75) = 100\n$$\n\n**AP1/AP2**\n\n**Selection over V: V'**\n\nRecall that for Vintages is clustered by wineId and prodId\nAvailable access paths: No index\n\n$$\n\\text{cost}_{\\text{scan}}(V') = \\lceil 1.5 B_{V} \\rceil \\cdot D = \\lceil 1.5 \\cdot 5000 \\rceil \\cdot 1 = 7500\n$$\n\nChosen algorithm: **Scan**\n\n**Selection over P: P'**\n\nAvailable access paths: B<sup>+</sup> and no index\n\nFor a table scan\n$$\n\\text{cost}_{\\text{scan}}(P') = \\lceil 1.5 B_{P} \\rceil \\cdot D = \\lceil 1.5 \\cdot 834 \\rceil \\cdot 1 = 1251\n$$\n\nTree depth of h for B<sup>+</sup> is:\n\n$$\nh = \\lceil \\log_u |P| \\rceil - 1 = \\lceil \\log_u |P| \\rceil - 1 = \\lceil \\log_{100} 10000 \\rceil - 1 = 1\n$$\n\nFor an index of several tuples\n$$\n\\begin{align}\n\\text{cost}_{B^+}(P') \n& = h \\cdot D + \\frac{|P'| - 1}{u} \\cdot D + |P'| \\cdot D \\\\\n& = h \\cdot D + \\frac{SF_{\\text{region = 'Priorat'}} \\cdot |P| - 1}{u} \\cdot D + SF_{\\text{region = 'Priorat'}} \\cdot |P| \\cdot D \\\\\n& = 1 \\cdot 1 + \\frac{{^1/_{30}} \\cdot 10000 - 1}{100} \\cdot D + {^1/_{30}} \\cdot 10000 \\cdot 1 \\\\\n& = 1 + \\frac{332}{100} + 333 \\\\\n& = 337.33\n\\end{align}\n$$\n\nChosen algorithm: **B<sup>+</sup>**\n\n",
"_____no_output_____"
]
],
[
[
"load = 2/3\nd = 75\nu = load * (2 * d)\nh = math.ceil(math.log(c_P, u)) - 1\nD = 1\nprint(\"load is: {}\\nd is: {}\\nu is: {}\\nh is: {}\\nD is: {}\\n\".format(load, d, u, h, D))\ncost_scan_p = math.ceil(1.5 * B_p) * D\ncost_bplus_p = (h * D) + ((C_p_prime / u) * D) + (C_p_prime * D)\nprint(\"Cost of scan is: {} \\nCost of B+ is: {}\".format(cost_scan_p, cost_bplus_p))",
"load is: 0.6666666666666666\nd is: 75\nu is: 100.0\nh is: 1\nD is: 1\n\nCost of scan is: 1251 \nCost of B+ is: 337.33\n"
]
],
[
[
"**PT1**\n\n**Join over W and V': WV'**\n\nAvailable algorithms: Block Nested Loops (BML), Row Nested Loops (RML) and Sort-Match (SM)\n\n*Block Nested Loops*\n\nRecall:\n\n$$\nM = 4\n$$\n\n$\\lceil 1.5 B_{W} \\rceil < B_{V'}$ we use the commutative property of joins\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{BML}}(WV') \n& = \\lceil 1.5 B_{W} \\rceil + \\lceil \\frac{1.5 B_{W}}{M} \\rceil \\cdot B_{V'} \\\\\n& = \\lceil 1.5 \\cdot 500 \\rceil + \\lceil \\frac{1.5\\cdot 500}{4} \\rceil \\cdot 1633 \\\\\n& = 307,754\n\\end{align}\n$$\n\n*Row Nested Loops*\n\nLook for attributes of W\n\n$V'$ does not use extra space any more for being ordered\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{RML}}(WV') \n& = B_{V'} + |V'| \\cdot \\left( \\lceil \\log_u |W| \\rceil - 1 + 1 + (\\frac{1.5(k-1)}{10} \\right) \\\\\n& = 1633 + 81,632 \\cdot \\left( \\lceil \\log_{100} 5000 \\rceil - 1 + 1 \\right) \\\\\n& = 164,887\n\\end{align}\n$$\n\n<span style='color:red'>Note: This wasn't explained. Maybe $k = 1$ but needs confirmation.</span>\n\n*Sort-Match*\n\n$W$ is ordered by `wineID`, $V'$ is still ordered y `wineID` and `prodID`.\n\n$$\n\\text{cost}_{\\text{SM}}(WV') = \\lceil 1.5 B_{W} \\rceil + B_{V'} = \\lceil 1.5 \\cdot 500 \\rceil + 1633 = 2383\n$$\n\nChosen algorithm: **Sort-Match**",
"_____no_output_____"
],
[
"**Join between WV' and P': WV'P'**\n\n*Block Nested Loops*\n\n$B_{P'} < B_{WV'}$ we use the commutative property of joins\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{BML}}(WV'P') \n& = B_{P'} + \\lceil \\frac{B_{P'}}{M} \\rceil \\cdot B_{WV'} \\\\\n& = 4 + \\lceil \\frac{4}{4} \\rceil \\cdot 1633 \\\\\n& = 1637\n\\end{align}\n$$\n\n<span style='color:red'>Note: It isn't explained why BML is analyzed but not RML.</span>\n\n*Sort-Match*\n\nNeither WV’ nor P’ are ordered by `prodID`\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{SM}}(WV'P') \n&= 2 B_{WV'} \\cdot \\lceil \\log_2 B_{WV'} \\rceil + 2 B_{P'} \\cdot \\lceil \\log_2 B_{P'} \\rceil + B_{WV'} + B_{P'}\\\\\n&= 2 \\cdot 1633 \\cdot \\lceil \\log_2 1633 \\rceil + 2 \\cdot 4 \\cdot \\lceil \\log_2 4 \\rceil + 1633 + 4 \\\\\n&= 37,579\n\\end{align}\n$$\n\nChosen algorithm: **Block Nested Loop**",
"_____no_output_____"
]
],
[
[
"print(\"B_p' is {}\\nB_wv' is {}\".format(B_p_prime, B_wv_prime))",
"B_p' is 4\nB_wv' is 1633\n"
],
[
"(2 * B_wv_prime * math.ceil(math.log(B_wv_prime, 2))) + (2 * B_p_prime * math.ceil(math.log(B_p_prime, 2))) + B_wv_prime + B_p_prime",
"_____no_output_____"
]
],
[
[
"**PT2**\n\n**Join between V' and P': V'P'\n\nAvailable algorithms: BNL and SM.\n\n*Block Nested Loops*\n\n$B_{P'} < B_{V'}$ we use the commutative property of joins\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{BML}}(V'P') \n& = B_{P'} + \\lceil \\frac{B_{P'}}{M} \\rceil \\cdot B_{V'} \\\\\n& = 4 + \\lceil \\frac{4}{4} \\rceil \\cdot 1633 \\\\\n& = 1637\n\\end{align}\n$$\n\n*Sort-Match*\n\nNeither V’ nor P’ are ordered by `prodID`\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{SM}}(V'P') \n&= 2 B_{V'} \\cdot \\lceil \\log_2 B_{V'} \\rceil + 2 B_{P'} \\cdot \\lceil \\log_2 B_{P'} \\rceil + B_{V'} + B_{P'}\\\\\n&= 2 \\cdot 1633 \\cdot \\lceil \\log_2 1633 \\rceil + 2 \\cdot 4 \\cdot \\lceil \\log_2 4 \\rceil + 1633 + 4 \\\\\n&= 37,579\n\\end{align}\n$$\n\nChosen algorithm: **Block Nested Loop**",
"_____no_output_____"
]
],
[
[
"print(\"B_p' is {}\\nB_v' is {}\".format(B_p_prime, B_v_prime))",
"B_p' is 4\nB_v' is 1633\n"
]
],
[
[
"**Join between W and V'P': WV'P'**\n\nAvailable algorithms: Block Nested Loops (BML), Row Nested Loops (RML) and Sort-Match (SM)\n\n*Block Nested Loops*\n\n$B_{V'P'} < \\lceil 1.5 B_{W} \\rceil$ we use the commutative property of joins\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{BML}}(WV'P') \n& = B_{V'P'} + \\lceil \\frac{B_{V'P'}}{M} \\rceil \\cdot \\lceil 1.5 B_{W} \\rceil \\\\\n& = 28 + \\lceil \\frac{28}{4} \\rceil \\cdot \\lceil 750 \\rceil \\\\\n& = 5278\n\\end{align}\n$$\n\n*Row Nested Loops*\n\nLook for attributes of W\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{RML}}(WV'P') \n& = B_{V'P'} + |V'P'| \\cdot \\left( \\lceil \\log_u |W| \\rceil - 1 + 1 + (\\frac{1.5(k-1)}{10} \\right) \\\\\n& = 28 + 2721 \\cdot \\left( \\lceil \\log_{100} 5000 \\rceil - 1 + 1 \\right) \\\\\n& = 5470\n\\end{align}\n$$\n\n*Sort-Match*\n\nW is ordered by `wineID`, V'P' is not sorted by `wineID`\n\n$$\n\\begin{align}\n\\text{cost}_{\\text{SM}}(WV'P') \n&= 2 B_{V'P'} \\cdot \\lceil \\log_2 B_{V'P'} \\rceil + \\lceil 1.5 B_{W} \\rceil + B_{V'P'} \\\\\n&= 2 \\cdot 28 \\cdot \\lceil \\log_2 28 \\rceil + \\lceil 1.5 \\cdot 500 \\rceil + 28 \\\\\n&= 1058\n\\end{align}\n$$\n\nChosen algorithm: **Sort-Match**",
"_____no_output_____"
]
],
[
[
"print(\"B_v'p' is {}\\n1.5*B_w is {}\\n|V'P'| is {}\".format(B_vp_prime, math.ceil(1.5*B_w), C_vp_prime))",
"B_v'p' is 28\n1.5*B_w is 750\n|V'P'| is 2721\n"
],
[
"28 + math.ceil(28/4) * 750",
"_____no_output_____"
],
[
"28+(2721*(math.ceil(math.log(5000, 100)) - 1 + 1))",
"_____no_output_____"
],
[
"Cost_v_prime = 1633 + 7500\nCost_p_prime = 4 + 337\nCost_wv = 1633 + 2383\nCost_vp = 28 + 1637\nCost_wvp_pt1 = 28 + 1637\nCost_wvp_pt2 = 28 + 1058\nCost_o = 1 + 252\nCost_pt1 = Cost_v_prime + Cost_p_prime + Cost_wv + Cost_wvp_pt1 + Cost_o\nCost_pt2 = Cost_v_prime + Cost_p_prime + Cost_vp + Cost_wvp_pt2 + Cost_o\nprint(\"Total cost of:\\nPT1: {}\\nPT2: {}\".format(Cost_pt1, Cost_pt2))",
"Total cost of:\nPT1: 15408\nPT2: 12478\n"
]
],
[
[
"**Map result**\n\n<span style=\"color:red\">Output algorithm is Merge Sort but it's not explained, nor its cost calculation</span>\n\n\n\n### Phase 4. Choose the best option\n\n**PT2**",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0491b90c48d02ebe7f29aa0248a6d59d68a3984 | 4,542 | ipynb | Jupyter Notebook | webinar_1/Lesson 1.ipynb | superbe/KagglePlatform | 970400086b5886629987d03e5229cc736523e53a | [
"MIT"
] | null | null | null | webinar_1/Lesson 1.ipynb | superbe/KagglePlatform | 970400086b5886629987d03e5229cc736523e53a | [
"MIT"
] | null | null | null | webinar_1/Lesson 1.ipynb | superbe/KagglePlatform | 970400086b5886629987d03e5229cc736523e53a | [
"MIT"
] | null | null | null | 21.027778 | 410 | 0.56605 | [
[
[
"# Спортивный анализ данных. Платформа Kaggle",
"_____no_output_____"
],
[
"## Урок 1. Введение в спортивный анализ данных, Exploration Data Analysis",
"_____no_output_____"
],
[
"### Домашняя работа к уроку 1",
"_____no_output_____"
],
[
"Ссылка на наборы данных: https://drive.google.com/file/d/1j8zuKbI-PW5qKwhybP4S0EtugbPqmeyX/view?usp=sharing",
"_____no_output_____"
],
[
"#### Задание 1",
"_____no_output_____"
],
[
"Сделать базовый анализ данных: вывести размерность датасетов, посчитать базовые статистики, выполнить анализ пропусков, сделать выводы.",
"_____no_output_____"
]
],
[
[
"# В работе. Как-то все наложилось. Надеюсь на этой неделе все нагнать. \n# Посмотрел. Очень серьезный курс, темы сложные. Зря его вынесли во вне четверти.",
"_____no_output_____"
]
],
[
[
"#### Задание 2",
"_____no_output_____"
],
[
"Сделать базовый анализ целевой переменной, сделать выводы;",
"_____no_output_____"
]
],
[
[
"# В работе",
"_____no_output_____"
]
],
[
[
"#### Задание 3",
"_____no_output_____"
],
[
"Построить распределение признаков в зависимости от значения целевой переменной и распределение признаков для обучающей и тестовой выборки (если машина не позволяет построить распределение для всех признаков, то выполнить задание для признаков var_0, var_1, var_2, var_5, var_9, var_10, var_13, var_20, var_26, var_40, var_55, var_80, var_106, var_109, var_139, var_175, var_184, var_196), сделать выводы;",
"_____no_output_____"
]
],
[
[
"# В работе",
"_____no_output_____"
]
],
[
[
"#### Задание 4",
"_____no_output_____"
],
[
"Построить распределение основных статистики признаков (среднее, стандартное отклонение) в разрезе целевой переменной и распределение основных статистик обучающей и тестовой выборки, сделать выводы;",
"_____no_output_____"
]
],
[
[
"# В работе",
"_____no_output_____"
]
],
[
[
"#### Задание 5",
"_____no_output_____"
],
[
"Построить распределение коэффициентов корреляции между признаками. Есть ли зависимость между признаками (будем считать, что связь между признаками отсутствует, если коэффициент корреляции < 0.2)?",
"_____no_output_____"
]
],
[
[
"# В работе",
"_____no_output_____"
]
],
[
[
"#### Задание 6",
"_____no_output_____"
],
[
"Выявить 10 признаков, которые обладают наибольшей нелинейной связью с целевой переменной.",
"_____no_output_____"
]
],
[
[
"# В работе",
"_____no_output_____"
]
],
[
[
"#### Задание 7",
"_____no_output_____"
],
[
"Провести анализ идентичности распределения признаков на обучающей и тестовой выборках, сделать выводы.",
"_____no_output_____"
]
],
[
[
"# В работе",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
d0492745b3161f4e4f9e90fca9c2eabf33874f02 | 2,231 | ipynb | Jupyter Notebook | Euler 206 - Concealed square.ipynb | Radcliffe/project-euler | 5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38 | [
"MIT"
] | 6 | 2016-05-11T18:55:35.000Z | 2019-12-27T21:38:43.000Z | Euler 206 - Concealed square.ipynb | Radcliffe/project-euler | 5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38 | [
"MIT"
] | null | null | null | Euler 206 - Concealed square.ipynb | Radcliffe/project-euler | 5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38 | [
"MIT"
] | null | null | null | 25.643678 | 160 | 0.532048 | [
[
[
"Euler Problem 206\n=================\n\nFind the unique positive integer whose square has the form \n 1_2_3_4_5_6_7_8_9_0,\n\nwhere each \"_\" is a single digit.",
"_____no_output_____"
]
],
[
[
"from itertools import product\nfor a, b, c, d in product(range(10), repeat=4):\n N = 10203040596979899\n N += a*10**15 + b*10**13 + c*10**11 + d*10**9\n sqrtN = int(N**0.5)\n s = str(sqrtN**2)\n if s[0:17:2] == '123456789':\n print(sqrtN * 10)\n break",
"1389019170\n"
]
],
[
[
"**Explanation:** Note that if a square is divisible by 10 then it is also divisible by 100, so we must place 0 in the last blank space. Dividing by 100\nyields another square, which has the form\n 1_2_3_4_5_6_7_8_9.\n \n\nWe insert digits a, b, c, d into the first 4 blank spaces:\n\n 1a2b3c4d5_6_7_8_9\n \nThere is at most one way to fill in the remaining blanks to obtain a square,\nbecause the difference between two distinct 17-digit squares is at least\n$(10^8 + 1)^2 - (10^8)^2 = 2\\times 10^8 + 1$, so the first nine digits cannot\nagree.\n\nTo find the candidate square root, we fill in the remaining blanks with 9s,\ncompute the square root, and round down to the nearest integer.\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0492b529b8697b94ce0f4e426f860c38e1674ef | 133,316 | ipynb | Jupyter Notebook | S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb | LDSSA/batch4-students | c0547ee0cf10645a0244336c976b304cff2f2000 | [
"MIT"
] | 19 | 2020-06-10T09:24:18.000Z | 2022-01-25T15:19:29.000Z | S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb | LDSSA/batch4-students | c0547ee0cf10645a0244336c976b304cff2f2000 | [
"MIT"
] | 25 | 2020-05-16T14:25:41.000Z | 2022-03-12T00:41:55.000Z | S06 - DS in the Real World/BLU15 - Model CSI/BLU15 - Learning Unit - Model CSI.ipynb | LDSSA/batch4-students | c0547ee0cf10645a0244336c976b304cff2f2000 | [
"MIT"
] | 9 | 2020-08-04T22:08:14.000Z | 2021-12-16T17:24:30.000Z | 37.291189 | 18,044 | 0.487728 | [
[
[
"# BLU15 - Model CSI",
"_____no_output_____"
],
[
"## Intro:\n\nIt often happens that your data distribution changes with time. \n\nMore than that, sometimes you don't know how a model was trained and what was the original training data. \n\nIn this learning unit we're going to try to identify whether an existing model meets our expectations and redeploy it. ",
"_____no_output_____"
],
[
"## Problem statement:\nAs an example, we're going to use the same problem that you met in the last BLU. ",
"_____no_output_____"
],
[
"You're already familiar with the problem, but just as a reminder:",
"_____no_output_____"
],
[
"> The police department has received lots of complaints about its stop and search policy. Every time a car is stopped, the police officers have to decide whether or not to search the car for contraband. According to critics, these searches have a bias against people of certain backgrounds.",
"_____no_output_____"
],
[
"You got a model from your client, and **here is the model's description:**\n\n> It's a LightGBM model (LGBMClassifier) trained on the following features:\n> - Department Name\n> - InterventionLocationName\n> - InterventionReasonCode\n> - ReportingOfficerIdentificationID\n> - ResidentIndicator\n> - SearchAuthorizationCode\n> - StatuteReason\n> - SubjectAge\n> - SubjectEthnicityCode\n> - SubjectRaceCode\n> - SubjectSexCode\n> - TownResidentIndicator\n\n> All the categorical feature were one-hot encoded. The only numerical feature (SubjectAge) was not changed. The rows that contain rare categorical features (the ones that appear less than N times in the dataset) were removed. Check the original_model.ipynb notebook for more details.",
"_____no_output_____"
],
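Below is a hedged sketch of how such a pipeline could be assembled with scikit-learn and lightgbm. The column lists come from the description above; the encoder settings and the (default) hyperparameters are assumptions rather than the client's actual configuration, and the rare-value filtering step is omitted.

```python
# Hedged sketch of the described training pipeline (assumed, not the client's code)
from lightgbm import LGBMClassifier
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

categorical = [
    "Department Name", "InterventionLocationName", "InterventionReasonCode",
    "ReportingOfficerIdentificationID", "ResidentIndicator",
    "SearchAuthorizationCode", "StatuteReason", "SubjectEthnicityCode",
    "SubjectRaceCode", "SubjectSexCode", "TownResidentIndicator",
]
numerical = ["SubjectAge"]

pipeline = Pipeline([
    ("encode", ColumnTransformer(
        [("onehot", OneHotEncoder(handle_unknown="ignore"), categorical)],
        remainder="passthrough",  # SubjectAge passes through unchanged
    )),
    ("model", LGBMClassifier()),
])
# train_df is hypothetical; fitting would look like:
# pipeline.fit(train_df[categorical + numerical], train_df["ContrabandIndicator"])
```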
[
"P.S., if you never heard about lightgbm, XGboost and other gradient boosting, I highly recommend you to read this [article](https://mlcourse.ai/articles/topic10-boosting/) or watch these videos: [part1](https://www.youtube.com/watch?v=g0ZOtzZqdqk), [part2](https://www.youtube.com/watch?v=V5158Oug4W8)\n\nIt's not essential for this BLU, so you might leave this link as a desert after you go through the learning materials and solve the exercises, but these are very good models you can use later on, so I suggest reading about them. ",
"_____no_output_____"
],
[
"**Here are the requirements that the police department created:**\n\n> - A minimum 50% success rate for searches (when a car is searched, it should be at least 50% likely that contraband is found)\n> - No police sub-department should have a discrepancy bigger than 5% between the search success rate between protected classes (race, ethnicity, gender) \n> - The largest possible amount of contraband found, given the constraints above. ",
"_____no_output_____"
],
[
"**And here is the description of how the current model succeeds with the requirements:**\n\n- precision score = 50%\n- recall = 89.3%\n- roc_auc_score for the probability predictions = 82.7%\n\nThe precision and recall above are met for probability predictions with a specified threshold equal to **0.21073452797732833**\n\nIt's not said whether the second requirement is met, and as it was not met in the previous learning unit, let's ignore it for now. ",
"_____no_output_____"
],
[
"## Model diagnosing:",
"_____no_output_____"
],
[
"\nLet's firstly try to compare these models to the ones that we created in the previous BLU:\n\n\n\n| Model | Baseline | Second iteration | New model | Best model |\n|-------------------|---------|--------|--------|--------| \n| Requirement 1 - success rate | 0.53 | 0.38 | 0.5 | 1 |\n| Requirement 2 - global discrimination (race) | 0.105 | 0.11 | NaN | 1 |\n| Requirement 2 - global discrimination (sex) | 0.012 | 0.014 | NaN | 1 |\n| Requirement 2 - global discrimination (ethnicity) | 0.114 | 0.101 | NaN | 2 | \n| Requirement 2 - # department discrimination (race) | 27 | 17 | NaN | 2 |\n| Requirement 2 - # department discrimination (sex) | 19 | 23 | NaN | 1 |\n| Requirement 2 - # department discrimination (ethnicity) | 24 | NaN | 23 | 2 |\n| Requirement 3 - contraband found (Recall) | 0.65 | 0.76 | 0.893 | 3 | \n",
"_____no_output_____"
],
[
"As we can see, the last model has the exact required success rate (Requirement 1) as we need, and a very good Recall (Requirement 3).\n\nBut it might be risky to have such a specific threshold, as we might end up success rate < 0.5 really quickly. It might be a better idea to have a bigger threshold (e.g. 0.25), but let's see.",
"_____no_output_____"
],
[
"Let's imagine that the model was trained long time ago.\n\nAnd now you're in the future trying to evaluate the model, because things might have changed. Data distribution is not always the same, so something that used to work even a year ago could be completely wrong today. \n\nEspecially in 2020!",
"_____no_output_____"
],
[
"<img src=\"media/future_2020.jpg\" width=400/>",
"_____no_output_____"
],
[
"First of all, let's start the server which is running this model.\n\nOpen the shell, ",
"_____no_output_____"
],
[
"```sh\n\npython protected_server.py\n\n\n```",
"_____no_output_____"
],
[
"And read a csv files with new observations from 2020:",
"_____no_output_____"
]
],
[
[
"import joblib\nimport pandas as pd\nimport json\nimport joblib\nimport pickle\nfrom sklearn.metrics import precision_score, recall_score, roc_auc_score\nfrom sklearn.metrics import confusion_matrix\nimport requests\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom sklearn.metrics import precision_recall_curve\n\n%matplotlib inline",
"_____no_output_____"
],
[
"df = pd.read_csv('./data/new_observations.csv')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"Let's start from sending all those requests and comparing the model prediction results with the target values.",
"_____no_output_____"
],
[
"The model is already prepared to convert our observations to the format its expecting, the only thing we need to change is making department and intervention location names lowercase, and we're good to extract fields from the dataframe and put them to the post request.",
"_____no_output_____"
]
],
[
[
"# lowercaes departments and location names\ndf['Department Name'] = df['Department Name'].apply(lambda x: str(x).lower())\ndf['InterventionLocationName'] = df['InterventionLocationName'].apply(lambda x: str(x).lower())",
"_____no_output_____"
],
[
"url = \"http://127.0.0.1:5000/predict\"\nheaders = {'Content-Type': 'application/json'}",
"_____no_output_____"
],
[
"def send_request(index: int, obs: dict, url: str, headers: dict):\n observation = {\n \"id\": index,\n \"observation\": {\n \"Department Name\": obs[\"Department Name\"],\n \"InterventionLocationName\": obs[\"InterventionLocationName\"],\n \"InterventionReasonCode\": obs[\"InterventionReasonCode\"],\n \"ReportingOfficerIdentificationID\": obs[\"ReportingOfficerIdentificationID\"],\n \"ResidentIndicator\": obs[\"ResidentIndicator\"],\n \"SearchAuthorizationCode\": obs[\"SearchAuthorizationCode\"],\n \"StatuteReason\": obs[\"StatuteReason\"],\n \"SubjectAge\": obs[\"SubjectAge\"],\n \"SubjectEthnicityCode\": obs[\"SubjectEthnicityCode\"],\n \"SubjectRaceCode\": obs[\"SubjectRaceCode\"],\n \"SubjectSexCode\": obs[\"SubjectSexCode\"],\n \"TownResidentIndicator\": obs[\"TownResidentIndicator\"]\n }\n }\n\n r = requests.post(url, data=json.dumps(observation), headers=headers)\n result = json.loads(r.text)\n return result",
"_____no_output_____"
],
[
"responses = [send_request(i, obs, url, headers) for i, obs in df.iterrows()]",
"_____no_output_____"
],
[
"print(responses[0])",
"{'prediction': False, 'proba': 0.20558802427270148}\n"
],
[
"df['proba'] = [r['proba'] for r in responses]\nthreshold = 0.21073452797732833\n# we're going to use the threshold we got from the client\ndf['prediction'] = [1 if p >= threshold else 0 for p in df['proba']]",
"_____no_output_____"
]
],
[
[
"**NOTE:** We could also load the model and make predictions locally (without using the api), but:\n\n1. I wanted to show you how you might send requests in a similar situation\n2. If you have a running API and some model file, you always need to understand how the API works (if it makes any kind of data preprocessing), which might sometimes be complicated, and if you're trying to analyze the model running in production, you still need to make sure that the local predictions you do are equal to the one that the production api does.",
"_____no_output_____"
]
],
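For completeness, here is a minimal sketch of the "predict locally" alternative mentioned in the note above. The pickle file name is hypothetical (it depends on how the API stores its serialised pipeline), and the sketch assumes the loaded pipeline accepts the same raw columns that the API receives.

```python
# Hedged sketch: compare local predictions with the API's answers.
# "pipeline.pickle" and the feature list are assumptions for illustration.
import joblib
import numpy as np

feature_columns = [
    "Department Name", "InterventionLocationName", "InterventionReasonCode",
    "ReportingOfficerIdentificationID", "ResidentIndicator",
    "SearchAuthorizationCode", "StatuteReason", "SubjectAge",
    "SubjectEthnicityCode", "SubjectRaceCode", "SubjectSexCode",
    "TownResidentIndicator",
]

local_pipeline = joblib.load("pipeline.pickle")  # hypothetical file name
local_probas = local_pipeline.predict_proba(df[feature_columns])[:, 1]

# If everything matches, the API and the local model agree on every observation
print(np.allclose(local_probas, df["proba"], atol=1e-6))
```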
[
[
"confusion_matrix(df['ContrabandIndicator'], df['prediction'])",
"_____no_output_____"
]
],
[
[
"If you're not familiar with confusion matrixes, **here is an explanation of the values:**",
"_____no_output_____"
],
[
"<img src=\"./media/confusion_matrix.jpg\" alt=\"drawing\" width=\"500\"/>\n",
"_____no_output_____"
],
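As a quick illustration of how the two requirement metrics map onto the confusion matrix cells above (the counts here are invented, not the notebook's results):

```python
# Invented confusion-matrix counts, for illustration only
tn, fp, fn, tp = 900, 400, 50, 400

success_rate = tp / (tp + fp)  # precision: searches that actually found contraband
amount_found = tp / (tp + fn)  # recall: share of all contraband that was found

print(f"precision = {success_rate:.2f}, recall = {amount_found:.2f}")
```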
[
"\nThese values don't seem to be good. Let's once again take a look on the client's requirements and see if we still meet them:",
"_____no_output_____"
],
[
"> A minimum 50% success rate for searches (when a car is searched, it should be at least 50% likely that contraband is found)\n",
"_____no_output_____"
]
],
[
[
"def verify_success_rate_above(y_true, y_pred, min_success_rate=0.5):\n \"\"\"\n Verifies the success rate on a test set is above a provided minimum\n\n \n \"\"\"\n \n precision = precision_score(y_true, y_pred, pos_label=True)\n is_satisfied = (precision >= min_success_rate)\n \n return is_satisfied, precision\n",
"_____no_output_____"
],
[
"verify_success_rate_above(df['ContrabandIndicator'], df['prediction'], 0.5)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"> The largest possible amount of contraband found, given the constraints above.\n\nAs the client says, their model recall was 0.893. And what now?",
"_____no_output_____"
]
],
[
[
"def verify_amount_found(y_true, y_pred):\n \"\"\"\n Verifies the amout of contraband found in the test dataset - a.k.a the recall in our test set\n \"\"\"\n \n recall = recall_score(y_true, y_pred, pos_label=True) \n return recall\n",
"_____no_output_____"
],
[
"verify_amount_found(df['ContrabandIndicator'], df['prediction'])",
"_____no_output_____"
]
],
[
[
"<img src=\"./media/no_please_2.jpg\" alt=\"drawing\" width=\"500\"/>\n",
"_____no_output_____"
],
[
"Okay, relax, it happens. Let's start from checking different thresholds. Maybe the selected threshold was to specific and doesn't work anymore. \nWhat about 0.25?",
"_____no_output_____"
]
],
[
[
"threshold = 0.25\ndf['prediction'] = [1 if p >= threshold else 0 for p in df['proba']]",
"_____no_output_____"
],
[
"verify_success_rate_above(df['ContrabandIndicator'], df['prediction'], 0.5)",
"_____no_output_____"
],
[
"verify_amount_found(df['ContrabandIndicator'], df['prediction'])",
"_____no_output_____"
]
],
[
[
"<img src=\"./media/poker.jpg\" alt=\"drawing\" width=\"200\"/>",
"_____no_output_____"
],
[
"Okay, let's try the same technique to identify the best threshold as they originally did. Maybe we find something good enough.",
"_____no_output_____"
],
[
"It's not a good idea to verify such things on the test data, but we're going to use it just to confirm the model's performance, not to select the threshold.",
"_____no_output_____"
]
],
[
[
"precision, recall, thresholds = precision_recall_curve(df['ContrabandIndicator'], df['proba'])",
"_____no_output_____"
],
[
"precision = precision[:-1]\nrecall = recall[:-1]",
"_____no_output_____"
],
[
"fig=plt.figure()\nax1 = plt.subplot(211)\nax2 = plt.subplot(212)\nax1.hlines(y=0.5,xmin=0, xmax=1, colors='red')\nax1.plot(thresholds,precision)\nax2.plot(thresholds,recall)\nax1.get_shared_x_axes().join(ax1, ax2)\nax1.set_xticklabels([])\nplt.xlabel('Threshold')\nax1.set_title('Precision')\nax2.set_title('Recall')\nplt.show()",
"_____no_output_____"
]
],
[
[
"So what do we see? There is some threshold value (around 0.6) that gives us precision >= 0.5. \n\nBut the threshold is so big, that the recall at this point is really-really low. \n\nLet's calculate the exact values:",
"_____no_output_____"
]
],
[
[
"min_index = [i for i, prec in enumerate(precision) if prec >= 0.5][0]\nprint(min_index)",
"1474\n"
],
[
"thresholds[min_index]",
"_____no_output_____"
],
[
"precision[min_index]",
"_____no_output_____"
],
[
"recall[min_index]",
"_____no_output_____"
]
],
[
[
"<img src=\"./media/incredible.jpg\" alt=\"drawing\" width=\"400\"/>",
"_____no_output_____"
],
[
"\nBefore we move on, we need to understand why this happens, so that we can decide what kind of action to perform.",
"_____no_output_____"
],
[
"Let's try to analyze the changes in data and discuss different things we might want to do.",
"_____no_output_____"
]
],
[
[
"old_df = pd.read_csv('./data/train_searched.csv')",
"_____no_output_____"
],
[
"old_df.head()",
"_____no_output_____"
]
],
[
[
"We're going to apply the same changes to the dataset as in the original model notebook unit to understand what was the original data like and how the current dataset differs.",
"_____no_output_____"
]
],
[
[
"old_df = old_df[(old_df['VehicleSearchedIndicator']==True)]",
"_____no_output_____"
],
[
"# lowercaes departments and location names\nold_df['Department Name'] = old_df['Department Name'].apply(lambda x: str(x).lower())\nold_df['InterventionLocationName'] = old_df['InterventionLocationName'].apply(lambda x: str(x).lower())",
"_____no_output_____"
],
[
"train_features = old_df.columns.drop(['VehicleSearchedIndicator', 'ContrabandIndicator'])\ncategorical_features = train_features.drop(['InterventionDateTime', 'SubjectAge'])\nnumerical_features = ['SubjectAge']\ntarget = 'ContrabandIndicator'",
"_____no_output_____"
],
[
"# I'm going to remove less common features. \n# Let's create a dictionary with the minimum required number of appearences\nmin_frequency = {\n \"Department Name\": 50,\n \"InterventionLocationName\": 50,\n \"ReportingOfficerIdentificationID\": 30,\n \"StatuteReason\": 10\n}",
"_____no_output_____"
],
[
"def filter_values(df: pd.DataFrame, column_name: str, threshold: int):\n value_counts = df[column_name].value_counts()\n to_keep = value_counts[value_counts > threshold].index\n filtered = df[df[column_name].isin(to_keep)]\n return filtered",
"_____no_output_____"
],
[
"for feature, threshold in min_frequency.items():\n old_df = filter_values(old_df, feature, threshold)",
"_____no_output_____"
],
[
"old_df.shape",
"_____no_output_____"
],
[
"old_df.head()",
"_____no_output_____"
],
[
"old_df['ContrabandIndicator'].value_counts(normalize=True)",
"_____no_output_____"
],
[
"df['ContrabandIndicator'].value_counts(normalize=True)",
"_____no_output_____"
]
],
[
[
"Looks like we got a bit more contraband now, and it's already a good sign:\n\nif the training data had a different target feature distribution than the test set, the model's predictions might have a different distribution as well. It's a good practice to have the same target feature distribution both in training and test sets. \n\nLet's investigate further",
"_____no_output_____"
]
],
[
[
"new_department_names = df['Department Name'].unique()\nold_department_names = old_df['Department Name'].unique()\n\nunknown_departments = [department for department in new_department_names if department not in old_department_names]\nlen(unknown_departments)",
"_____no_output_____"
],
[
"df[df['Department Name'].isin(unknown_departments)].shape",
"_____no_output_____"
]
],
[
[
"So we have 10 departments that the original model was not trained on, but they are only 23 rows from the test set. \n\nLet's repeat the same thing for the Intervention Location names",
"_____no_output_____"
]
],
[
[
"new_location_names = df['InterventionLocationName'].unique()\nold_location_names = old_df['InterventionLocationName'].unique()\n\nunknown_locations = [location for location in new_location_names if location not in old_location_names]\nlen(unknown_locations)",
"_____no_output_____"
],
[
"df[df['InterventionLocationName'].isin(unknown_locations)].shape[0]",
"_____no_output_____"
],
[
"print('unknown locations: ', df[df['InterventionLocationName'].isin(unknown_locations)].shape[0] * 100 / df.shape[0], '%')",
"unknown locations: 5.3 %\n"
]
],
[
[
"Alright, a bit more of unknown locations. \n\nWe don't know if the feature was important for the model, so these 5.3% of unknown locations might be important or not.\n\nBut it's worth keeping it in mind. \n\n**Here are a few ideas of what we could try to do:**\n\n1. Reanalyze the filtered locations, e.g. filter more rare ones.\n2. Create a new category for the rare locations\n3. Analyze the unknown locations for containing typos\n\nLet's go further and take a look on the relation between department names and the number of contrabands they find.\n\nWe're going to select the most common department names, and then see the percentage of contraband indicator in each one for the training and test sets",
"_____no_output_____"
]
],
[
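[
"# A minimal sketch of idea 2 from the list above (illustration only -- the threshold value and\n# variable names here are assumptions, not part of the original pipeline): bucket locations rarer\n# than the threshold into a single 'other' category, on a copy of df so the analysis below is unaffected.\nrare_threshold = 50\nlocation_counts = df['InterventionLocationName'].value_counts()\nrare_locations = location_counts[location_counts <= rare_threshold].index\ndf_bucketed = df.copy()\ndf_bucketed.loc[df_bucketed['InterventionLocationName'].isin(rare_locations), 'InterventionLocationName'] = 'other'\ndf_bucketed['InterventionLocationName'].value_counts().head()",
"_____no_output_____"
],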
[
"common_departments = df['Department Name'].value_counts().head(20).index",
"_____no_output_____"
],
[
"departments_new = df[df['Department Name'].isin(common_departments)]\ndepartments_old = old_df[old_df['Department Name'].isin(common_departments)]",
"_____no_output_____"
],
[
"pd.crosstab(departments_new['ContrabandIndicator'], departments_new['Department Name'], normalize=\"columns\")",
"_____no_output_____"
],
[
"pd.crosstab(departments_old['ContrabandIndicator'], departments_old['Department Name'], normalize=\"columns\")",
"_____no_output_____"
]
],
[
[
"We can clearly see that some departments got a huge difference in the contraband indicator.\n\nE.g. Bridgeport used to have 93% of False contrabands, and now has only 62%.\n\nSimilar situation with Danbury and New Haven. \n\nWhy? Hard to say. There are really a lot of variables here. Maybe the departments got instructed on how to look for contraband. \n\nBut we might need to retrain the model. \n\nLet's just finish reviewing other columns.",
"_____no_output_____"
]
],
[
[
"common_location = df['InterventionLocationName'].value_counts().head(20).index",
"_____no_output_____"
],
[
"locations_new = df[df['InterventionLocationName'].isin(common_location)]\nlocations_old = old_df[old_df['InterventionLocationName'].isin(common_location)]",
"_____no_output_____"
],
[
"pd.crosstab(locations_new['ContrabandIndicator'], locations_new['InterventionLocationName'], normalize=\"columns\")",
"_____no_output_____"
],
[
"pd.crosstab(locations_old['ContrabandIndicator'], locations_old['InterventionLocationName'], normalize=\"columns\")",
"_____no_output_____"
]
],
[
[
"What do we see? First of all, the InterventionLocationName and the Department Name are often same.\n\nIt sounds pretty logic, as probably policeman's usually work in the area of their department. But we could try to create a feature saying whether InterventionLocationName is equal to the Department Name.\n\nOr maybe we could just get rid of one of them, if all the values are equal. \n\nWhat else?\n\nWell, There are similar changes in the Contraband distribution as in Department Name case.\n\nLet's move on:",
"_____no_output_____"
]
],
[
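[
"# A minimal sketch of the feature idea mentioned above (illustration only; the variable name is an\n# assumption): a binary flag for whether the stop location equals the department name.\nlocation_matches_department = (df['InterventionLocationName'] == df['Department Name']).astype(int)\nlocation_matches_department.mean()",
"_____no_output_____"
],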
[
"pd.crosstab(df['ContrabandIndicator'], df['InterventionReasonCode'], normalize=\"columns\")",
"_____no_output_____"
],
[
"pd.crosstab(old_df['ContrabandIndicator'], old_df['InterventionReasonCode'], normalize=\"columns\")",
"_____no_output_____"
]
],
[
[
"There are some small changes, but they don't seem to be significant. \n\nEspecially that all the 3 values have around 33% of Contraband.\n\nTime for officers:",
"_____no_output_____"
]
],
[
[
"df['ReportingOfficerIdentificationID'].value_counts()",
"_____no_output_____"
],
[
"filter_values(df, 'ReportingOfficerIdentificationID', 2)['ReportingOfficerIdentificationID'].nunique()",
"_____no_output_____"
]
],
[
[
"Well, looks like there are a lot of unique values for the officer id (1166 for 2000 records), and there are not so many common ones (only 206 officers have more than 2 rows in the dataset) so it doesn't make much sense to analyze it.",
"_____no_output_____"
],
[
"Let's quickly go throw the rest of the columns:",
"_____no_output_____"
]
],
[
[
"df.columns",
"_____no_output_____"
],
[
"rest = ['ResidentIndicator', 'SearchAuthorizationCode',\n 'StatuteReason', 'SubjectEthnicityCode',\n 'SubjectRaceCode', 'SubjectSexCode','TownResidentIndicator']\n\nfor col in rest:\n display(pd.crosstab(df['ContrabandIndicator'], df[col], normalize=\"columns\"))\n display(pd.crosstab(old_df['ContrabandIndicator'], old_df[col], normalize=\"columns\"))",
"_____no_output_____"
]
],
[
[
"We see that all the columns got changes, but they don't seem to be so significant as in the Departments cases.\n\nAnyway, it seems like we need to retrain the model.",
"_____no_output_____"
],
[
"<img src=\"./media/retrain.jpg\" alt=\"drawing\" width=\"400\"/>",
"_____no_output_____"
],
[
"Retraining a model is always a decision we need to think about.\n\nWas this change in data constant, temporary or seasonal?\n\nIn other words, do we expect the data distribution to stay as it is? To change back after Covid? To change from season to season? \n\n**Depending on that, we could retrain the model differently:**\n\n- **If it's a seasonality**, we might want to add features like season or month and train the same model to predict differently depending on the season. We could also investigate time-series classification algorithms.\n\n- **If it's something that is going to change back**, we might either train a new model for this particular period in case the current data distrubution changes were temporary. Otherwise, if we expect the data distribution change here and back from time to time (and we know these periods in advance), we could create a new feature that would help model understand which period it is.\n\n> E.g. if we had a task of predicting beer consumption and had a city that has a lot of football matches, we might add a feature like **football_championship** and make the model predict differently for this occasions. \n\n- **If the data distribution has simply changed and we know that it's never going to come back**, we can simply retrain the model.\n\n> But in some cases we have no idea why some changes appeared (e.g. in this case of departments having more contraband).\n\n- In this case it might be a good idea to train a new model on the new datast and create some monitoring for these features distribution, so we could react when things change. \n\n> So, in our case we don't know what was the reason of data distribution changes, so we'd like to train a model on the new dataset. \n\n> The only thing is the size of the dataset. Original dataset had around 50k rows, and our new set has only 2000. It's not enough to train a good model, so this time we're going to combine both the datasets and add a new feature helping model to distinguish between them. If we had more data, it would be probably better to train a completely new model.",
"_____no_output_____"
],
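[
"# A minimal sketch of the combination step described above (assumption: old_df and df are the\n# pandas DataFrames loaded earlier in this notebook; the is_new_data flag name is illustrative).\nold_and_new = pd.concat(\n    [old_df.assign(is_new_data=0), df.assign(is_new_data=1)],\n    ignore_index=True,\n)\nold_and_new['is_new_data'].value_counts()",
"_____no_output_____"
],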
[
"And we're done!",
"_____no_output_____"
],
[
"<img src=\"./media/end.jpg\" alt=\"drawing\" width=\"400\"/>\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d049378c37ee1bb972bedfc1e398171deb2a0767 | 666 | ipynb | Jupyter Notebook | R/KLD_plot.ipynb | jeiros/Jupyter_notebooks | 4ae24613887101833aea5c89250ac39aa5d0a6ff | [
"MIT"
] | null | null | null | R/KLD_plot.ipynb | jeiros/Jupyter_notebooks | 4ae24613887101833aea5c89250ac39aa5d0a6ff | [
"MIT"
] | 1 | 2018-04-11T10:53:34.000Z | 2018-04-11T10:53:34.000Z | R/KLD_plot.ipynb | jeiros/Jupyter_notebooks | 4ae24613887101833aea5c89250ac39aa5d0a6ff | [
"MIT"
] | null | null | null | 15.488372 | 33 | 0.492492 | [
[
[
"library(ggplot2)\nlibrary(reshape)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
d0493b41602939e995501a1b37768244f9e3555a | 8,598 | ipynb | Jupyter Notebook | 09A - Reviewing Automated Machine Learning Explanations.ipynb | LucaSavio/DP100 | 0ad84abb01e5541c15c86be6f4f39f10108f455f | [
"MIT"
] | null | null | null | 09A - Reviewing Automated Machine Learning Explanations.ipynb | LucaSavio/DP100 | 0ad84abb01e5541c15c86be6f4f39f10108f455f | [
"MIT"
] | null | null | null | 09A - Reviewing Automated Machine Learning Explanations.ipynb | LucaSavio/DP100 | 0ad84abb01e5541c15c86be6f4f39f10108f455f | [
"MIT"
] | null | null | null | 43.20603 | 537 | 0.619563 | [
[
[
"# Reviewing Automated Machine Learning Explanations\n\nAs machine learning becomes more and more and more prevelant, the predictions made by models have greater influence over many aspects of our society. For example, machine learning models are an increasingly significant factor in how banks decide to grant loans or doctors prioritise treatments. The ability to interpret and explain models is increasingly important, so that the rationale for the predictions made by machine learning models can be explained and justified, and any inadvertant bias in the model can be identified.\n\nWhen you use automated machine learning to train a model, you have the option to generate explanations of feature importance that quantify the extent to which each feature influences label prediction. In this lab, you'll explore the explanations generated by an automated machine learning experiment.",
"_____no_output_____"
],
[
"## Connect to Your Workspace\n\nThe first thing you need to do is to connect to your workspace using the Azure ML SDK.\n\n> **Note**: If the authenticated session with your Azure subscription has expired since you completed the previous exercise, you'll be prompted to reauthenticate.",
"_____no_output_____"
]
],
[
[
"import azureml.core\nfrom azureml.core import Workspace\n\n# Load the workspace from the saved config file\nws = Workspace.from_config()\nprint('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))",
"_____no_output_____"
]
],
[
[
"## Run an Automated Machine Learning Experiment\n\nTo reduce time in this lab, you'll run an automated machine learning experiment with only three iterations.\n\nNote that the **model_explainability** configuration option is set to **True**.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom azureml.train.automl import AutoMLConfig\nfrom azureml.core.experiment import Experiment\nfrom azureml.widgets import RunDetails\nfrom azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\nfrom azureml.core import Dataset\n\ncluster_name = \"gmalc-aml-clust\" # Change to your compute cluster name\n\n# Prepare data for training\ndefault_ds = ws.get_default_datastore()\nif 'diabetes dataset' not in ws.datasets:\n default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data\n target_path='diabetes-data/', # Put it in a folder path in the datastore\n overwrite=True, # Replace existing files of the same name\n show_progress=True)\n\n #Create a tabular dataset from the path on the datastore (this may take a short while)\n tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))\n\n # Register the tabular dataset\n try:\n tab_data_set = tab_data_set.register(workspace=ws, \n name='diabetes dataset',\n description='diabetes data',\n tags = {'format':'CSV'},\n create_new_version=True)\n print('Dataset registered.')\n except Exception as ex:\n print(ex)\nelse:\n print('Dataset already registered.')\ntrain_data = ws.datasets.get(\"diabetes dataset\")\n\n# Configure Auto ML\nautoml_config = AutoMLConfig(name='Automated ML Experiment',\n task='classification',\n compute_target='local',\n enable_local_managed=True,\n training_data = train_data,\n n_cross_validations = 2,\n label_column_name='Diabetic',\n iterations=3,\n primary_metric = 'AUC_weighted',\n max_concurrent_iterations=3,\n featurization='off',\n model_explainability=True # Generate feature importance!\n )\n\n# Run the Auto ML experiment\nprint('Submitting Auto ML experiment...')\nautoml_experiment = Experiment(ws, 'diabetes_automl')\nautoml_run = automl_experiment.submit(automl_config)\nautoml_run.wait_for_completion(show_output=True)\nRunDetails(automl_run).show()",
"_____no_output_____"
]
],
[
[
"## View Feature Importance\n\nWhen the experiment has completed in the widget above, click the run that produced the best result to see its details. Then scroll to the bottom of the visualizations to see the relative feature importance.\n\nYou can also view feature importance for the best model produced by the experiment by using the **ExplanationClient** class:",
"_____no_output_____"
]
],
[
[
"from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient\nfrom azureml.core.run import Run\n\n# Wait for the best model explanation run to complete\nmodel_explainability_run_id = automl_run.get_properties().get('ModelExplainRunId')\nprint(model_explainability_run_id)\nif model_explainability_run_id is not None:\n model_explainability_run = Run(experiment=automl_experiment, run_id=model_explainability_run_id)\n model_explainability_run.wait_for_completion(show_output=True)\n\n# Get the best model (2nd item in outputs)\nbest_run, fitted_model = automl_run.get_output()\n\n# Get the feature explanations\nclient = ExplanationClient.from_run(best_run)\nengineered_explanations = client.download_model_explanation()\nfeature_importances = engineered_explanations.get_feature_importance_dict()\n\n# Overall feature importance\nprint('Feature\\tImportance')\nfor key, value in feature_importances.items():\n print(key, '\\t', value)",
"_____no_output_____"
]
],
[
[
"## View the Model Explanation in Azure Machine Learning studio\n\nWith the experiment run completed, click the link in the widget to see the run in Azure Machine Learning studio, and view the **Explanations** tab. Then:\n\n1. Select the explainer that was created by the automated machine learning run.\n2. View the **Global Importance** chart, which shows the overall global feature importance.\n3. View the **Summary Importance** chart, which shows each data point from the test data in a *swarm*, *violin*, or *box* plot.\n4. Select an individual point to see the **Local Feature Importance** for the individual prediction for the selected data point.",
"_____no_output_____"
],
[
"> **More Information**: For more information Automated machine Learning, see the [Azure ML documentation](https://docs.microsoft.com/azure/machine-learning/how-to-machine-learning-interpretability-automl).",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
d049427ca86e751b8605f2eaebe3fce2bf11844c | 35,642 | ipynb | Jupyter Notebook | intro-to-pytorch/Part 4 - Fashion-MNIST (Solution).ipynb | sizigia/deep-learning-v2-pytorch | c1754af92ef9a6a4ac992ae7c0925e48c95bbc66 | [
"MIT"
] | null | null | null | intro-to-pytorch/Part 4 - Fashion-MNIST (Solution).ipynb | sizigia/deep-learning-v2-pytorch | c1754af92ef9a6a4ac992ae7c0925e48c95bbc66 | [
"MIT"
] | null | null | null | intro-to-pytorch/Part 4 - Fashion-MNIST (Solution).ipynb | sizigia/deep-learning-v2-pytorch | c1754af92ef9a6a4ac992ae7c0925e48c95bbc66 | [
"MIT"
] | null | null | null | 146.674897 | 24,056 | 0.883396 | [
[
[
"# Classifying Fashion-MNIST\n\nNow it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.\n\n<img src='assets/fashion-mnist-sprite.png' width=500px>\n\nIn this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.\n\nFirst off, let's load the dataset through torchvision.",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torchvision import datasets, transforms\nimport helper\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n# Download and load the training data\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)",
"_____no_output_____"
]
],
[
[
"Here we can see one of the images.",
"_____no_output_____"
]
],
[
[
"image, label = next(iter(trainloader))\nhelper.imshow(image[0,:]);",
"_____no_output_____"
]
],
[
[
"## Building the network\n\nHere you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.",
"_____no_output_____"
]
],
[
[
"from torch import nn, optim\nimport torch.nn.functional as F",
"_____no_output_____"
],
[
"# TODO: Define your network architecture here\nclass Classifier(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 128)\n self.fc3 = nn.Linear(128, 64)\n self.fc4 = nn.Linear(64, 10)\n \n def forward(self, x):\n # make sure input tensor is flattened\n x = x.view(x.shape[0], -1)\n \n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.log_softmax(self.fc4(x), dim=1)\n \n return x",
"_____no_output_____"
]
],
[
[
"# Train the network\n\nNow you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) (something like `nn.CrossEntropyLoss` or `nn.NLLLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).\n\nThen write the training code. Remember the training pass is a fairly straightforward process:\n\n* Make a forward pass through the network to get the logits \n* Use the logits to calculate the loss\n* Perform a backward pass through the network with `loss.backward()` to calculate the gradients\n* Take a step with the optimizer to update the weights\n\nBy adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.",
"_____no_output_____"
]
],
[
[
"# TODO: Create the network, define the criterion and optimizer\nmodel = Classifier()\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.003)",
"_____no_output_____"
],
[
"# TODO: Train the network here\nepochs = 5\n\nfor e in range(epochs):\n running_loss = 0\n for images, labels in trainloader:\n log_ps = model(images)\n loss = criterion(log_ps, labels)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n else:\n print(f\"Training loss: {running_loss/len(trainloader)}\")",
"Training loss: 283.4510831311345\nTraining loss: 274.7842669263482\nTraining loss: 267.907463490963\nTraining loss: 258.2156918346882\nTraining loss: 251.79347000271082\n"
],
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport helper\n\n# Test out your network!\n\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\nimg = images[1]\n\n# TODO: Calculate the class probabilities (softmax) for img\nps = torch.exp(model(img))\n\n# Plot the image and probabilities\nhelper.view_classify(img, ps, version='Fashion')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0494ee592bbab5eeb8080d6a8d0b74d77e5256f | 6,634 | ipynb | Jupyter Notebook | Decision_Algo.ipynb | hlal1/Bebop-Autonomous-Control | 9cfd5f7475d828fbc932baf70eaeccafcdf48970 | [
"MIT"
] | 2 | 2018-12-02T04:17:18.000Z | 2018-12-18T10:56:10.000Z | Decision_Algo.ipynb | hlal1/Bebop-Autonomous-Control | 9cfd5f7475d828fbc932baf70eaeccafcdf48970 | [
"MIT"
] | null | null | null | Decision_Algo.ipynb | hlal1/Bebop-Autonomous-Control | 9cfd5f7475d828fbc932baf70eaeccafcdf48970 | [
"MIT"
] | null | null | null | 30.431193 | 122 | 0.513717 | [
[
[
"import numpy as np\n#Load the predicted 9x12 array\n\n#1st pass\nim1=np.array([[4,4,4,4,4,4,4,4,4,4,4,4],\n [6,6,2,1,6,6,6,6,6,1,1,2],\n [6,6,6,1,1,6,6,6,6,1,1,2],\n [2,6,6,6,1,5,5,5,6,1,1,2],\n [5,6,6,6,5,5,5,5,5,1,5,5],\n [5,5,2,5,5,5,5,5,5,1,5,5],\n [5,5,2,5,5,5,5,5,5,6,5,5],\n [2,6,6,6,5,5,5,5,5,6,2,2],\n [2,6,6,6,6,6,6,2,2,6,2,2]])\n\n#zoomed into driveway\nim2=np.array([[2,2,2,1,1,1,2,6,6,6,6,6],\n [2,2,2,1,1,1,2,6,6,6,6,6],\n [2,2,2,1,1,1,2,6,6,6,6,6],\n [2,2,2,1,1,1,1,6,6,6,6,6],\n [2,2,2,6,1,1,1,6,6,6,6,6],\n [6,6,6,6,1,1,1,1,6,6,6,6],\n [6,6,6,6,6,1,1,1,6,6,6,6],\n [6,6,6,6,6,6,1,1,2,2,2,2],\n [6,6,6,6,6,6,6,1,5,5,5,5]])",
"_____no_output_____"
],
[
"#%%timeit\nfrom scipy.ndimage.measurements import label\nfrom scipy.ndimage.measurements import center_of_mass\n\nA=im1\n\n#Center of the 9x12 array\nimg_center=np.array([4,5.5])\n\n#Label all the driveways and roofs\ndriveway, num_driveway = label(A==1)\nroof, num_roof = label(A==5)\n\n#Save number of driveways into array\nd=np.arange(1,num_driveway+1)\nr=np.arange(1,num_roof+1)\n\n#Find the center of the all the driveways\ndriveway_center=center_of_mass(A,driveway,d)\nroof_center=center_of_mass(A,roof,r)\n\nprint(driveway_center)\n\n#Function to find the closest roof/driveway\ndef closest(list,img_center):\n closest=list[0]\n for c in list:\n if np.linalg.norm(c-img_center) < np.linalg.norm(closest-img_center):\n closest = c\n return closest\n\n#Find the closest roof to the center of the image\nclosest_roof=closest(roof_center,img_center)\n\n#Find the closest driveway to the closest roof\nclosest_driveway=closest(driveway_center,np.asarray(closest_roof))\nprint(closest_driveway)",
"[(2.0, 3.5), (2.625, 9.375)]\n(2.0, 3.5)\n"
],
[
"#Look for 3x3 driveway when we have reached a certain height (maybe 5m above ground)\na=im2\n\n#Sliding window function\ndef sliding_window_view(arr, shape):\n n = np.array(arr.shape) \n o = n - shape + 1 # output shape\n strides = arr.strides\n \n new_shape = np.concatenate((o, shape), axis=0)\n new_strides = np.concatenate((strides, strides), axis=0)\n return np.lib.stride_tricks.as_strided(arr ,new_shape, new_strides)\n\n#Creates a 7x10 ndarray with all the 3x3 submatrices\nsub_image=sliding_window_view(a,(3,3))\n\n#Empty list\ndriveway_list=[]\n\n#Loop through the 7x10 ndarray\nfor i in range(0,7):\n for j in range(i,10):\n #Calculate the total of the submatrices\n output=sum(sum(sub_image[i,j]))\n #if the output is 9, that means we have a 3x3 that is all driveway\n if output==9:\n #append the i(row) and j(column) to a list declared previously\n #we add 1 to the i and j to find the center of the 3x3\n driveway_list.append((i+1,j+1))\n \n#Call closest function to find driveway closest to house. \nclosest_driveway=closest(driveway_list,np.asarray(closest_roof))\nprint(closest_driveway)\n",
"(4, 5)\n"
],
[
"#Read altitude from csv & Ground Sampling\nimport csv\n\ndef GSD(alt):\n sensor_height=4.5 #mm\n sensor_width=6.17 #mm\n focal_length=1.8\n image_height=1080 #pixels\n image_width=1920 #pixels\n\n #GSD = (sensor height (mm) x flight height (m) x 100) / (focal lenght (mm) x image height (pixel))\n GSD_x=((sensor_width*altitude*100)/(focal_length*image_width))\n GSD_y=((sensor_height*altitude*100)/(focal_length*image_height))\n return (GSD_x,GSD_y)\n\n#Read alt.csv\nwith open('alt.csv', 'r') as csvfile:\n alt_list = [line.rstrip('\\n') for line in csvfile]\n \n#chose last value in alt_list \naltitude=int(alt_list[-1]) #in meters\nmultiplier=GSD(altitude) #cm/pixel\nmove_coordinates=np.asarray(closest_driveway)*np.asarray(multiplier)*40 #40 is the center of the 80x80 superpixel\nprint(closest_driveway)\nprint(multiplier)\nprint(move_coordinates)",
"(4, 5)\n(1.7853009259259258, 2.314814814814815)\n[285.64814815 462.96296296]\n"
],
[
"# Write to CSV\nimport csv\nwith open('coordinates.csv', 'a', newline='') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',')\n filewriter.writerow(move_coordinates)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
d049656ae68d26c53611e4ae327ba67aec94e227 | 100,465 | ipynb | Jupyter Notebook | EinsteinPy/Einstein Tensor symbolic calculation.ipynb | IsaacW4/Advanced-GR | 0351c368321b1a2375e2d328347f79b513be4c08 | [
"Apache-2.0"
] | null | null | null | EinsteinPy/Einstein Tensor symbolic calculation.ipynb | IsaacW4/Advanced-GR | 0351c368321b1a2375e2d328347f79b513be4c08 | [
"Apache-2.0"
] | null | null | null | EinsteinPy/Einstein Tensor symbolic calculation.ipynb | IsaacW4/Advanced-GR | 0351c368321b1a2375e2d328347f79b513be4c08 | [
"Apache-2.0"
] | null | null | null | 320.974441 | 77,605 | 0.774946 | [
[
[
"# Einstein Tensor calculations using Symbolic module",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pytest\nimport sympy\nfrom sympy import cos, simplify, sin, sinh, tensorcontraction\nfrom einsteinpy.symbolic import EinsteinTensor, MetricTensor, RicciScalar\n\nsympy.init_printing()",
"_____no_output_____"
]
],
[
[
"### Defining the Anti-de Sitter spacetime Metric",
"_____no_output_____"
]
],
[
[
"syms = sympy.symbols(\"t chi theta phi\")\nt, ch, th, ph = syms\nm = sympy.diag(-1, cos(t) ** 2, cos(t) ** 2 * sinh(ch) ** 2, cos(t) ** 2 * sinh(ch) ** 2 * sin(th) ** 2).tolist()\nmetric = MetricTensor(m, syms)",
"_____no_output_____"
]
],
[
[
"### Calculating the Einstein Tensor (with both indices covariant)",
"_____no_output_____"
]
],
[
[
"einst = EinsteinTensor.from_metric(metric)\neinst.tensor()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d04979de7fc1fcdf718e241fa454f4ea30020a53 | 9,715 | ipynb | Jupyter Notebook | crawler/ixi-crawl.ipynb | OpenNeuroLab/metasearch | 34ee02d439fa2ea74bded810c19018ce1819be9d | [
"Apache-2.0"
] | 15 | 2016-10-31T15:26:38.000Z | 2021-05-31T01:34:48.000Z | crawler/ixi-crawl.ipynb | OpenNeuroLab/metasearch | 34ee02d439fa2ea74bded810c19018ce1819be9d | [
"Apache-2.0"
] | 16 | 2016-11-18T05:23:34.000Z | 2019-12-18T01:41:58.000Z | crawler/ixi-crawl.ipynb | OpenNeuroLab/metasearch | 34ee02d439fa2ea74bded810c19018ce1819be9d | [
"Apache-2.0"
] | 9 | 2016-10-31T15:27:35.000Z | 2022-01-06T11:09:18.000Z | 41.517094 | 103 | 0.613073 | [
[
[
"import requests\nimport pandas as pd",
"_____no_output_____"
],
[
"csv_data = []\nnext_page_url = 'https://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/'\nwhile (next_page_url):\n req = requests.get(next_page_url)\n info = req.json()\n for val in info['data']:\n csv_data.append(dict(MRI=val['links']['download'], \n name=val['attributes']['name'].split('-')[0]))\n next_page_url = info['links']['next']\n print(next_page_url)",
"https://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=2\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=3\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=4\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=5\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=6\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=7\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=8\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=9\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=10\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=11\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=12\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=13\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=14\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=15\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=16\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=17\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=18\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=19\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=20\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=21\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=22\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=23\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=24\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=25\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=26\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=27\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=28\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=29\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=30\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=31\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=32\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=33\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=34\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=35\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=36\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=37\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=38\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=39\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=40\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=41\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=42\nhttps://api.osf.io/v2/nodes/5h7sv/fi
les/osfstorage/5839b5d76c613b020d2942f4/?page=43\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=44\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=45\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=46\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=47\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=48\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=49\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=50\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=51\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=52\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=53\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=54\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=55\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=56\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=57\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=58\nhttps://api.osf.io/v2/nodes/5h7sv/files/osfstorage/5839b5d76c613b020d2942f4/?page=59\nNone\n"
],
[
"df = pd.DataFrame(csv_data)\ndf.head()",
"_____no_output_____"
],
[
"df.to_csv('IXI.csv', index=False, header=False)",
"_____no_output_____"
],
[
"len(info['data'])",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0498322e2a7a7dbd58e6b83897c296a497a78b5 | 58,943 | ipynb | Jupyter Notebook | 02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb | duongv/pandas_exercises | ed574a87a5d4c3756046f15124755bfe865c91da | [
"BSD-3-Clause"
] | null | null | null | 02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb | duongv/pandas_exercises | ed574a87a5d4c3756046f15124755bfe865c91da | [
"BSD-3-Clause"
] | null | null | null | 02_Filtering_&_Sorting/Chipotle/Exercises_with_solutions.ipynb | duongv/pandas_exercises | ed574a87a5d4c3756046f15124755bfe865c91da | [
"BSD-3-Clause"
] | null | null | null | 37.615188 | 135 | 0.36671 | [
[
[
"# Ex1 - Filtering and Sorting Data",
"_____no_output_____"
],
[
"This time we are going to pull data directly from the internet.\nSpecial thanks to: https://github.com/justmarkham for sharing the dataset and materials.\n\n### Step 1. Import the necessary libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv). ",
"_____no_output_____"
],
[
"### Step 3. Assign it to a variable called chipo.",
"_____no_output_____"
]
],
[
[
"url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv'\n\nchipo = pd.read_csv(url, sep = '\\t')",
"_____no_output_____"
]
],
[
[
"### Step 4. How many products cost more than $10.00?",
"_____no_output_____"
]
],
[
[
"# clean the item_price column and transform it in a float\nprices = [float(value[1 : -1]) for value in chipo.item_price]\n\n# reassign the column with the cleaned prices\nchipo.item_price = prices\n\n# delete the duplicates in item_name and quantity\nchipo_filtered = chipo.drop_duplicates(['item_name','quantity'])\n\n# select only the products with quantity equals to 1\nchipo_one_prod = chipo_filtered[chipo_filtered.quantity == 1]\n\nchipo_one_prod[chipo_one_prod['item_price']>10].item_name.nunique()",
"_____no_output_____"
]
],
[
[
"### Step 5. What is the price of each item? \n###### print a data frame with only two columns item_name and item_price",
"_____no_output_____"
]
],
[
[
"# delete the duplicates in item_name and quantity\n# chipo_filtered = chipo.drop_duplicates(['item_name','quantity'])\nchipo[(chipo['item_name'] == 'Chicken Bowl') & (chipo['quantity'] == 1)]\n\n# select only the products with quantity equals to 1\n# chipo_one_prod = chipo_filtered[chipo_filtered.quantity == 1]\n\n# select only the item_name and item_price columns\n# price_per_item = chipo_one_prod[['item_name', 'item_price']]\n\n# sort the values from the most to less expensive\n# price_per_item.sort_values(by = \"item_price\", ascending = False).head(20)",
"_____no_output_____"
]
],
[
[
"### Step 6. Sort by the name of the item",
"_____no_output_____"
]
],
[
[
"chipo.item_name.sort_values()\n\n# OR\n\nchipo.sort_values(by = \"item_name\")",
"_____no_output_____"
]
],
[
[
"### Step 7. What was the quantity of the most expensive item ordered?",
"_____no_output_____"
]
],
[
[
"chipo.sort_values(by = \"item_price\", ascending = False).head(1)",
"_____no_output_____"
]
],
[
[
"### Step 8. How many times were a Veggie Salad Bowl ordered?",
"_____no_output_____"
]
],
[
[
"chipo_salad = chipo[chipo.item_name == \"Veggie Salad Bowl\"]\n\nlen(chipo_salad)",
"_____no_output_____"
]
],
[
[
"### Step 9. How many times people orderd more than one Canned Soda?",
"_____no_output_____"
]
],
[
[
"chipo_drink_steak_bowl = chipo[(chipo.item_name == \"Canned Soda\") & (chipo.quantity > 1)]\nlen(chipo_drink_steak_bowl)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0499c57669bb369689a5e6a5c197ac119a66490 | 44,822 | ipynb | Jupyter Notebook | in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb | Steve-Hawk/nrpytutorial | 42d7450dba8bf43aa9c2d8f38f85f18803de69b7 | [
"BSD-2-Clause"
] | null | null | null | in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb | Steve-Hawk/nrpytutorial | 42d7450dba8bf43aa9c2d8f38f85f18803de69b7 | [
"BSD-2-Clause"
] | null | null | null | in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb | Steve-Hawk/nrpytutorial | 42d7450dba8bf43aa9c2d8f38f85f18803de69b7 | [
"BSD-2-Clause"
] | null | null | null | 44.687936 | 687 | 0.54567 | [
[
[
"<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# Start-to-Finish Example: Unit Testing `GiRaFFE_NRPy`: $A_k$ to $B^i$\n\n## Author: Patrick Nelson\n\n## This module Validates the A-to-B routine for `GiRaFFE`.\n\n**Notebook Status:** <font color='green'><b>Validated</b></font>\n\n**Validation Notes:** This module will validate the routines in [Tutorial-GiRaFFE_NRPy-A2B](Tutorial-GiRaFFE_NRPy-A2B.ipynb).\n\n### NRPy+ Source Code for this module: \n* [GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py) [\\[**tutorial**\\]](Tutorial-GiRaFFE_NRPy-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential in arbitrary spactimes.\n\n## Introduction:\n\nThis notebook validates our A-to-B solver for use in `GiRaFFE_NRPy`. Because the original `GiRaFFE` used staggered grids and we do not, we can not trivially do a direct comparison to the old code. Instead, we will compare the numerical results with the expected analytic results. \n\nIt is, in general, good coding practice to unit test functions individually to verify that they produce the expected and intended output. Here, we expect our functions to produce the correct cross product in an arbitrary spacetime. To that end, we will choose functions that are easy to differentiate, but lack the symmetries that would trivialize the finite-difference algorithm. Higher-order polynomials are one such type of function. \n\nWhen this notebook is run, if `Use_Gaussian_Data` is `True`, the difference between the approximate and exact magnetic field will be output to text files that can be found in the same directory as this notebook. These will be read in in [Step 3](#convergence), and used there to confirm second order convergence of the algorithm. Otherwise, is `Use_Gaussian_Data` is `False`, polynomial data will be used and the significant digits of agreement between the approximate and exact magnetic field will be printed to the screen right after the code is run [here](#compile_run).\n",
"_____no_output_____"
],
[
"<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$\n\nThis notebook is organized as follows\n\n1. [Step 1](#setup): Set up core functions and parameters for unit testing the A2B algorithm\n 1. [Step 1.a](#polynomial) Polynomial vector potential\n 1. [Step 1.b](#gaussian) Gaussian vector potential\n 1. [Step 1.c](#magnetic) The magnetic field $B^i$\n 1. [Step 1.d](#vector_potential) The vector potential $A_k$\n 1. [Step 1.e](#free_parameters) Set free parameters in the code\n1. [Step 2](#mainc): `A2B_unit_test.c`: The Main C Code\n 1. [Step 2.a](#compile_run): Compile and run the code\n1. [Step 3](#convergence): Code validation: Verify that relative error in numerical solution converges to zero at the expected order\n1. [Step 4](#latex_pdf_output): Output this notebook to $\\LaTeX$-formatted PDF file",
"_____no_output_____"
],
[
"<a id='setup'></a>\n\n# Step 1: Set up core functions and parameters for unit testing the A2B algorithm \\[Back to [top](#toc)\\]\n\n$$\\label{setup}$$\n\nWe'll start by appending the relevant paths to `sys.path` so that we can access sympy modules in other places. Then, we'll import NRPy+ core functionality and set up a directory in which to carry out our test. We must also set the desired finite differencing order.",
"_____no_output_____"
]
],
[
[
"import shutil, os, sys # Standard Python modules for multiplatform OS-level functions\n# First, we'll add the parent directory to the list of directories Python will check for modules.\nnrpy_dir_path = os.path.join(\"..\")\nif nrpy_dir_path not in sys.path:\n sys.path.append(nrpy_dir_path)\n\nfrom outputC import * # NRPy+: Core C code output module\nimport finite_difference as fin # NRPy+: Finite difference C code generation module\nimport NRPy_param_funcs as par # NRPy+: Parameter interface\nimport grid as gri # NRPy+: Functions having to do with numerical grids\nimport loop as lp # NRPy+: Generate C code loops\nimport indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support\nimport reference_metric as rfm # NRPy+: Reference metric support\nimport cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface\n\nout_dir = \"Validation/\"\ncmd.mkdir(out_dir)\n\nthismodule = \"Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B\"\n\n# Set the finite-differencing order to 2\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\", 2)\n\nUse_Gaussian_Data = True\n\na,b,c,d,e,f,g,h,l,m,n,o,p,q,r,s,t,u = par.Cparameters(\"REAL\",thismodule,[\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\"],1e300)\n\ngammaDD = ixp.register_gridfunctions_for_single_rank2(\"AUXEVOL\",\"gammaDD\",\"sym01\")\nAD = ixp.register_gridfunctions_for_single_rank1(\"EVOL\",\"AD\")\nBU = ixp.register_gridfunctions_for_single_rank1(\"AUXEVOL\",\"BU\")\n",
"_____no_output_____"
]
],
[
[
"<a id='polynomial'></a>\n\n## Step 1.a: Polynomial vector potential \\[Back to [top](#toc)\\]\n\n$$\\label{polynomial}$$\n\nWe will start with the simplest case - testing the second-order solver. In second-order finite-differencing, we use a three-point stencil that can exactly differentiate polynomials up to quadratic. So, we will use cubic functions three variables. For instance,\n\n\\begin{align}\nA_x &= ax^3 + by^3 + cz^3 + dy^2 + ez^2 + f \\\\\nA_y &= gx^3 + hy^3 + lz^3 + mx^2 + nz^2 + p \\\\\nA_z &= px^3 + qy^3 + rz^3 + sx^2 + ty^2 + u. \\\\\n\\end{align}\n\nIt will be much simpler to let NRPy+ handle most of this work. So, we will import the core functionality of NRPy+, build the expressions, and then output them using `outputC()`.",
"_____no_output_____"
]
],
[
[
"if not Use_Gaussian_Data:\n is_gaussian = par.Cparameters(\"int\",thismodule,\"is_gaussian\",0)\n\n par.set_parval_from_str(\"reference_metric::CoordSystem\",\"Cartesian\")\n rfm.reference_metric()\n x = rfm.xxCart[0]\n y = rfm.xxCart[1]\n z = rfm.xxCart[2]\n\n AD[0] = a*x**3 + b*y**3 + c*z**3 + d*y**2 + e*z**2 + f\n AD[1] = g*x**3 + h*y**3 + l*z**3 + m*x**2 + n*z**2 + o\n AD[2] = p*x**3 + q*y**3 + r*z**3 + s*x**2 + t*y**2 + u\n",
"_____no_output_____"
]
],
[
[
"<a id='gaussian'></a>\n\n## Step 1.b: Gaussian vector potential \\[Back to [top](#toc)\\]\n\n$$\\label{gaussian}$$\n\nAlternatively, we might want to use different functions for the vector potential. Here, we'll give some 3D Gaussians:\n\\begin{align}\nA_x &= a e^{-((x-b)^2+(y-c)^2+(z-d)^2)} \\\\\nA_y &= f e^{-((x-g)^2+(y-h)^2+(z-l)^2)} \\\\\nA_z &= m e^{-((x-n)^2+(y-o)^2+(z-p)^2)}, \\\\\n\\end{align}\nwhere $e$ is the natural number.",
"_____no_output_____"
]
],
[
[
"if Use_Gaussian_Data:\n is_gaussian = par.Cparameters(\"int\",thismodule,\"is_gaussian\",1)\n\n par.set_parval_from_str(\"reference_metric::CoordSystem\",\"Cartesian\")\n rfm.reference_metric()\n x = rfm.xxCart[0]\n y = rfm.xxCart[1]\n z = rfm.xxCart[2]\n\n AD[0] = a * sp.exp(-((x-b)**2 + (y-c)**2 + (z-d)**2))\n AD[1] = f * sp.exp(-((x-g)**2 + (y-h)**2 + (z-l)**2))\n AD[2] = m * sp.exp(-((x-n)**2 + (y-o)**2 + (z-p)**2))",
"_____no_output_____"
]
],
[
[
"<a id='magnetic'></a>\n\n## Step 1.c: The magnetic field $B^i$ \\[Back to [top](#toc)\\]\n$$\\label{magnetic}$$\n\nNext, we'll let NRPy+ compute derivatives analytically according to $$B^i = \\frac{[ijk]}{\\sqrt{\\gamma}} \\partial_j A_k.$$ Then we can carry out two separate tests to verify the numerical derivatives. First, we will verify that when we let the cubic terms be zero, the two calculations of $B^i$ agree to roundoff error. Second, we will verify that when we set the cubic terms, our error is dominated by trunction error that converges to zero at the expected rate. \n\nWe will need a sample metric $\\gamma_{ij}$ for $\\sqrt{\\gamma}$. We will thus write a function with the following arbitrary equations. \n\\begin{align}\n\\gamma_{xx} &= ax^3 + by^3 + cz^3 + dy^2 + ez^2 + 1 \\\\\n\\gamma_{yy} &= gx^3 + hy^3 + lz^3 + mx^2 + nz^2 + 1 \\\\\n\\gamma_{zz} &= px^3 + qy^3 + rz^3 + sx^2 + ty^2 + 1. \\\\\n\\gamma_{xy} &= \\frac{1}{10} \\exp\\left(-\\left((x-b)^2+(y-c)^2+(z-d)^2\\right)\\right) \\\\\n\\gamma_{xz} &= \\frac{1}{10} \\exp\\left(-\\left((x-g)^2+(y-h)^2+(z-l)^2\\right)\\right) \\\\\n\\gamma_{yz} &= \\frac{1}{10} \\exp\\left(-\\left((x-n)^2+(y-o)^2+(z-p)^2\\right)\\right), \\\\\n\\end{align}\n",
"_____no_output_____"
]
],
[
[
"par.set_parval_from_str(\"reference_metric::CoordSystem\",\"Cartesian\")\nrfm.reference_metric()\nx = rfm.xxCart[0]\ny = rfm.xxCart[1]\nz = rfm.xxCart[2]\n\ngammaDD[0][0] = a*x**3 + b*y**3 + c*z**3 + d*y**2 + e*z**2 + sp.sympify(1)\ngammaDD[1][1] = g*x**3 + h*y**3 + l*z**3 + m*x**2 + n*z**2 + sp.sympify(1)\ngammaDD[2][2] = p*x**3 + q*y**3 + r*z**3 + s*x**2 + t*y**2 + sp.sympify(1)\ngammaDD[0][1] = sp.Rational(1,10) * sp.exp(-((x-b)**2 + (y-c)**2 + (z-d)**2))\ngammaDD[0][2] = sp.Rational(1,10) * sp.exp(-((x-g)**2 + (y-h)**2 + (z-l)**2))\ngammaDD[1][2] = sp.Rational(1,10) * sp.exp(-((x-n)**2 + (y-o)**2 + (z-p)**2))\n\nimport GRHD.equations as gh\ngh.compute_sqrtgammaDET(gammaDD)\n\nimport WeylScal4NRPy.WeylScalars_Cartesian as weyl\nLeviCivitaDDD = weyl.define_LeviCivitaSymbol_rank3()\nLeviCivitaUUU = ixp.zerorank3()\nfor i in range(3):\n for j in range(3):\n for k in range(3):\n LeviCivitaUUU[i][j][k] = LeviCivitaDDD[i][j][k] / gh.sqrtgammaDET\n \nB_analyticU = ixp.register_gridfunctions_for_single_rank1(\"AUXEVOL\",\"B_analyticU\")\nfor i in range(3):\n B_analyticU[i] = 0\n for j in range(3):\n for k in range(3):\n B_analyticU[i] += LeviCivitaUUU[i][j][k] * sp.diff(AD[k],rfm.xxCart[j])\n\nmetric_gfs_to_print = [\\\n lhrh(lhs=gri.gfaccess(\"aux_gfs\",\"gammaDD00\"),rhs=gammaDD[0][0]),\\\n lhrh(lhs=gri.gfaccess(\"aux_gfs\",\"gammaDD01\"),rhs=gammaDD[0][1]),\\\n lhrh(lhs=gri.gfaccess(\"aux_gfs\",\"gammaDD02\"),rhs=gammaDD[0][2]),\\\n lhrh(lhs=gri.gfaccess(\"aux_gfs\",\"gammaDD11\"),rhs=gammaDD[1][1]),\\\n lhrh(lhs=gri.gfaccess(\"aux_gfs\",\"gammaDD12\"),rhs=gammaDD[1][2]),\\\n lhrh(lhs=gri.gfaccess(\"aux_gfs\",\"gammaDD22\"),rhs=gammaDD[2][2]),\\\n ]\n\ndesc = \"Calculate the metric gridfunctions\"\nname = \"calculate_metric_gfs\"\noutCfunction(\n outfile = os.path.join(out_dir,name+\".h\"), desc=desc, name=name,\n params =\"const paramstruct *restrict params,REAL *restrict xx[3],REAL *restrict auxevol_gfs\",\n body = fin.FD_outputC(\"returnstring\",metric_gfs_to_print,params=\"outCverbose=False\").replace(\"IDX4\",\"IDX4S\"),\n loopopts=\"AllPoints,Read_xxs\")\n",
"Output C function calculate_metric_gfs() to file Validation/calculate_metric_gfs.h\n"
]
],
[
[
"We also should write a function that will use the analytic formulae for $B^i$. ",
"_____no_output_____"
]
],
[
[
"B_analyticU_to_print = [\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"B_analyticU0\"),rhs=B_analyticU[0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"B_analyticU1\"),rhs=B_analyticU[1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"B_analyticU2\"),rhs=B_analyticU[2]),\\\n ]\n\ndesc = \"Calculate the exact magnetic field\"\nname = \"calculate_exact_BU\"\noutCfunction(\n outfile = os.path.join(out_dir,name+\".h\"), desc=desc, name=name,\n params =\"const paramstruct *restrict params,REAL *restrict xx[3],REAL *restrict auxevol_gfs\",\n body = fin.FD_outputC(\"returnstring\",B_analyticU_to_print,params=\"outCverbose=False\").replace(\"IDX4\",\"IDX4S\"),\n loopopts=\"AllPoints,Read_xxs\")\n",
"Output C function calculate_exact_BU() to file Validation/calculate_exact_BU.h\n"
]
],
[
[
"<a id='vector_potential'></a>\n\n## Step 1.d: The vector potential $A_k$ \\[Back to [top](#toc)\\]\n$$\\label{vector_potential}$$\n\nWe'll now write a function to set the vector potential $A_k$. This simply uses NRPy+ to generate most of the code from the expressions we wrote at the beginning. Then, we'll need to call the function from the module `GiRaFFE_NRPy_A2B` to generate the code we need. Also, we will declare the parameters for the vector potential functions.",
"_____no_output_____"
]
],
[
[
"AD_to_print = [\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"AD0\"),rhs=AD[0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"AD1\"),rhs=AD[1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"AD2\"),rhs=AD[2]),\\\n ]\n\ndesc = \"Calculate the vector potential\"\nname = \"calculate_AD\"\noutCfunction(\n outfile = os.path.join(out_dir,name+\".h\"), desc=desc, name=name,\n params =\"const paramstruct *restrict params,REAL *restrict xx[3],REAL *restrict out_gfs\",\n body = fin.FD_outputC(\"returnstring\",AD_to_print,params=\"outCverbose=False\").replace(\"IDX4\",\"IDX4S\"),\n loopopts=\"AllPoints,Read_xxs\")\n\n# cmd.mkdir(os.path.join(out_dir))\nimport GiRaFFE_NRPy.GiRaFFE_NRPy_A2B as A2B\n# We'll generate these into the A2B subdirectory since that's where the functions\n# we're testing expect them to be.\nAD = ixp.declarerank1(\"AD\") # Make sure these aren't analytic expressions\ngammaDD = ixp.declarerank2(\"gammaDD\",\"sym01\")\nA2B.GiRaFFE_NRPy_A2B(os.path.join(out_dir,\"A2B\"),gammaDD,AD,BU)\n",
"Output C function calculate_AD() to file Validation/calculate_AD.h\n"
]
],
[
[
"<a id='free_parameters'></a>\n\n## Step 1.e: Set free parameters in the code \\[Back to [top](#toc)\\]\n$$\\label{free_parameters}$$\n\nWe also need to create the files that interact with NRPy's C parameter interface. ",
"_____no_output_____"
]
],
[
[
"# Step 3.d.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h\n# par.generate_Cparameters_Ccodes(os.path.join(out_dir))\n\n# Step 3.d.ii: Set free_parameters.h\nwith open(os.path.join(out_dir,\"free_parameters.h\"),\"w\") as file:\n file.write(\"\"\"\n// Override parameter defaults with values based on command line arguments and NGHOSTS.\n// We'll use this grid. It has one point and one ghost zone.\nparams.Nxx0 = atoi(argv[1]);\nparams.Nxx1 = atoi(argv[2]);\nparams.Nxx2 = atoi(argv[3]);\nparams.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS;\nparams.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS;\nparams.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS;\n// Step 0d: Set up space and time coordinates\n// Step 0d.i: Declare \\Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]:\nconst REAL xxmin[3] = {-0.01,-0.01,-0.01};\nconst REAL xxmax[3] = { 0.01, 0.01, 0.01};\n\nparams.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx_plus_2NGHOSTS0-1.0);\nparams.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx_plus_2NGHOSTS1-1.0);\nparams.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx_plus_2NGHOSTS2-1.0);\nprintf(\"dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\\\n\",params.dxx0,params.dxx1,params.dxx2);\nparams.invdx0 = 1.0 / params.dxx0;\nparams.invdx1 = 1.0 / params.dxx1;\nparams.invdx2 = 1.0 / params.dxx2;\n\\n\"\"\")\n\n# Generates declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h\npar.generate_Cparameters_Ccodes(os.path.join(out_dir))",
"_____no_output_____"
]
],
[
[
"<a id='mainc'></a>\n\n# Step 2: `A2B_unit_test.c`: The Main C Code \\[Back to [top](#toc)\\]\n$$\\label{mainc}$$\n\nNow that we have our vector potential and analytic magnetic field to compare against, we will start writing our unit test. We'll also import common C functionality, define `REAL`, the number of ghost zones, and the faces, and set the standard macros for NRPy+ style memory access.",
"_____no_output_____"
]
],
[
[
"%%writefile $out_dir/A2B_unit_test.c\n// These are common packages that we are likely to need.\n#include \"stdio.h\"\n#include \"stdlib.h\"\n#include \"math.h\"\n#include \"string.h\" // Needed for strncmp, etc.\n#include \"stdint.h\" // Needed for Windows GCC 6.x compatibility\n#include <time.h> // Needed to set a random seed.\n\n#define REAL double\n#include \"declare_Cparameters_struct.h\"\n\nconst int MAXFACE = -1;\nconst int NUL = +0;\nconst int MINFACE = +1;\nconst int NGHOSTS = 3;\nconst int NGHOSTS_A2B = 3;\n\nREAL a,b,c,d,e,f,g,h,l,m,n,o,p,q,r,s,t,u;\n\n// Standard NRPy+ memory access:\n#define IDX4S(g,i,j,k) \\\n( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )\n",
"Overwriting Validation//A2B_unit_test.c\n"
]
],
[
[
"We'll now define the gridfunction names.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $out_dir/A2B_unit_test.c\n// Let's also #define the NRPy+ gridfunctions\n#define AD0GF 0\n#define AD1GF 1\n#define AD2GF 2\n#define NUM_EVOL_GFS 3\n\n#define GAMMADD00GF 0\n#define GAMMADD01GF 1\n#define GAMMADD02GF 2\n#define GAMMADD11GF 3\n#define GAMMADD12GF 4\n#define GAMMADD22GF 5\n#define B_ANALYTICU0GF 6\n#define B_ANALYTICU1GF 7\n#define B_ANALYTICU2GF 8\n#define BU0GF 9\n#define BU1GF 10\n#define BU2GF 11\n#define NUM_AUXEVOL_GFS 12\n",
"Appending to Validation//A2B_unit_test.c\n"
]
],
[
[
"Now, we'll handle the different A2B codes. There are several things to do here. First, we'll add `#include`s to the C code so that we have access to the functions we want to test, as generated above. We will choose to do this in the subfolder `A2B` relative to this tutorial.\n",
"_____no_output_____"
]
],
[
[
"%%writefile -a $out_dir/A2B_unit_test.c\n#include \"A2B/driver_AtoB.h\" // This file contains both functions we need.\n\n#include \"calculate_exact_BU.h\"\n#include \"calculate_AD.h\"\n\n#include \"calculate_metric_gfs.h\"\n",
"Appending to Validation//A2B_unit_test.c\n"
]
],
[
[
"Now, we'll write the main method. First, we'll set up the grid. In this test, we cannot use only one point. As we are testing a three-point stencil, we can get away with a minimal $3 \\times 3 \\times 3$ grid. Then, we'll write the A fields. After that, we'll calculate the magnetic field two ways.",
"_____no_output_____"
]
],
[
[
"%%writefile -a $out_dir/A2B_unit_test.c\nint main(int argc, const char *argv[]) {\n paramstruct params;\n#include \"set_Cparameters_default.h\"\n\n // Let the last argument be the test we're doing. 1 = coarser grid, 0 = finer grid.\n int do_quadratic_test = atoi(argv[4]);\n \n // Step 0c: Set free parameters, overwriting Cparameters defaults \n // by hand or with command-line input, as desired.\n#include \"free_parameters.h\"\n#include \"set_Cparameters-nopointer.h\"\n\n // We'll define our grid slightly different from how we normally would. We let our outermost\n // ghostzones coincide with xxmin and xxmax instead of the interior of the grid. This means\n // that the ghostzone points will have identical positions so we can do convergence tests of them. // Step 0d.ii: Set up uniform coordinate grids\n REAL *xx[3];\n xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0);\n xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1);\n xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2);\n for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + ((REAL)(j))*dxx0;\n for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + ((REAL)(j))*dxx1;\n for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + ((REAL)(j))*dxx2;\n \n //for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) printf(\"x[%d] = %.5e\\n\",j,xx[0][j]);\n\n //for(int i=0;i<Nxx_plus_2NGHOSTS0;i++) printf(\"xx[0][%d] = %.15e\\\\n\",i,xx[0][i]);\n \n // This is the array to which we'll write the NRPy+ variables.\n REAL *auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0);\n REAL *evol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0);\n for(int i=0;i<Nxx_plus_2NGHOSTS0;i++) for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) for(int k=0;k<Nxx_plus_2NGHOSTS1;k++) {\n auxevol_gfs[IDX4S(BU0GF,i,j,k)] = 0.0;\n auxevol_gfs[IDX4S(BU1GF,i,j,k)] = 0.0;\n auxevol_gfs[IDX4S(BU2GF,i,j,k)] = 0.0;\n }\n \n // We now want to set up the vector potential. First, we must set the coefficients. \n if(is_gaussian) {\n // Gaussian coefficients:\n // Magnitudes:\n a = (double)(rand()%20)/5.0;\n f = (double)(rand()%20)/5.0;\n m = (double)(rand()%20)/5.0;\n // Offsets:\n b = (double)(rand()%10-5)/1000.0;\n c = (double)(rand()%10-5)/1000.0;\n d = (double)(rand()%10-5)/1000.0;\n g = (double)(rand()%10-5)/1000.0;\n h = (double)(rand()%10-5)/1000.0;\n l = (double)(rand()%10-5)/1000.0;\n n = (double)(rand()%10-5)/1000.0;\n o = (double)(rand()%10-5)/1000.0;\n p = (double)(rand()%10-5)/1000.0;\n /*printf(\"Offsets: b,c,d = %f,%f,%f\\n\",b,c,d);\n printf(\"Offsets: g,h,l = %f,%f,%f\\n\",g,h,l);\n printf(\"Offsets: n,o,p = %f,%f,%f\\n\",n,o,p);*/\n // First, calculate the test data on our grid:\n }\n else {\n // Polynomial coefficients\n // We will use random integers between -10 and 10. For the first test, we let the \n // Cubic coefficients remain zero. 
Those are a,b,c,g,h,l,p,q, and r.\n\n d = (double)(rand()%20-10);\n e = (double)(rand()%20-10);\n f = (double)(rand()%20-10);\n m = (double)(rand()%20-10);\n n = (double)(rand()%20-10);\n o = (double)(rand()%20-10);\n s = (double)(rand()%20-10);\n t = (double)(rand()%20-10);\n u = (double)(rand()%20-10);\n }\n calculate_metric_gfs(¶ms,xx,auxevol_gfs);\n \n if(do_quadratic_test && !is_gaussian) {\n calculate_AD(¶ms,xx,evol_gfs);\n\n // We'll also calculate the exact solution for B^i\n calculate_exact_BU(¶ms,xx,auxevol_gfs);\n\n // And now for the numerical derivatives:\n driver_A_to_B(¶ms,evol_gfs,auxevol_gfs);\n\n printf(\"This test uses quadratic vector potentials, so the magnetic fields should agree to roundoff error.\\n\");\n printf(\"Below, each row represents one point. Each column represents a component of the magnetic field.\\n\");\n printf(\"Shown is the number of Significant Digits of Agreement, at least 13 is good, higher is better:\\n\\n\");\n for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {\n printf(\"i0,i1,i2=%d,%d,%d; SDA: %.3f, %.3f, %.3f\\n\",i0,i1,i2,\n 1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(B_ANALYTICU0GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)])/(fabs(auxevol_gfs[IDX4S(B_ANALYTICU0GF,i0,i1,i2)])+fabs(auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)])+1.e-15)),\n 1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(B_ANALYTICU1GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)])/(fabs(auxevol_gfs[IDX4S(B_ANALYTICU1GF,i0,i1,i2)])+fabs(auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)])+1.e-15)),\n 1.0-log10(2.0*fabs(auxevol_gfs[IDX4S(B_ANALYTICU2GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)])/(fabs(auxevol_gfs[IDX4S(B_ANALYTICU2GF,i0,i1,i2)])+fabs(auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)])+1.e-15))\n );\n /*printf(\"%.3f, %.3f, %.3f\\n\",\n auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)],\n auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)],\n auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)]\n );*/\n }\n }\n \n if(!is_gaussian) {\n // Now, we'll set the cubic coefficients:\n a = (double)(rand()%20-10);\n b = (double)(rand()%20-10);\n c = (double)(rand()%20-10);\n g = (double)(rand()%20-10);\n h = (double)(rand()%20-10);\n l = (double)(rand()%20-10);\n p = (double)(rand()%20-10);\n q = (double)(rand()%20-10);\n r = (double)(rand()%20-10);\n // First, calculate the test data on our grid:\n calculate_metric_gfs(¶ms,xx,auxevol_gfs);\n }\n \n // And recalculate on our initial grid:\n calculate_AD(¶ms,xx,evol_gfs);\n\n // We'll also calculate the exact solution for B^i\n calculate_exact_BU(¶ms,xx,auxevol_gfs);\n \n // And now for the numerical derivatives:\n driver_A_to_B(¶ms,evol_gfs,auxevol_gfs);\n \n char filename[100];\n sprintf(filename,\"out%d-numer.txt\",Nxx0);\n FILE *out2D = fopen(filename, \"w\");\n if(do_quadratic_test || is_gaussian) {\n for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {\n // We print the difference between approximate and exact numbers.\n fprintf(out2D,\"%.16e\\t%.16e\\t%.16e %e %e %e\\n\",\n auxevol_gfs[IDX4S(B_ANALYTICU0GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)],\n auxevol_gfs[IDX4S(B_ANALYTICU1GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)],\n auxevol_gfs[IDX4S(B_ANALYTICU2GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)],\n xx[0][i0],xx[1][i1],xx[2][i2]\n );\n }\n }\n else {\n for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++) for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {\n if (i0%2==0 && i1%2==0 && i2%2==0) {\n // We print the difference between approximate 
and exact numbers.\n fprintf(out2D,\"%.16e\\t%.16e\\t%.16e %e %e %e\\n\",\n auxevol_gfs[IDX4S(B_ANALYTICU0GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU0GF,i0,i1,i2)],\n auxevol_gfs[IDX4S(B_ANALYTICU1GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU1GF,i0,i1,i2)],\n auxevol_gfs[IDX4S(B_ANALYTICU2GF,i0,i1,i2)]-auxevol_gfs[IDX4S(BU2GF,i0,i1,i2)],\n xx[0][i0],xx[1][i1],xx[2][i2]\n );\n }\n }\n\n }\n fclose(out2D);\n}\n",
"Appending to Validation//A2B_unit_test.c\n"
]
],
[
[
"<a id='compile_run'></a>\n\n## Step 2.a: Compile and run the code \\[Back to [top](#toc)\\]\n$$\\label{compile_run}$$\n\nNow that we have our file, we can compile it and run the executable.",
"_____no_output_____"
]
],
[
[
"import time\n\nprint(\"Now compiling, should take ~2 seconds...\\n\")\nstart = time.time()\ncmd.C_compile(os.path.join(out_dir,\"A2B_unit_test.c\"), os.path.join(out_dir,\"A2B_unit_test\"))\nend = time.time()\nprint(\"Finished in \"+str(end-start)+\" seconds.\\n\\n\")\n\nprint(\"Now running...\\n\")\nstart = time.time()\n!./Validation/A2B_unit_test 1 1 1 1\nif Use_Gaussian_Data:\n # To do a convergence test, we'll also need a second grid with twice the resolution.\n !./Validation/A2B_unit_test 7 7 7 1\n\n# !./Validation/A2B_unit_test 19 19 19 1\nend = time.time()\nprint(\"Finished in \"+str(end-start)+\" seconds.\\n\\n\")\n",
"Now compiling, should take ~2 seconds...\n\nCompiling executable...\nExecuting `gcc -Ofast -fopenmp -march=native -funroll-loops Validation/A2B_unit_test.c -o Validation/A2B_unit_test -lm`...\nFinished executing in 0.6135389804840088 seconds.\nFinished compilation.\nFinished in 0.6216833591461182 seconds.\n\n\nNow running...\n\ndxx0,dxx1,dxx2 = 3.33333e-03,3.33333e-03,3.33333e-03\ndxx0,dxx1,dxx2 = 1.66667e-03,1.66667e-03,1.66667e-03\nFinished in 0.25135135650634766 seconds.\n\n\n"
]
],
[
[
"<a id='convergence'></a>\n\n# Step 3: Code validation: Verify that relative error in numerical solution converges to zero at the expected order \\[Back to [top](#toc)\\]\n\n$$\\label{convergence}$$\n\nNow that we have shown that when we use a quadratic vector potential, we get roundoff-level agreement (which is to be expected, since the finite-differencing used approximates the underlying function with a quadratic), we will use do a convergence test to show that when we can't exactly model the function, the truncation error dominates and converges to zero at the expected rate. For this, we use cubic functions for the vector potential. In the code above, we output the difference beteween the numeric and exact magnetic fields at the overlapping, non-edge, non-vertex points of two separate grids. Here, we import that data and calculate the convergence in the usual way, \n$$\nk = \\log_2 \\left( \\frac{F - F_1}{F - F_2} \\right),\n$$\nwhere $k$ is the convergence order, $F$ is the exact solution, $F_1$ is the approximate solution on the coarser grid with resolution $\\Delta x$, and $F_2$ is the approximate solution on the finer grid with resolution $\\Delta x/2$.\n\nHere, we will calculate the convergence of the L2 Norm over the points in each region: \n$$\n| B^i_{\\rm approx} - B^i_{\\rm exact}| = \\sqrt{\\frac{1}{N} \\sum_{ijk} \\left( B^i_{\\rm approx} - B^i_{\\rm exact} \\right)^2}\n$$",
"_____no_output_____"
]
],
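To make the order calculation concrete before running the full analysis below, here is a minimal sketch of the measurement by itself. The two error norms are hypothetical stand-ins for the L2 norms computed in the next cell; only the final base-2 logarithm of their ratio is the point being illustrated.

```
import numpy as np

# Hypothetical L2 norms of |B^i_approx - B^i_exact| on the two grids.
err_coarse = 8.0e-7   # error norm at resolution Dx
err_fine   = 2.0e-7   # error norm at resolution Dx/2

# The output files already store F - F_1 and F - F_2, so the measured order
# is simply the base-2 logarithm of the ratio of the error norms.
order = np.log2(err_coarse / err_fine)
print("Measured convergence order: %.2f" % order)   # -> 2.00 for second-order convergence
```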
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nData1 = np.loadtxt(\"out1-numer.txt\")\nData2 = np.loadtxt(\"out7-numer.txt\")\n\n# print(\"Convergence test: All should be approximately 2\\n\")\n# convergence = np.log(np.divide(np.abs(Data1),np.abs(Data2)))/np.log(2)\n# for i in range(len(convergence[:,0])):\n# print(convergence[i,:])\n \ndef IDX4(i,j,k,Nxx_plus_2NGHOSTS0,Nxx_plus_2NGHOSTS1,Nxx_plus_2NGHOSTS2):\n return (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (0) ) )\ncomp = 0 # 0->Bx, 1->By, 2->Bz\n\n# First, let's do this over the interior\nN = 7 # This is the number of total gridpoints\nnface = 0 # This is the number of points we are taking the norm of.\nnint = 0 # This is the number of points we are taking the norm of.\nL2_1 = 0\nL2_1_xm = 0 # We declare one L2 norm for each face.\nL2_1_xp = 0\nL2_1_ym = 0\nL2_1_yp = 0\nL2_1_zm = 0\nL2_1_zp = 0\nfor k in range(N):\n for j in range(N):\n for i in range(N):\n if i==0:\n L2_1_xm += Data1[IDX4(i,j,k,N,N,N),comp]**2\n nface += 1\n if i==N-1:\n L2_1_xp += Data1[IDX4(i,j,k,N,N,N),comp]**2\n if j==0:\n L2_1_ym += Data1[IDX4(i,j,k,N,N,N),comp]**2\n if j==N-1:\n L2_1_yp += Data1[IDX4(i,j,k,N,N,N),comp]**2\n if k==0:\n L2_1_zm += Data1[IDX4(i,j,k,N,N,N),comp]**2\n if k==N-1:\n L2_1_zp += Data1[IDX4(i,j,k,N,N,N),comp]**2\n if not (i%(N-1)==0 or j%(N-1)==0 or k%(N-1)==0):\n L2_1 += Data1[IDX4(i,j,k,N,N,N),comp]**2\n nint += 1\n \nL2_1 = np.sqrt(L2_1/(nint))\nL2_1_xm = np.sqrt(L2_1_xm/(nface))\nL2_1_xp = np.sqrt(L2_1_xp/(nface))\nL2_1_ym = np.sqrt(L2_1_ym/(nface))\nL2_1_yp = np.sqrt(L2_1_yp/(nface))\nL2_1_zm = np.sqrt(L2_1_zm/(nface))\nL2_1_zp = np.sqrt(L2_1_zp/(nface))\n\nN = 13 # This is the number of total gridpoints\nnface = 0 # This is the number of points we are taking the norm of.\nnint = 0 # This is the number of points we are taking the norm of.\nL2_2 = 0\nL2_2_xm = 0\nL2_2_xp = 0\nL2_2_ym = 0\nL2_2_yp = 0\nL2_2_zm = 0\nL2_2_zp = 0\nfor k in range(N):\n for j in range(N):\n for i in range(N):\n if i==0:\n L2_2_xm += Data2[IDX4(i,j,k,N,N,N),comp]**2\n nface += 1\n if i==N-1:\n L2_2_xp += Data2[IDX4(i,j,k,N,N,N),comp]**2\n if j==0:\n L2_2_ym += Data2[IDX4(i,j,k,N,N,N),comp]**2\n if j==N-1:\n L2_2_yp += Data2[IDX4(i,j,k,N,N,N),comp]**2\n if k==0:\n L2_2_zm += Data2[IDX4(i,j,k,N,N,N),comp]**2\n if k==N-1:\n L2_2_zp += Data2[IDX4(i,j,k,N,N,N),comp]**2\n if not (i%(N-1)==0 or j%(N-1)==0 or k%(N-1)==0):\n L2_2 += Data2[IDX4(i,j,k,N,N,N),comp]**2\n nint += 1\n\nL2_2 = np.sqrt(L2_2/(nint))\nL2_2_xm = np.sqrt(L2_2_xm/(nface))\nL2_2_xp = np.sqrt(L2_2_xp/(nface))\nL2_2_ym = np.sqrt(L2_2_ym/(nface))\nL2_2_yp = np.sqrt(L2_2_yp/(nface))\nL2_2_zm = np.sqrt(L2_2_zm/(nface))\nL2_2_zp = np.sqrt(L2_2_zp/(nface))\n\nprint(\"Face | Res | L2 norm | Conv. 
Order\")\nprint(\" Int | Dx | \" + \"{:.7f}\".format(L2_1) + \" | -- \")\nprint(\" -- | Dx/2 | \" + \"{:.7f}\".format(L2_2) + \" | \" + \"{:.5f}\".format(np.log2(L2_1/L2_2)))\nprint(\" -x | Dx | \" + \"{:.7f}\".format(L2_1_xm) + \" | -- \")\nprint(\" -- | Dx/2 | \" + \"{:.7f}\".format(L2_2_xm) + \" | \" + \"{:.5f}\".format(np.log2(L2_1_xm/L2_2_xm)))\nprint(\" +x | Dx | \" + \"{:.7f}\".format(L2_1_xp) + \" | -- \")\nprint(\" -- | Dx/2 | \" + \"{:.7f}\".format(L2_2_xp) + \" | \" + \"{:.5f}\".format(np.log2(L2_1_xp/L2_2_xp)))\nprint(\" -y | Dx | \" + \"{:.7f}\".format(L2_1_ym) + \" | -- \")\nprint(\" -- | Dx/2 | \" + \"{:.7f}\".format(L2_2_ym) + \" | \" + \"{:.5f}\".format(np.log2(L2_1_ym/L2_2_ym)))\nprint(\" +y | Dx | \" + \"{:.7f}\".format(L2_1_yp) + \" | -- \")\nprint(\" -- | Dx/2 | \" + \"{:.7f}\".format(L2_2_yp) + \" | \" + \"{:.5f}\".format(np.log2(L2_1_yp/L2_2_yp)))\nprint(\" -z | Dx | \" + \"{:.7f}\".format(L2_1_zm) + \" | -- \")\nprint(\" -- | Dx/2 | \" + \"{:.7f}\".format(L2_2_zm) + \" | \" + \"{:.5f}\".format(np.log2(L2_1_zm/L2_2_zm)))\nprint(\" +z | Dx | \" + \"{:.7f}\".format(L2_1_zp) + \" | -- \")\nprint(\" -- | Dx/2 | \" + \"{:.7f}\".format(L2_2_zp) + \" | \" + \"{:.5f}\".format(np.log2(L2_1_zp/L2_2_zp)))\n",
"Face | Res | L2 norm | Conv. Order\n Int | Dx | 0.0000005 | -- \n -- | Dx/2 | 0.0000001 | 2.03057\n -x | Dx | 0.0000008 | -- \n -- | Dx/2 | 0.0000002 | 2.08857\n +x | Dx | 0.0000008 | -- \n -- | Dx/2 | 0.0000002 | 2.08857\n -y | Dx | 0.0000008 | -- \n -- | Dx/2 | 0.0000002 | 1.64224\n +y | Dx | 0.0000016 | -- \n -- | Dx/2 | 0.0000004 | 1.87830\n -z | Dx | 0.0000010 | -- \n -- | Dx/2 | 0.0000002 | 2.09971\n +z | Dx | 0.0000008 | -- \n -- | Dx/2 | 0.0000002 | 1.99483\n"
]
],
[
[
"<a id='latex_pdf_output'></a>\n\n# Step 4: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.pdf](Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)",
"_____no_output_____"
]
],
[
[
"!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.ipynb\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.tex\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.tex\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.tex\n!rm -f Tut*.out Tut*.aux Tut*.log",
"This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d049ab17e4bb7e83f79301ac06315f76f0797c5a | 73,283 | ipynb | Jupyter Notebook | yr_Dec_clim_2000_2016.ipynb | franzihe/Python_Masterthesis | f6acd3a98edb859f11c3f1cd2bc62e31065f5f4a | [
"MIT"
] | null | null | null | yr_Dec_clim_2000_2016.ipynb | franzihe/Python_Masterthesis | f6acd3a98edb859f11c3f1cd2bc62e31065f5f4a | [
"MIT"
] | null | null | null | yr_Dec_clim_2000_2016.ipynb | franzihe/Python_Masterthesis | f6acd3a98edb859f11c3f1cd2bc62e31065f5f4a | [
"MIT"
] | null | null | null | 207.600567 | 36,510 | 0.899131 | [
[
[
"http://www.yr.no/place/Norway/Telemark/Vinje/Haukeliseter/climate.month12.html",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport matplotlib.dates as dates\nimport numpy as np \nimport csv\nimport pandas as pd\nimport datetime\nfrom datetime import date\nimport calendar\n%matplotlib inline ",
"_____no_output_____"
],
[
"year = np.arange(2000,2017, 1)",
"_____no_output_____"
],
[
"T_av = [-4.1,\\\n -8.2,\\\n -10.7,\\\n -4.3,\\\n -4.1,\\\n -5.5,\\\n -0.5,\\\n -6.4,\\\n -6.6,\\\n -9.4,\\\n -14.8,\\\n -4.4,\\\n -10.7,\\\n -2.1,\\\n -6.0,\\\n -2.4,\\\n -2.3]\n\nT_av = [float(i) for i in T_av]\n",
"_____no_output_____"
],
[
"Prec = [131.9,\\\n 91.0,\\\n 57.7,\\\n 120.8,\\\n 70.9,\\\n 79.2,\\\n 140.2,\\\n 143.6,\\\n 72.2,\\\n 104.4,\\\n 50.9,\\\n 145.2,\\\n 112.5,\\\n 196.9,\\\n 73.6,\\\n 132.5,\\\n 73.2]",
"_____no_output_____"
],
[
"T_ano = -7.5 +4.4\nT_ano",
"_____no_output_____"
],
[
"prec_tick = np.arange(0,300,50)\nt_tick = np.arange(-16,2,2)\n",
"_____no_output_____"
],
[
"fig1 = plt.figure(figsize=(11,7))\nax1 = fig1.add_subplot(1,1,1)\n\n\nbar2 = ax1.bar(year,Prec, label='precipitation',color='lightblue')\nax1.axhline(y=100,c=\"gray\",linewidth=2,zorder=1, linestyle = '--')\n\n\nplt.grid(b=None, which='major', axis='y')\n\n# add some text for labels, title and axes ticks\nax1.set_ylabel('Precipitation (%)', fontsize = '16')\nax1.set_yticklabels(prec_tick, fontsize='14')\n\n\nax1.set_title('30-yr Climate statistics December (2000 - 2016)', fontsize = '16')\nax1.set_xticks(year)\nax1.set_xticklabels(year, rotation=45,fontsize = '14') # rotate x label\nax1.set_ylim([0, 250])\n\n\n\ndef autolabel(rects):\n \"\"\"\n Attach a text label above each bar displaying its height\n \"\"\"\n for rect in rects:\n height = rect.get_height()\n ax1.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%d.2' % int(height),\n ha='center', va='bottom',fontsize =14)\n\nautolabel(bar2)\n\nplt.savefig('../Observations/clim_precDec_Haukeli.png')\n#plt.close(fig)\n\nplt.show(fig1)",
"_____no_output_____"
],
[
"fig2 = plt.figure(figsize=(11,7))\nax2 = fig2.add_subplot(1,1,1)\n\n\n\nline1 = ax2.plot(year,T_av, 'og', label = 'T_avg', markersize = 16)\nax2.axhline(y = -7.5, c ='darkgreen', linewidth = 2, zorder = 0, linestyle = '--')\nplt.grid(b=None, which='major', axis='both')\n\nax2.set_title('30-yr Climate statistics December (2000 - 2016)', fontsize = '16')\nax2.set_xticks(year)\nax2.set_yticklabels(t_tick, fontsize='14')\n\nax2.set_xticklabels(year, rotation=45,fontsize = '14') # rotate x label\n\n\n\n#ax1.legend((bar1[0], bar2[0]), ('Men', 'Women'))\n# add some text for labels, title and axes ticks\nax2.set_ylabel('Temperature C$^o$', fontsize = '16')\nax2.set_ylim([-15.5, 0])\n\n\n\n\nplt.savefig('../Observations/clim_tempDec_Haukeli.png')\n#plt.close(fig)\n\nplt.show(fig2)",
"_____no_output_____"
],
[
"t08 = 100/204.7 * 15.9\nt09 = 100/204.7 * 6.7\nt10 = 100/204.7 * 5.7\nt11 = 100/204.7 * 5.9\nt22 = 100/204.7 * 21.4\nt23 = 100/204.7 * 23.6\nt24 = 100/204.7 * 24.9\nt25 = 100/204.7 * 20.8\nt26 = 100/204.7 * 13.7\nt27 = 100/204.7 * 20.9\nt31 = 100/204.7 * 37.8\n\nprint(t08,t09,t10,t11)\nprint(t22,t23,t24,t25,t26,t27)\nprint(t31)",
"7.767464582315585 3.273082559843674 2.7845627747923793 2.8822667318026385\n10.454323400097705 11.529066927210554 12.164142647777235 10.161211529066929 6.692721055202736 10.210063507572057\n18.466047874938937\n"
],
[
"t22+t23+t24+t25+t26+t27",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d049be95ab29a554d25c4de7ba2d1b05fb91b082 | 168,432 | ipynb | Jupyter Notebook | db2.ipynb | Db2-DTE-POC/Db2-Openshift-11.5.4 | 874aa85e671b37334e2e164b5b6a6d1d977faa4b | [
"Apache-2.0"
] | 1 | 2022-01-11T13:26:24.000Z | 2022-01-11T13:26:24.000Z | db2.ipynb | Db2-DTE-POC/Db2-Openshift-11.5.4 | 874aa85e671b37334e2e164b5b6a6d1d977faa4b | [
"Apache-2.0"
] | null | null | null | db2.ipynb | Db2-DTE-POC/Db2-Openshift-11.5.4 | 874aa85e671b37334e2e164b5b6a6d1d977faa4b | [
"Apache-2.0"
] | 3 | 2021-12-11T18:01:44.000Z | 2022-03-29T16:58:11.000Z | 44.370917 | 707 | 0.447522 | [
[
[
"# DB2 Jupyter Notebook Extensions\nVersion: 2021-08-23",
"_____no_output_____"
],
[
"This code is imported as a Jupyter notebook extension in any notebooks you create with DB2 code in it. Place the following line of code in any notebook that you want to use these commands with:\n<pre>\n%run db2.ipynb\n</pre>\n\nThis code defines a Jupyter/Python magic command called `%sql` which allows you to execute DB2 specific calls to \nthe database. There are other packages available for manipulating databases, but this one has been specifically\ndesigned for demonstrating a number of the SQL features available in DB2.\n\nThere are two ways of executing the `%sql` command. A single line SQL statement would use the\nline format of the magic command:\n<pre>\n%sql SELECT * FROM EMPLOYEE\n</pre>\nIf you have a large block of sql then you would place the %%sql command at the beginning of the block and then\nplace the SQL statements into the remainder of the block. Using this form of the `%%sql` statement means that the\nnotebook cell can only contain SQL and no other statements.\n<pre>\n%%sql\nSELECT * FROM EMPLOYEE\nORDER BY LASTNAME\n</pre>\nYou can have multiple lines in the SQL block (`%%sql`). The default SQL delimiter is the semi-column (`;`).\nIf you have scripts (triggers, procedures, functions) that use the semi-colon as part of the script, you \nwill need to use the `-d` option to change the delimiter to an at \"`@`\" sign. \n<pre>\n%%sql -d\nSELECT * FROM EMPLOYEE\n@\nCREATE PROCEDURE ...\n@\n</pre>\n\nThe `%sql` command allows most DB2 commands to execute and has a special version of the CONNECT statement. \nA CONNECT by itself will attempt to reconnect to the database using previously used settings. If it cannot \nconnect, it will prompt the user for additional information. \n\nThe CONNECT command has the following format:\n<pre>\n%sql CONNECT TO <database> USER <userid> USING <password | ?> HOST <ip address> PORT <port number>\n</pre>\nIf you use a \"`?`\" for the password field, the system will prompt you for a password. This avoids typing the \npassword as clear text on the screen. If a connection is not successful, the system will print the error\nmessage associated with the connect request.\n\nIf the connection is successful, the parameters are saved on your system and will be used the next time you\nrun a SQL statement, or when you issue the %sql CONNECT command with no parameters.",
"_____no_output_____"
],
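As a quick end-to-end illustration (the database name and connection values below are placeholders, not part of the original text), a typical session might combine the extension load, a connection, and a query:

```
%run db2.ipynb
%sql CONNECT TO sample USER db2inst1 USING ? HOST localhost PORT 50000
%sql -sampledata
%sql SELECT LASTNAME FROM EMPLOYEE
```

The `-sampledata` flag creates and loads the EMPLOYEE and DEPARTMENT tables so that the final query has something to return.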
[
"In addition to the -d option, there are a number different options that you can specify at the beginning of \nthe SQL:\n\n - `-d, -delim` - Change SQL delimiter to \"`@`\" from \"`;`\"\n - `-q, -quiet` - Quiet results - no messages returned from the function\n - `-r, -array` - Return the result set as an array of values instead of a dataframe\n - `-t, -time` - Time the following SQL statement and return the number of times it executes in 1 second\n - `-j` - Format the first character column of the result set as a JSON record\n - `-json` - Return result set as an array of json records\n - `-a, -all` - Return all rows in answer set and do not limit display\n - `-grid` - Display the results in a scrollable grid\n - `-pb, -bar` - Plot the results as a bar chart\n - `-pl, -line` - Plot the results as a line chart\n - `-pp, -pie` - Plot the results as a pie chart\n - `-e, -echo` - Any macro expansions are displayed in an output box \n - `-sampledata` - Create and load the EMPLOYEE and DEPARTMENT tables\n\n<p>\nYou can pass python variables to the `%sql` command by using the `{}` braces with the name of the\nvariable inbetween. Note that you will need to place proper punctuation around the variable in the event the\nSQL command requires it. For instance, the following example will find employee '000010' in the EMPLOYEE table.\n<pre>\nempno = '000010'\n%sql SELECT LASTNAME FROM EMPLOYEE WHERE EMPNO='{empno}'\n</pre>\n\nThe other option is to use parameter markers. What you would need to do is use the name of the variable with a colon in front of it and the program will prepare the statement and then pass the variable to Db2 when the statement is executed. This allows you to create complex strings that might contain quote characters and other special characters and not have to worry about enclosing the string with the correct quotes. Note that you do not place the quotes around the variable even though it is a string.\n\n<pre>\nempno = '000020'\n%sql SELECT LASTNAME FROM EMPLOYEE WHERE EMPNO=:empno\n</pre>",
"_____no_output_____"
],
[
"## Development SQL\nThe previous set of `%sql` and `%%sql` commands deals with SQL statements and commands that are run in an interactive manner. There is a class of SQL commands that are more suited to a development environment where code is iterated or requires changing input. The commands that are associated with this form of SQL are:\n- AUTOCOMMIT\n- COMMIT/ROLLBACK\n- PREPARE \n- EXECUTE\n\nAutocommit is the default manner in which SQL statements are executed. At the end of the successful completion of a statement, the results are commited to the database. There is no concept of a transaction where multiple DML/DDL statements are considered one transaction. The `AUTOCOMMIT` command allows you to turn autocommit `OFF` or `ON`. This means that the set of SQL commands run after the `AUTOCOMMIT OFF` command are executed are not commited to the database until a `COMMIT` or `ROLLBACK` command is issued.\n\n`COMMIT` (`WORK`) will finalize all of the transactions (`COMMIT`) to the database and `ROLLBACK` will undo all of the changes. If you issue a `SELECT` statement during the execution of your block, the results will reflect all of your changes. If you `ROLLBACK` the transaction, the changes will be lost.\n\n`PREPARE` is typically used in a situation where you want to repeatidly execute a SQL statement with different variables without incurring the SQL compilation overhead. For instance:\n```\nx = %sql PREPARE SELECT LASTNAME FROM EMPLOYEE WHERE EMPNO=?\nfor y in ['000010','000020','000030']:\n %sql execute :x using :y\n```\n`EXECUTE` is used to execute a previously compiled statement. \n\nTo retrieve the error codes that might be associated with any SQL call, the following variables are updated after every call:\n\n* SQLCODE\n* SQLSTATE\n* SQLERROR - Full error message retrieved from Db2",
"_____no_output_____"
],
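A comparable sketch for the transaction commands described above might look like the following (a hypothetical sequence that assumes the EMPLOYEE sample table is loaded; employee '000010' is the row used in the earlier examples):

```
%sql AUTOCOMMIT OFF
%sql UPDATE EMPLOYEE SET LASTNAME = 'SMITH' WHERE EMPNO = '000010'
%sql SELECT LASTNAME FROM EMPLOYEE WHERE EMPNO = '000010'
%sql ROLLBACK
%sql AUTOCOMMIT ON
```

The SELECT inside the transaction reflects the update, while the ROLLBACK discards it, so the original last name is back once autocommit is turned on again.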
[
"### Install Db2 Python Driver\nIf the ibm_db driver is not installed on your system, the subsequent Db2 commands will fail. In order to install the Db2 driver, issue the following command from a Jupyter notebook cell:\n```\n!pip install --user ibm_db\n```",
"_____no_output_____"
],
[
"### Db2 Jupyter Extensions\nThis section of code has the import statements and global variables defined for the remainder of the functions.",
"_____no_output_____"
]
],
[
[
"#\n# Set up Jupyter MAGIC commands \"sql\". \n# %sql will return results from a DB2 select statement or execute a DB2 command\n#\n# IBM 2021: George Baklarz\n# Version 2021-07-13\n#\n\nfrom __future__ import print_function\nfrom IPython.display import HTML as pHTML, Image as pImage, display as pdisplay, Javascript as Javascript\nfrom IPython.core.magic import (Magics, magics_class, line_magic,\n cell_magic, line_cell_magic, needs_local_scope)\nimport ibm_db\nimport pandas\nimport ibm_db_dbi\nimport json\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport getpass\nimport os\nimport pickle\nimport time\nimport sys\nimport re\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n# Python Hack for Input between 2 and 3\n\ntry: \n input = raw_input \nexcept NameError: \n pass \n\n_settings = {\n \"maxrows\" : 10,\n \"maxgrid\" : 5,\n \"runtime\" : 1,\n \"display\" : \"PANDAS\",\n \"database\" : \"\",\n \"hostname\" : \"localhost\",\n \"port\" : \"50000\",\n \"protocol\" : \"TCPIP\", \n \"uid\" : \"DB2INST1\",\n \"pwd\" : \"password\",\n \"ssl\" : \"\"\n}\n\n_environment = {\n \"jupyter\" : True,\n \"qgrid\" : True\n}\n\n_display = {\n 'fullWidthRows': True,\n 'syncColumnCellResize': True,\n 'forceFitColumns': False,\n 'defaultColumnWidth': 150,\n 'rowHeight': 28,\n 'enableColumnReorder': False,\n 'enableTextSelectionOnCells': True,\n 'editable': False,\n 'autoEdit': False,\n 'explicitInitialization': True,\n 'maxVisibleRows': 5,\n 'minVisibleRows': 5,\n 'sortable': True,\n 'filterable': False,\n 'highlightSelectedCell': False,\n 'highlightSelectedRow': True\n}\n\n# Connection settings for statements \n\n_connected = False\n_hdbc = None\n_hdbi = None\n_stmt = []\n_stmtID = []\n_stmtSQL = []\n_vars = {}\n_macros = {}\n_flags = []\n_debug = False\n\n# Db2 Error Messages and Codes\nsqlcode = 0\nsqlstate = \"0\"\nsqlerror = \"\"\nsqlelapsed = 0\n\n# Check to see if QGrid is installed\n\ntry:\n import qgrid\n qgrid.set_defaults(grid_options=_display)\nexcept:\n _environment['qgrid'] = False\n \n# Check if we are running in iPython or Jupyter\n\ntry:\n if (get_ipython().config == {}): \n _environment['jupyter'] = False\n _environment['qgrid'] = False\n else:\n _environment['jupyter'] = True\nexcept:\n _environment['jupyter'] = False\n _environment['qgrid'] = False\n",
"_____no_output_____"
]
],
[
[
"## Options\nThere are four options that can be set with the **`%sql`** command. These options are shown below with the default value shown in parenthesis.\n- **`MAXROWS n (10)`** - The maximum number of rows that will be displayed before summary information is shown. If the answer set is less than this number of rows, it will be completely shown on the screen. If the answer set is larger than this amount, only the first 5 rows and last 5 rows of the answer set will be displayed. If you want to display a very large answer set, you may want to consider using the grid option `-g` to display the results in a scrollable table. If you really want to show all results then setting MAXROWS to -1 will return all output.\n\n- **`MAXGRID n (5)`** - The maximum size of a grid display. When displaying a result set in a grid `-g`, the default size of the display window is 5 rows. You can set this to a larger size so that more rows are shown on the screen. Note that the minimum size always remains at 5 which means that if the system is unable to display your maximum row size it will reduce the table display until it fits.\n\n- **`DISPLAY PANDAS | GRID (PANDAS)`** - Display the results as a PANDAS dataframe (default) or as a scrollable GRID\n\n- **`RUNTIME n (1)`** - When using the timer option on a SQL statement, the statement will execute for **`n`** number of seconds. The result that is returned is the number of times the SQL statement executed rather than the execution time of the statement. The default value for runtime is one second, so if the SQL is very complex you will need to increase the run time.\n\n- **`LIST`** - Display the current settings\n\nTo set an option use the following syntax:\n```\n%sql option option_name value option_name value ....\n```\nThe following example sets all options:\n```\n%sql option maxrows 100 runtime 2 display grid maxgrid 10\n```\nThe values will **not** be saved between Jupyter notebooks sessions. If you need to retrieve the current options values, use the LIST command as the only argument:\n```\n%sql option list\n```\n",
"_____no_output_____"
]
],
[
[
"def setOptions(inSQL):\n\n global _settings, _display\n\n cParms = inSQL.split()\n cnt = 0\n\n while cnt < len(cParms):\n if cParms[cnt].upper() == 'MAXROWS':\n \n if cnt+1 < len(cParms):\n try:\n _settings[\"maxrows\"] = int(cParms[cnt+1])\n except Exception as err:\n errormsg(\"Invalid MAXROWS value provided.\")\n pass\n cnt = cnt + 1\n else:\n errormsg(\"No maximum rows specified for the MAXROWS option.\")\n return\n \n elif cParms[cnt].upper() == 'MAXGRID':\n \n if cnt+1 < len(cParms):\n try:\n maxgrid = int(cParms[cnt+1])\n if (maxgrid <= 5): # Minimum window size is 5\n maxgrid = 5\n _display[\"maxVisibleRows\"] = int(cParms[cnt+1])\n try:\n import qgrid\n qgrid.set_defaults(grid_options=_display)\n except:\n _environment['qgrid'] = False\n \n except Exception as err:\n errormsg(\"Invalid MAXGRID value provided.\")\n pass\n cnt = cnt + 1\n else:\n errormsg(\"No maximum rows specified for the MAXROWS option.\")\n return \n \n elif cParms[cnt].upper() == 'RUNTIME':\n if cnt+1 < len(cParms):\n try:\n _settings[\"runtime\"] = int(cParms[cnt+1])\n except Exception as err:\n errormsg(\"Invalid RUNTIME value provided.\")\n pass\n cnt = cnt + 1\n else:\n errormsg(\"No value provided for the RUNTIME option.\")\n return \n \n elif cParms[cnt].upper() == 'DISPLAY':\n if cnt+1 < len(cParms):\n if (cParms[cnt+1].upper() == 'GRID'):\n _settings[\"display\"] = 'GRID'\n elif (cParms[cnt+1].upper() == 'PANDAS'):\n _settings[\"display\"] = 'PANDAS'\n else:\n errormsg(\"Invalid DISPLAY value provided.\")\n cnt = cnt + 1\n else:\n errormsg(\"No value provided for the DISPLAY option.\")\n return \n elif (cParms[cnt].upper() == 'LIST'):\n print(\"(MAXROWS) Maximum number of rows displayed: \" + str(_settings[\"maxrows\"]))\n print(\"(MAXGRID) Maximum grid display size: \" + str(_settings[\"maxgrid\"]))\n print(\"(RUNTIME) How many seconds to a run a statement for performance testing: \" + str(_settings[\"runtime\"]))\n print(\"(DISPLAY) Use PANDAS or GRID display format for output: \" + _settings[\"display\"]) \n return\n else:\n cnt = cnt + 1\n \n save_settings()",
"_____no_output_____"
]
],
[
[
"### SQL Help\n\nThe calling format of this routine is:\n\n```\nsqlhelp()\n```\n\nThis code displays help related to the %sql magic command. This help is displayed when you issue a %sql or %%sql command by itself, or use the %sql -h flag.",
"_____no_output_____"
]
],
[
[
"def sqlhelp():\n \n global _environment\n \n if (_environment[\"jupyter\"] == True):\n sd = '<td style=\"text-align:left;\">'\n ed1 = '</td>'\n ed2 = '</td>'\n sh = '<th style=\"text-align:left;\">'\n eh1 = '</th>'\n eh2 = '</th>'\n sr = '<tr>'\n er = '</tr>'\n helpSQL = \"\"\"\n <h3>SQL Options</h3> \n <p>The following options are available as part of a SQL statement. The options are always preceded with a\n minus sign (i.e. -q).\n <table>\n {sr}\n {sh}Option{eh1}{sh}Description{eh2}\n {er}\n {sr}\n {sd}a, all{ed1}{sd}Return all rows in answer set and do not limit display{ed2}\n {er} \n {sr}\n {sd}d{ed1}{sd}Change SQL delimiter to \"@\" from \";\"{ed2}\n {er}\n {sr}\n {sd}e, echo{ed1}{sd}Echo the SQL command that was generated after macro and variable substituion.{ed2}\n {er}\n {sr}\n {sd}h, help{ed1}{sd}Display %sql help information.{ed2}\n {er} \n {sr}\n {sd}j{ed1}{sd}Create a pretty JSON representation. Only the first column is formatted{ed2}\n {er}\n {sr}\n {sd}json{ed1}{sd}Retrieve the result set as a JSON record{ed2}\n {er} \n {sr}\n {sd}pb, bar{ed1}{sd}Plot the results as a bar chart{ed2}\n {er}\n {sr}\n {sd}pl, line{ed1}{sd}Plot the results as a line chart{ed2}\n {er}\n {sr}\n {sd}pp, pie{ed1}{sd}Plot Pie: Plot the results as a pie chart{ed2}\n {er} \n {sr}\n {sd}q, quiet{ed1}{sd}Quiet results - no answer set or messages returned from the function{ed2}\n {er}\n {sr} \n {sd}r, array{ed1}{sd}Return the result set as an array of values{ed2}\n {er}\n {sr}\n {sd}sampledata{ed1}{sd}Create and load the EMPLOYEE and DEPARTMENT tables{ed2}\n {er} \n {sr}\n {sd}t,time{ed1}{sd}Time the following SQL statement and return the number of times it executes in 1 second{ed2}\n {er}\n {sr}\n {sd}grid{ed1}{sd}Display the results in a scrollable grid{ed2}\n {er} \n \n </table>\n \"\"\" \n else:\n helpSQL = \"\"\"\nSQL Options\n\nThe following options are available as part of a SQL statement. Options are always \npreceded with a minus sign (i.e. -q).\n\nOption Description\na, all Return all rows in answer set and do not limit display \nd Change SQL delimiter to \"@\" from \";\" \ne, echo Echo the SQL command that was generated after substitution \nh, help Display %sql help information\nj Create a pretty JSON representation. Only the first column is formatted \njson Retrieve the result set as a JSON record \npb, bar Plot the results as a bar chart \npl, line Plot the results as a line chart \npp, pie Plot Pie: Plot the results as a pie chart \nq, quiet Quiet results - no answer set or messages returned from the function \nr, array Return the result set as an array of values \nsampledata Create and load the EMPLOYEE and DEPARTMENT tables \nt,time Time the SQL statement and return the execution count per second\ngrid Display the results in a scrollable grid \n \"\"\" \n helpSQL = helpSQL.format(**locals())\n \n if (_environment[\"jupyter\"] == True):\n pdisplay(pHTML(helpSQL))\n else:\n print(helpSQL)",
"_____no_output_____"
]
],
[
[
"### Connection Help\n\nThe calling format of this routine is:\n\n```\nconnected_help()\n```\n\nThis code displays help related to the CONNECT command. This code is displayed when you issue a %sql CONNECT command with no arguments or you are running a SQL statement and there isn't any connection to a database yet.",
"_____no_output_____"
]
],
[
[
"def connected_help():\n \n \n sd = '<td style=\"text-align:left;\">'\n ed = '</td>'\n sh = '<th style=\"text-align:left;\">'\n eh = '</th>'\n sr = '<tr>'\n er = '</tr>'\n \n if (_environment['jupyter'] == True):\n \n helpConnect = \"\"\"\n <h3>Connecting to Db2</h3> \n <p>The CONNECT command has the following format:\n <p>\n <pre>\n %sql CONNECT TO <database> USER <userid> USING <password|?> HOST <ip address> PORT <port number> <SSL>\n %sql CONNECT CREDENTIALS <varname>\n %sql CONNECT CLOSE\n %sql CONNECT RESET\n %sql CONNECT PROMPT - use this to be prompted for values\n </pre>\n <p>\n If you use a \"?\" for the password field, the system will prompt you for a password. This avoids typing the \n password as clear text on the screen. If a connection is not successful, the system will print the error\n message associated with the connect request.\n <p>\n The <b>CREDENTIALS</b> option allows you to use credentials that are supplied by Db2 on Cloud instances.\n The credentials can be supplied as a variable and if successful, the variable will be saved to disk \n for future use. If you create another notebook and use the identical syntax, if the variable \n is not defined, the contents on disk will be used as the credentials. You should assign the \n credentials to a variable that represents the database (or schema) that you are communicating with. \n Using familiar names makes it easier to remember the credentials when connecting. \n <p>\n <b>CONNECT CLOSE</b> will close the current connection, but will not reset the database parameters. This means that\n if you issue the CONNECT command again, the system should be able to reconnect you to the database.\n <p>\n <b>CONNECT RESET</b> will close the current connection and remove any information on the connection. You will need \n to issue a new CONNECT statement with all of the connection information.\n <p>\n If the connection is successful, the parameters are saved on your system and will be used the next time you\n run an SQL statement, or when you issue the %sql CONNECT command with no parameters.\n <p>If you issue CONNECT RESET, all of the current values will be deleted and you will need to \n issue a new CONNECT statement. \n <p>A CONNECT command without any parameters will attempt to re-connect to the previous database you \n were using. If the connection could not be established, the program to prompt you for\n the values. To cancel the connection attempt, enter a blank value for any of the values. The connection \n panel will request the following values in order to connect to Db2: \n <table>\n {sr}\n {sh}Setting{eh}\n {sh}Description{eh}\n {er}\n {sr}\n {sd}Database{ed}{sd}Database name you want to connect to.{ed}\n {er}\n {sr}\n {sd}Hostname{ed}\n {sd}Use localhost if Db2 is running on your own machine, but this can be an IP address or host name. \n {er}\n {sr}\n {sd}PORT{ed}\n {sd}The port to use for connecting to Db2. This is usually 50000.{ed}\n {er}\n {sr}\n {sd}SSL{ed}\n {sd}If you are connecting to a secure port (50001) with SSL then you must include this keyword in the connect string.{ed}\n {sr} \n {sd}Userid{ed}\n {sd}The userid to use when connecting (usually DB2INST1){ed} \n {er}\n {sr} \n {sd}Password{ed}\n {sd}No password is provided so you have to enter a value{ed}\n {er}\n </table>\n \"\"\"\n else:\n helpConnect = \"\"\"\\\nConnecting to Db2\n\nThe CONNECT command has the following format:\n\n%sql CONNECT TO database USER userid USING password | ? 
\n HOST ip address PORT port number SSL\n%sql CONNECT CREDENTIALS varname\n%sql CONNECT CLOSE\n%sql CONNECT RESET\n\nIf you use a \"?\" for the password field, the system will prompt you for a password.\nThis avoids typing the password as clear text on the screen. If a connection is \nnot successful, the system will print the error message associated with the connect\nrequest.\n\nThe CREDENTIALS option allows you to use credentials that are supplied by Db2 on \nCloud instances. The credentials can be supplied as a variable and if successful, \nthe variable will be saved to disk for future use. If you create another notebook\nand use the identical syntax, if the variable is not defined, the contents on disk\nwill be used as the credentials. You should assign the credentials to a variable \nthat represents the database (or schema) that you are communicating with. Using \nfamiliar names makes it easier to remember the credentials when connecting. \n\nCONNECT CLOSE will close the current connection, but will not reset the database \nparameters. This means that if you issue the CONNECT command again, the system \nshould be able to reconnect you to the database.\n\nCONNECT RESET will close the current connection and remove any information on the\nconnection. You will need to issue a new CONNECT statement with all of the connection\ninformation.\n\nIf the connection is successful, the parameters are saved on your system and will be\nused the next time you run an SQL statement, or when you issue the %sql CONNECT \ncommand with no parameters. If you issue CONNECT RESET, all of the current values \nwill be deleted and you will need to issue a new CONNECT statement. \n\nA CONNECT command without any parameters will attempt to re-connect to the previous \ndatabase you were using. If the connection could not be established, the program to\nprompt you for the values. To cancel the connection attempt, enter a blank value for\nany of the values. The connection panel will request the following values in order \nto connect to Db2: \n\n Setting Description\n Database Database name you want to connect to\n Hostname Use localhost if Db2 is running on your own machine, but this can \n be an IP address or host name. \n PORT The port to use for connecting to Db2. This is usually 50000. \n Userid The userid to use when connecting (usually DB2INST1) \n Password No password is provided so you have to enter a value\n SSL Include this keyword to indicate you are connecting via SSL (usually port 50001)\n\"\"\"\n \n helpConnect = helpConnect.format(**locals())\n \n if (_environment['jupyter'] == True):\n pdisplay(pHTML(helpConnect))\n else:\n print(helpConnect)",
"_____no_output_____"
]
],
[
[
"### Prompt for Connection Information\nIf you are running an SQL statement and have not yet connected to a database, the %sql command will prompt you for connection information. In order to connect to a database, you must supply:\n\n- Database name \n- Host name (IP address or name)\n- Port number\n- Userid\n- Password\n- Secure socket\n\nThe routine is called without any parameters:\n\n```\nconnected_prompt()\n```",
"_____no_output_____"
]
],
[
[
"# Prompt for Connection information\n\ndef connected_prompt():\n \n global _settings\n \n _database = ''\n _hostname = ''\n _port = ''\n _uid = ''\n _pwd = ''\n _ssl = ''\n \n print(\"Enter the database connection details (Any empty value will cancel the connection)\")\n _database = input(\"Enter the database name: \");\n if (_database.strip() == \"\"): return False\n _hostname = input(\"Enter the HOST IP address or symbolic name: \");\n if (_hostname.strip() == \"\"): return False \n _port = input(\"Enter the PORT number: \");\n if (_port.strip() == \"\"): return False \n _ssl = input(\"Is this a secure (SSL) port (y or n)\");\n if (_ssl.strip() == \"\"): return False\n if (_ssl == \"n\"):\n _ssl = \"\"\n else:\n _ssl = \"Security=SSL;\" \n _uid = input(\"Enter Userid on the DB2 system: \").upper();\n if (_uid.strip() == \"\"): return False\n _pwd = getpass.getpass(\"Password [password]: \");\n if (_pwd.strip() == \"\"): return False\n \n _settings[\"database\"] = _database.strip()\n _settings[\"hostname\"] = _hostname.strip()\n _settings[\"port\"] = _port.strip()\n _settings[\"uid\"] = _uid.strip()\n _settings[\"pwd\"] = _pwd.strip()\n _settings[\"ssl\"] = _ssl.strip()\n _settings[\"maxrows\"] = 10\n _settings[\"maxgrid\"] = 5\n _settings[\"runtime\"] = 1\n \n return True\n \n# Split port and IP addresses\n\ndef split_string(in_port,splitter=\":\"):\n \n # Split input into an IP address and Port number\n \n global _settings\n\n checkports = in_port.split(splitter)\n ip = checkports[0]\n if (len(checkports) > 1):\n port = checkports[1]\n else:\n port = None\n\n return ip, port",
"_____no_output_____"
]
],
[
[
"### Connect Syntax Parser\nThe parseConnect routine is used to parse the CONNECT command that the user issued within the %sql command. The format of the command is:\n\n```\nparseConnect(inSQL)\n```\n\nThe inSQL string contains the CONNECT keyword with some additional parameters. The format of the CONNECT command is one of:\n\n```\nCONNECT RESET\nCONNECT CLOSE\nCONNECT CREDENTIALS <variable>\nCONNECT TO database USER userid USING password HOST hostname PORT portnumber <SSL>\n```\n\nIf you have credentials available from Db2 on Cloud, place the contents of the credentials into a variable and then use the `CONNECT CREDENTIALS <var>` syntax to connect to the database.\n\nIn addition, supplying a question mark (?) for password will result in the program prompting you for the password rather than having it as clear text in your scripts.\n\nWhen all of the information is checked in the command, the db2_doConnect function is called to actually do the connection to the database.\n",
"_____no_output_____"
]
],
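For example, a credentials dictionary only needs the keys that parseConnect reads (db, hostname, port, username and password). The values below are made up for illustration:

```
# Hypothetical Db2 on Cloud style credentials; replace every value with the
# ones supplied by your own service credentials.
mydb = {
    "db"       : "BLUDB",
    "hostname" : "my-db2-host.example.com",
    "port"     : "50000",
    "username" : "myuser",
    "password" : "mypassword"
}

# The connection is then made (in its own notebook cell) with:
#   %sql CONNECT CREDENTIALS mydb
```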
[
[
"# Parse the CONNECT statement and execute if possible \n\ndef parseConnect(inSQL,local_ns):\n \n global _settings, _connected\n\n _connected = False\n \n cParms = inSQL.split()\n cnt = 0\n \n _settings[\"ssl\"] = \"\"\n \n while cnt < len(cParms):\n if cParms[cnt].upper() == 'TO':\n if cnt+1 < len(cParms):\n _settings[\"database\"] = cParms[cnt+1].upper()\n cnt = cnt + 1\n else:\n errormsg(\"No database specified in the CONNECT statement\")\n return\n elif cParms[cnt].upper() == \"SSL\":\n _settings[\"ssl\"] = \"Security=SSL;\" \n cnt = cnt + 1\n elif cParms[cnt].upper() == 'CREDENTIALS':\n if cnt+1 < len(cParms):\n credentials = cParms[cnt+1]\n tempid = eval(credentials,local_ns)\n if (isinstance(tempid,dict) == False): \n errormsg(\"The CREDENTIALS variable (\" + credentials + \") does not contain a valid Python dictionary (JSON object)\")\n return\n if (tempid == None):\n fname = credentials + \".pickle\"\n try:\n with open(fname,'rb') as f: \n _id = pickle.load(f) \n except:\n errormsg(\"Unable to find credential variable or file.\")\n return\n else:\n _id = tempid\n \n try:\n _settings[\"database\"] = _id[\"db\"]\n _settings[\"hostname\"] = _id[\"hostname\"]\n _settings[\"port\"] = _id[\"port\"]\n _settings[\"uid\"] = _id[\"username\"]\n _settings[\"pwd\"] = _id[\"password\"]\n try:\n fname = credentials + \".pickle\"\n with open(fname,'wb') as f:\n pickle.dump(_id,f)\n \n except:\n errormsg(\"Failed trying to write Db2 Credentials.\")\n return\n except:\n errormsg(\"Credentials file is missing information. db/hostname/port/username/password required.\")\n return\n \n else:\n errormsg(\"No Credentials name supplied\")\n return\n \n cnt = cnt + 1\n \n elif cParms[cnt].upper() == 'USER':\n if cnt+1 < len(cParms):\n _settings[\"uid\"] = cParms[cnt+1].upper()\n cnt = cnt + 1\n else:\n errormsg(\"No userid specified in the CONNECT statement\")\n return\n elif cParms[cnt].upper() == 'USING':\n if cnt+1 < len(cParms):\n _settings[\"pwd\"] = cParms[cnt+1] \n if (_settings[\"pwd\"] == '?'):\n _settings[\"pwd\"] = getpass.getpass(\"Password [password]: \") or \"password\"\n cnt = cnt + 1\n else:\n errormsg(\"No password specified in the CONNECT statement\")\n return\n elif cParms[cnt].upper() == 'HOST':\n if cnt+1 < len(cParms):\n hostport = cParms[cnt+1].upper()\n ip, port = split_string(hostport)\n if (port == None): _settings[\"port\"] = \"50000\"\n _settings[\"hostname\"] = ip\n cnt = cnt + 1\n else:\n errormsg(\"No hostname specified in the CONNECT statement\")\n return\n elif cParms[cnt].upper() == 'PORT': \n if cnt+1 < len(cParms):\n _settings[\"port\"] = cParms[cnt+1].upper()\n cnt = cnt + 1\n else:\n errormsg(\"No port specified in the CONNECT statement\")\n return\n elif cParms[cnt].upper() == 'PROMPT':\n if (connected_prompt() == False): \n print(\"Connection canceled.\")\n return \n else:\n cnt = cnt + 1\n elif cParms[cnt].upper() in ('CLOSE','RESET') :\n try:\n result = ibm_db.close(_hdbc)\n _hdbi.close()\n except:\n pass\n success(\"Connection closed.\") \n if cParms[cnt].upper() == 'RESET': \n _settings[\"database\"] = ''\n return\n else:\n cnt = cnt + 1\n \n _ = db2_doConnect()",
"_____no_output_____"
]
],
[
[
"### Connect to Db2\nThe db2_doConnect routine is called when a connection needs to be established to a Db2 database. The command does not require any parameters since it relies on the settings variable which contains all of the information it needs to connect to a Db2 database.\n\n```\ndb2_doConnect()\n```\n\nThere are 4 additional variables that are used throughout the routines to stay connected with the Db2 database. These variables are:\n- hdbc - The connection handle to the database\n- hstmt - A statement handle used for executing SQL statements\n- connected - A flag that tells the program whether or not we are currently connected to a database\n- runtime - Used to tell %sql the length of time (default 1 second) to run a statement when timing it\n\nThe only database driver that is used in this program is the IBM DB2 ODBC DRIVER. This driver needs to be loaded on the system that is connecting to Db2. The Jupyter notebook that is built by this system installs the driver for you so you shouldn't have to do anything other than build the container.\n\nIf the connection is successful, the connected flag is set to True. Any subsequent %sql call will check to see if you are connected and initiate another prompted connection if you do not have a connection to a database.",
"_____no_output_____"
]
],
[
[
"def db2_doConnect():\n \n global _hdbc, _hdbi, _connected, _runtime\n global _settings \n\n if _connected == False: \n \n if len(_settings[\"database\"]) == 0:\n return False\n\n dsn = (\n \"DRIVER={{IBM DB2 ODBC DRIVER}};\"\n \"DATABASE={0};\"\n \"HOSTNAME={1};\"\n \"PORT={2};\"\n \"PROTOCOL=TCPIP;\"\n \"UID={3};\"\n \"PWD={4};{5}\").format(_settings[\"database\"], \n _settings[\"hostname\"], \n _settings[\"port\"], \n _settings[\"uid\"], \n _settings[\"pwd\"],\n _settings[\"ssl\"])\n\n # Get a database handle (hdbc) and a statement handle (hstmt) for subsequent access to DB2\n\n try:\n _hdbc = ibm_db.connect(dsn, \"\", \"\")\n except Exception as err:\n db2_error(False,True) # errormsg(str(err))\n _connected = False\n _settings[\"database\"] = ''\n return False\n \n try:\n _hdbi = ibm_db_dbi.Connection(_hdbc)\n except Exception as err:\n db2_error(False,True) # errormsg(str(err))\n _connected = False\n _settings[\"database\"] = ''\n return False \n \n _connected = True\n \n # Save the values for future use\n \n save_settings()\n \n success(\"Connection successful.\")\n return True\n ",
"_____no_output_____"
]
],
[
[
"### Load/Save Settings\n\nThere are two routines that load and save settings between Jupyter notebooks. These routines are called without any parameters.\n\n```\nload_settings() save_settings()\n```\n\nThere is a global structure called settings which contains the following fields:\n\n```\n_settings = {\n \"maxrows\" : 10, \n \"maxgrid\" : 5,\n \"runtime\" : 1,\n \"display\" : \"TEXT\",\n \"database\" : \"\",\n \"hostname\" : \"localhost\",\n \"port\" : \"50000\",\n \"protocol\" : \"TCPIP\", \n \"uid\" : \"DB2INST1\",\n \"pwd\" : \"password\"\n}\n```\n\nThe information in the settings structure is used for re-connecting to a database when you start up a Jupyter notebook. When the session is established for the first time, the load_settings() function is called to get the contents of the pickle file (db2connect.pickle, a Jupyter session file) that will be used for the first connection to the database. Whenever a new connection is made, the file is updated with the save_settings() function.",
"_____no_output_____"
]
],
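As a small illustration of the mechanism (not part of the original code), the save/load cycle is just a pickle round trip of the settings dictionary to the db2connect.pickle session file:

```
import pickle

# A cut-down settings dictionary; the real _settings structure holds the full
# set of keys shown above. The values here are only placeholders.
settings = {"database": "SAMPLE", "hostname": "localhost", "port": "50000",
            "uid": "DB2INST1", "pwd": "password", "runtime": 1}

# save_settings() writes the dictionary to db2connect.pickle ...
with open("db2connect.pickle", "wb") as f:
    pickle.dump(settings, f)

# ... and load_settings() reads it back at the start of the next session.
with open("db2connect.pickle", "rb") as f:
    restored = pickle.load(f)

print(restored["database"])   # -> SAMPLE
```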
[
[
"def load_settings():\n\n # This routine will load the settings from the previous session if they exist\n \n global _settings\n \n fname = \"db2connect.pickle\"\n\n try:\n with open(fname,'rb') as f: \n _settings = pickle.load(f) \n \n # Reset runtime to 1 since it would be unexpected to keep the same value between connections \n _settings[\"runtime\"] = 1\n _settings[\"maxgrid\"] = 5\n \n except: \n pass\n \n return\n\ndef save_settings():\n\n # This routine will save the current settings if they exist\n \n global _settings\n \n fname = \"db2connect.pickle\"\n \n try:\n with open(fname,'wb') as f:\n pickle.dump(_settings,f)\n \n except:\n errormsg(\"Failed trying to write Db2 Configuration Information.\")\n \n return ",
"_____no_output_____"
]
],
[
[
"### Error and Message Functions\n\nThere are three types of messages that are thrown by the %db2 magic command. The first routine will print out a success message with no special formatting:\n\n```\nsuccess(message)\n```\n\nThe second message is used for displaying an error message that is not associated with a SQL error. This type of error message is surrounded with a red box to highlight the problem. Note that the success message has code that has been commented out that could also show a successful return code with a green box. \n\n```\nerrormsg(message)\n```\n\nThe final error message is based on an error occuring in the SQL code that was executed. This code will parse the message returned from the ibm_db interface and parse it to return only the error message portion (and not all of the wrapper code from the driver).\n\n```\ndb2_error(quiet,connect=False)\n```\n\nThe quiet flag is passed to the db2_error routine so that messages can be suppressed if the user wishes to ignore them with the -q flag. A good example of this is dropping a table that does not exist. We know that an error will be thrown so we can ignore it. The information that the db2_error routine gets is from the stmt_errormsg() function from within the ibm_db driver. The db2_error function should only be called after a SQL failure otherwise there will be no diagnostic information returned from stmt_errormsg().\n\nIf the connect flag is True, the routine will get the SQLSTATE and SQLCODE from the connection error message rather than a statement error message.",
"_____no_output_____"
]
],
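To show what db2_error() extracts, here is a minimal sketch of the same string parsing applied to a hypothetical driver message (the message text is made up for illustration; real messages come from ibm_db.stmt_errormsg()):

```
# Hypothetical error text in the style parsed by db2_error().
errmsg = '"DB2INST1.NOTABLE" is an undefined name. SQLCODE=-204 SQLSTATE=42704'

def extract(msg, keyword):
    # Same approach as db2_error(): find the keyword and read up to the next blank.
    start = msg.find(keyword)
    if start == -1:
        return None
    end = msg.find(" ", start)
    if end == -1:
        end = len(msg)
    return msg[start + len(keyword):end]

print(extract(errmsg, "SQLCODE="))    # -> -204
print(extract(errmsg, "SQLSTATE="))   # -> 42704
```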
[
[
"def db2_error(quiet,connect=False):\n \n global sqlerror, sqlcode, sqlstate, _environment\n \n \n try:\n if (connect == False):\n errmsg = ibm_db.stmt_errormsg().replace('\\r',' ')\n errmsg = errmsg[errmsg.rfind(\"]\")+1:].strip()\n else:\n errmsg = ibm_db.conn_errormsg().replace('\\r',' ')\n errmsg = errmsg[errmsg.rfind(\"]\")+1:].strip()\n \n sqlerror = errmsg\n \n msg_start = errmsg.find(\"SQLSTATE=\")\n if (msg_start != -1):\n msg_end = errmsg.find(\" \",msg_start)\n if (msg_end == -1):\n msg_end = len(errmsg)\n sqlstate = errmsg[msg_start+9:msg_end]\n else:\n sqlstate = \"0\"\n \n msg_start = errmsg.find(\"SQLCODE=\")\n if (msg_start != -1):\n msg_end = errmsg.find(\" \",msg_start)\n if (msg_end == -1):\n msg_end = len(errmsg)\n sqlcode = errmsg[msg_start+8:msg_end]\n try:\n sqlcode = int(sqlcode)\n except:\n pass\n else: \n sqlcode = 0\n \n except:\n errmsg = \"Unknown error.\"\n sqlcode = -99999\n sqlstate = \"-99999\"\n sqlerror = errmsg\n return\n \n \n msg_start = errmsg.find(\"SQLSTATE=\")\n if (msg_start != -1):\n msg_end = errmsg.find(\" \",msg_start)\n if (msg_end == -1):\n msg_end = len(errmsg)\n sqlstate = errmsg[msg_start+9:msg_end]\n else:\n sqlstate = \"0\"\n \n \n msg_start = errmsg.find(\"SQLCODE=\")\n if (msg_start != -1):\n msg_end = errmsg.find(\" \",msg_start)\n if (msg_end == -1):\n msg_end = len(errmsg)\n sqlcode = errmsg[msg_start+8:msg_end]\n try:\n sqlcode = int(sqlcode)\n except:\n pass\n else:\n sqlcode = 0\n \n if quiet == True: return\n \n if (errmsg == \"\"): return\n\n html = '<p><p style=\"border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;\">'\n \n if (_environment[\"jupyter\"] == True):\n pdisplay(pHTML(html+errmsg+\"</p>\"))\n else:\n print(errmsg)\n \n# Print out an error message\n\ndef errormsg(message):\n \n global _environment\n \n if (message != \"\"):\n html = '<p><p style=\"border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;\">'\n if (_environment[\"jupyter\"] == True):\n pdisplay(pHTML(html + message + \"</p>\")) \n else:\n print(message)\n \ndef success(message):\n \n if (message != \"\"):\n print(message)\n return \n\ndef debug(message,error=False):\n \n global _environment\n \n if (_environment[\"jupyter\"] == True):\n spacer = \"<br>\" + \" \"\n else:\n spacer = \"\\n \"\n \n if (message != \"\"):\n\n lines = message.split('\\n')\n msg = \"\"\n indent = 0\n for line in lines:\n delta = line.count(\"(\") - line.count(\")\")\n if (msg == \"\"):\n msg = line\n indent = indent + delta\n else:\n if (delta < 0): indent = indent + delta\n msg = msg + spacer * (indent*2) + line\n if (delta > 0): indent = indent + delta \n\n if (indent < 0): indent = 0\n if (error == True): \n html = '<p><pre style=\"font-family: monospace; border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;\">' \n else:\n html = '<p><pre style=\"font-family: monospace; border:2px; border-style:solid; border-color:#008000; background-color:#e6ffe6; padding: 1em;\">'\n \n if (_environment[\"jupyter\"] == True):\n pdisplay(pHTML(html + msg + \"</pre></p>\"))\n else:\n print(msg)\n \n return ",
"_____no_output_____"
]
],
[
[
"## Macro Processor\nA macro is used to generate SQL to be executed by overriding or creating a new keyword. For instance, the base `%sql` command does not understand the `LIST TABLES` command which is usually used in conjunction with the `CLP` processor. Rather than specifically code this in the base `db2.ipynb` file, we can create a macro that can execute this code for us.\n\nThere are three routines that deal with macros. \n\n- checkMacro is used to find the macro calls in a string. All macros are sent to parseMacro for checking.\n- runMacro will evaluate the macro and return the string to the parse\n- subvars is used to track the variables used as part of a macro call.\n- setMacro is used to catalog a macro",
"_____no_output_____"
],
[
"### Set Macro\nThis code will catalog a macro call.",
"_____no_output_____"
]
],
[
[
"def setMacro(inSQL,parms):\n \n global _macros\n \n names = parms.split()\n if (len(names) < 2):\n errormsg(\"No command name supplied.\")\n return None\n \n macroName = names[1].upper()\n _macros[macroName] = inSQL\n\n return",
"_____no_output_____"
]
],
[
[
"### Check Macro\nThis code will check to see if there is a macro command in the SQL. It will take the SQL that is supplied and strip out three values: the first and second keywords, and the remainder of the parameters.\n\nFor instance, consider the following statement:\n```\nCREATE DATABASE GEORGE options....\n```\nThe name of the macro that we want to run is called `CREATE`. We know that there is a SQL command called `CREATE` but this code will call the macro first to see if needs to run any special code. For instance, `CREATE DATABASE` is not part of the `db2.ipynb` syntax, but we can add it in by using a macro.\n\nThe check macro logic will strip out the subcommand (`DATABASE`) and place the remainder of the string after `DATABASE` in options.",
"_____no_output_____"
]
],
[
[
"def checkMacro(in_sql):\n \n global _macros\n \n if (len(in_sql) == 0): return(in_sql) # Nothing to do \n \n tokens = parseArgs(in_sql,None) # Take the string and reduce into tokens\n \n macro_name = tokens[0].upper() # Uppercase the name of the token\n \n if (macro_name not in _macros): \n return(in_sql) # No macro by this name so just return the string\n\n result = runMacro(_macros[macro_name],in_sql,tokens) # Execute the macro using the tokens we found\n\n return(result) # Runmacro will either return the original SQL or the new one",
"_____no_output_____"
]
],
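[
[
"The following is an illustrative sketch (not part of the original macro catalog) of how a macro might be defined and then expanded. The macro name `LIST` and its body are hypothetical:\n\n```\n%%sql define LIST\nSELECT TABNAME FROM SYSCAT.TABLES WHERE TABSCHEMA = CURRENT SCHEMA\n```\n\nOnce the macro is cataloged by setMacro, a subsequent `%sql LIST` call is tokenized by checkMacro, matched against the `_macros` dictionary, and replaced by the text that runMacro returns.",
"_____no_output_____"
]
],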
[
[
"### Split Assignment\nThis routine will return the name of a variable and it's value when the format is x=y. If y is enclosed in quotes, the quotes are removed.",
"_____no_output_____"
]
],
[
[
"def splitassign(arg):\n \n var_name = \"null\"\n var_value = \"null\"\n \n arg = arg.strip()\n eq = arg.find(\"=\")\n if (eq != -1):\n var_name = arg[:eq].strip()\n temp_value = arg[eq+1:].strip()\n if (temp_value != \"\"):\n ch = temp_value[0]\n if (ch in [\"'\",'\"']):\n if (temp_value[-1:] == ch):\n var_value = temp_value[1:-1]\n else:\n var_value = temp_value\n else:\n var_value = temp_value\n else:\n var_value = arg\n\n return var_name, var_value",
"_____no_output_____"
]
],
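[
[
"A short sketch of how splitassign behaves (the argument values are illustrative):\n\n```\nsplitassign('display=PANDAS')    # returns ('display', 'PANDAS')\nsplitassign(\"name='George'\")     # returns ('name', 'George') - the quotes are removed\nsplitassign('justavalue')        # returns ('null', 'justavalue')\n```",
"_____no_output_____"
]
],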
[
[
"### Parse Args \nThe commands that are used in the macros need to be parsed into their separate tokens. The tokens are separated by blanks and strings that enclosed in quotes are kept together.",
"_____no_output_____"
]
],
[
[
"def parseArgs(argin,_vars):\n\n quoteChar = \"\"\n inQuote = False\n inArg = True\n args = []\n arg = ''\n \n for ch in argin.lstrip():\n if (inQuote == True):\n if (ch == quoteChar):\n inQuote = False \n arg = arg + ch #z\n else:\n arg = arg + ch\n elif (ch == \"\\\"\" or ch == \"\\'\"): # Do we have a quote\n quoteChar = ch\n arg = arg + ch #z\n inQuote = True\n elif (ch == \" \"):\n if (arg != \"\"):\n arg = subvars(arg,_vars)\n args.append(arg)\n else:\n args.append(\"null\")\n arg = \"\"\n else:\n arg = arg + ch\n \n if (arg != \"\"):\n arg = subvars(arg,_vars)\n args.append(arg) \n \n return(args)",
"_____no_output_____"
]
],
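[
[
"A minimal sketch of the tokenizer (the input is illustrative). With no variable dictionary supplied, subvars leaves each token unchanged:\n\n```\nparseArgs(\"create database sample 'log on'\", None)\n# -> ['create', 'database', 'sample', \"'log on'\"]\n```",
"_____no_output_____"
]
],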
[
[
"### Run Macro\nThis code will execute the body of the macro and return the results for that macro call.",
"_____no_output_____"
]
],
[
[
"def runMacro(script,in_sql,tokens):\n \n result = \"\"\n runIT = True \n code = script.split(\"\\n\")\n level = 0\n runlevel = [True,False,False,False,False,False,False,False,False,False]\n ifcount = 0\n _vars = {}\n \n for i in range(0,len(tokens)):\n vstr = str(i)\n _vars[vstr] = tokens[i]\n \n if (len(tokens) == 0):\n _vars[\"argc\"] = \"0\"\n else:\n _vars[\"argc\"] = str(len(tokens)-1)\n \n for line in code:\n line = line.strip()\n if (line == \"\" or line == \"\\n\"): continue\n if (line[0] == \"#\"): continue # A comment line starts with a # in the first position of the line\n args = parseArgs(line,_vars) # Get all of the arguments\n if (args[0] == \"if\"):\n ifcount = ifcount + 1\n if (runlevel[level] == False): # You can't execute this statement\n continue\n level = level + 1 \n if (len(args) < 4):\n print(\"Macro: Incorrect number of arguments for the if clause.\")\n return insql\n arg1 = args[1]\n arg2 = args[3]\n if (len(arg2) > 2):\n ch1 = arg2[0]\n ch2 = arg2[-1:]\n if (ch1 in ['\"',\"'\"] and ch1 == ch2):\n arg2 = arg2[1:-1].strip()\n \n op = args[2]\n if (op in [\"=\",\"==\"]):\n if (arg1 == arg2):\n runlevel[level] = True\n else:\n runlevel[level] = False \n elif (op in [\"<=\",\"=<\"]):\n if (arg1 <= arg2):\n runlevel[level] = True\n else:\n runlevel[level] = False \n elif (op in [\">=\",\"=>\"]): \n if (arg1 >= arg2):\n runlevel[level] = True\n else:\n runlevel[level] = False \n elif (op in [\"<>\",\"!=\"]): \n if (arg1 != arg2):\n runlevel[level] = True\n else:\n runlevel[level] = False \n elif (op in [\"<\"]):\n if (arg1 < arg2):\n runlevel[level] = True\n else:\n runlevel[level] = False \n elif (op in [\">\"]):\n if (arg1 > arg2):\n runlevel[level] = True\n else:\n runlevel[level] = False \n else:\n print(\"Macro: Unknown comparison operator in the if statement:\" + op)\n\n continue\n\n elif (args[0] in [\"exit\",\"echo\"] and runlevel[level] == True):\n msg = \"\"\n for msgline in args[1:]:\n if (msg == \"\"):\n msg = subvars(msgline,_vars)\n else:\n msg = msg + \" \" + subvars(msgline,_vars)\n if (msg != \"\"): \n if (args[0] == \"echo\"):\n debug(msg,error=False)\n else:\n debug(msg,error=True)\n if (args[0] == \"exit\"): return ''\n \n elif (args[0] == \"pass\" and runlevel[level] == True):\n pass\n\n elif (args[0] == \"var\" and runlevel[level] == True):\n value = \"\"\n for val in args[2:]:\n if (value == \"\"):\n value = subvars(val,_vars)\n else:\n value = value + \" \" + subvars(val,_vars)\n value.strip()\n _vars[args[1]] = value \n\n elif (args[0] == 'else'):\n\n if (ifcount == level):\n runlevel[level] = not runlevel[level]\n \n elif (args[0] == 'return' and runlevel[level] == True):\n return(result)\n\n elif (args[0] == \"endif\"):\n ifcount = ifcount - 1\n if (ifcount < level):\n level = level - 1\n if (level < 0):\n print(\"Macro: Unmatched if/endif pairs.\")\n return ''\n \n else:\n if (runlevel[level] == True):\n if (result == \"\"):\n result = subvars(line,_vars)\n else:\n result = result + \"\\n\" + subvars(line,_vars)\n \n return(result) ",
"_____no_output_____"
]
],
[
[
"### Substitute Vars\nThis routine is used by the runMacro program to track variables that are used within Macros. These are kept separate from the rest of the code.",
"_____no_output_____"
]
],
[
[
"def subvars(script,_vars):\n \n if (_vars == None): return script\n \n remainder = script\n result = \"\"\n done = False\n \n while done == False:\n bv = remainder.find(\"{\")\n if (bv == -1):\n done = True\n continue\n ev = remainder.find(\"}\")\n if (ev == -1):\n done = True\n continue\n result = result + remainder[:bv]\n vvar = remainder[bv+1:ev]\n remainder = remainder[ev+1:]\n \n upper = False\n allvars = False\n if (vvar[0] == \"^\"):\n upper = True\n vvar = vvar[1:]\n elif (vvar[0] == \"*\"):\n vvar = vvar[1:]\n allvars = True\n else:\n pass\n \n if (vvar in _vars):\n if (upper == True):\n items = _vars[vvar].upper()\n elif (allvars == True):\n try:\n iVar = int(vvar)\n except:\n return(script)\n items = \"\"\n sVar = str(iVar)\n while sVar in _vars:\n if (items == \"\"):\n items = _vars[sVar]\n else:\n items = items + \" \" + _vars[sVar]\n iVar = iVar + 1\n sVar = str(iVar)\n else:\n items = _vars[vvar]\n else:\n if (allvars == True):\n items = \"\"\n else:\n items = \"null\" \n \n result = result + items\n \n if (remainder != \"\"):\n result = result + remainder\n \n return(result)",
"_____no_output_____"
]
],
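[
[
"A sketch of the substitution syntax (the variable dictionary is hypothetical):\n\n```\n_vars = {'0': 'create', '1': 'database', '2': 'sample'}\nsubvars('{^1} {2}', _vars)     # -> 'DATABASE sample'   ({^n} uppercases the value)\nsubvars('{*1}', _vars)         # -> 'database sample'   ({*n} concatenates vars n, n+1, ...)\nsubvars('{missing}', _vars)    # -> 'null'              (unknown variables become null)\n```",
"_____no_output_____"
]
],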
[
[
"### SQL Timer\n\nThe calling format of this routine is:\n\n```\ncount = sqlTimer(hdbc, runtime, inSQL)\n```\n\nThis code runs the SQL string multiple times for one second (by default). The accuracy of the clock is not that great when you are running just one statement, so instead this routine will run the code multiple times for a second to give you an execution count. If you need to run the code for more than one second, the runtime value needs to be set to the number of seconds you want the code to run.\n\nThe return result is always the number of times that the code executed. Note, that the program will skip reading the data if it is a SELECT statement so it doesn't included fetch time for the answer set.",
"_____no_output_____"
]
],
[
[
"def sqlTimer(hdbc, runtime, inSQL):\n \n count = 0\n t_end = time.time() + runtime\n \n while time.time() < t_end:\n \n try:\n stmt = ibm_db.exec_immediate(hdbc,inSQL) \n if (stmt == False):\n db2_error(flag([\"-q\",\"-quiet\"]))\n return(-1)\n ibm_db.free_result(stmt)\n \n except Exception as err:\n db2_error(False)\n return(-1)\n \n count = count + 1\n \n return(count)",
"_____no_output_____"
]
],
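[
[
"An illustrative call (the connection handle and SQL text are assumed to already exist):\n\n```\ncount = sqlTimer(_hdbc, 1, 'SELECT COUNT(*) FROM EMPLOYEE')\nprint(count, 'executions in 1 second')\n```",
"_____no_output_____"
]
],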
[
[
"### Split Args\nThis routine takes as an argument a string and then splits the arguments according to the following logic:\n* If the string starts with a `(` character, it will check the last character in the string and see if it is a `)` and then remove those characters\n* Every parameter is separated by a comma `,` and commas within quotes are ignored\n* Each parameter returned will have three values returned - one for the value itself, an indicator which will be either True if it was quoted, or False if not, and True or False if it is numeric.\n\nExample:\n```\n \"abcdef\",abcdef,456,\"856\"\n```\n\nThree values would be returned:\n```\n[abcdef,True,False],[abcdef,False,False],[456,False,True],[856,True,False]\n```\n\nAny quoted string will be False for numeric. The way that the parameters are handled are up to the calling program. However, in the case of Db2, the quoted strings must be in single quotes so any quoted parameter using the double quotes `\"` must be wrapped with single quotes. There is always a possibility that a string contains single quotes (i.e. O'Connor) so any substituted text should use `''` so that Db2 can properly interpret the string. This routine does not adjust the strings with quotes, and depends on the variable subtitution routine to do that. ",
"_____no_output_____"
]
],
[
[
"def splitargs(arguments):\n \n import types\n \n # String the string and remove the ( and ) characters if they at the beginning and end of the string\n \n results = []\n \n step1 = arguments.strip()\n if (len(step1) == 0): return(results) # Not much to do here - no args found\n \n if (step1[0] == '('):\n if (step1[-1:] == ')'):\n step2 = step1[1:-1]\n step2 = step2.strip()\n else:\n step2 = step1\n else:\n step2 = step1\n \n # Now we have a string without brackets. Start scanning for commas\n \n quoteCH = \"\"\n pos = 0\n arg = \"\"\n args = []\n \n while pos < len(step2):\n ch = step2[pos]\n if (quoteCH == \"\"): # Are we in a quote?\n if (ch in ('\"',\"'\")): # Check to see if we are starting a quote\n quoteCH = ch\n arg = arg + ch\n pos += 1\n elif (ch == \",\"): # Are we at the end of a parameter?\n arg = arg.strip()\n args.append(arg)\n arg = \"\"\n inarg = False \n pos += 1\n else: # Continue collecting the string\n arg = arg + ch\n pos += 1\n else:\n if (ch == quoteCH): # Are we at the end of a quote?\n arg = arg + ch # Add the quote to the string\n pos += 1 # Increment past the quote\n quoteCH = \"\" # Stop quote checking (maybe!)\n else:\n pos += 1\n arg = arg + ch\n\n if (quoteCH != \"\"): # So we didn't end our string\n arg = arg.strip()\n args.append(arg)\n elif (arg != \"\"): # Something left over as an argument\n arg = arg.strip()\n args.append(arg)\n else:\n pass\n \n results = []\n \n for arg in args:\n result = []\n if (len(arg) > 0):\n if (arg[0] in ('\"',\"'\")):\n value = arg[1:-1]\n isString = True\n isNumber = False\n else:\n isString = False \n isNumber = False \n try:\n value = eval(arg)\n if (type(value) == int):\n isNumber = True\n elif (isinstance(value,float) == True):\n isNumber = True\n else:\n value = arg\n except:\n value = arg\n\n else:\n value = \"\"\n isString = False\n isNumber = False\n \n result = [value,isString,isNumber]\n results.append(result)\n \n return results",
"_____no_output_____"
]
],
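[
[
"A runnable sketch using the example above:\n\n```\nsplitargs('(\"abcdef\",abcdef,456,\"856\")')\n# -> [['abcdef', True, False], ['abcdef', False, False], [456, False, True], ['856', True, False]]\n```",
"_____no_output_____"
]
],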
[
[
"### DataFrame Table Creation\nWhen using dataframes, it is sometimes useful to use the definition of the dataframe to create a Db2 table. The format of the command is:\n```\n%sql using <df> create table <table> [with data | columns asis]\n```\nThe value <df> is the name of the dataframe, not the contents (`:df`). The definition of the data types in the dataframe will be used to create the Db2 table using typical Db2 data types rather than generic CLOBs and FLOAT for numeric objects. The two options are used to handle how the conversion is done. If you supply `with data`, the contents of the df will be inserted into the table, otherwise the table is defined only. The column names will be uppercased and special characters (like blanks) will be replaced with underscores. If `columns asis` is specified, the column names will remain the same as in the dataframe, with each name using quotes to guarantee the same spelling as in the DF.\n \nIf the table already exists, the command will not run and an error message will be produced.",
"_____no_output_____"
]
],
[
[
"def createDF(hdbc,sqlin,local_ns):\n \n import datetime\n import ibm_db \n \n global sqlcode \n \n # Strip apart the command into tokens based on spaces\n tokens = sqlin.split()\n \n token_count = len(tokens)\n \n if (token_count < 5): # Not enough parameters\n errormsg(\"Insufficient arguments for USING command. %sql using df create table name [with data | columns asis]\")\n return\n \n keyword_command = tokens[0].upper()\n dfName = tokens[1]\n keyword_create = tokens[2].upper()\n keyword_table = tokens[3].upper()\n table = tokens[4]\n \n if (keyword_create not in (\"CREATE\",\"REPLACE\") or keyword_table != \"TABLE\"):\n errormsg(\"Incorrect syntax: %sql using <df> create table <name> [options]\")\n return\n \n if (token_count % 2 != 1):\n errormsg(\"Insufficient arguments for USING command. %sql using df create table name [with data | columns asis | keep float]\")\n return \n \n flag_withdata = False\n flag_asis = False\n flag_float = False\n flag_integer = False\n limit = -1\n \n if (keyword_create == \"REPLACE\"):\n %sql -q DROP TABLE {table}\n \n for token_idx in range(5,token_count,2):\n\n option_key = tokens[token_idx].upper()\n option_val = tokens[token_idx+1].upper()\n if (option_key == \"WITH\" and option_val == \"DATA\"):\n flag_withdata = True\n elif (option_key == \"COLUMNS\" and option_val == \"ASIS\"):\n flag_asis = True\n elif (option_key == \"KEEP\" and option_val == \"FLOAT64\"):\n flag_float = True\n elif (option_key == \"KEEP\" and option_val == \"INT64\"):\n flag_integer = True\n elif (option_key == \"LIMIT\"):\n if (option_val.isnumeric() == False):\n errormsg(\"The LIMIT must be a valid number from -1 (unlimited) to the maximun number of rows to insert\")\n return\n limit = int(option_val)\n else:\n errormsg(\"Invalid options. 
Must be either WITH DATA | COLUMNS ASIS | KEEP FLOAT64 | KEEP FLOAT INT64\")\n return\n \n dfName = tokens[1]\n if (dfName not in local_ns):\n errormsg(\"The variable ({dfName}) does not exist in the local variable list.\")\n return \n\n try:\n df_value = eval(dfName,None,local_ns) # globals()[varName] # eval(varName)\n except:\n errormsg(\"The variable ({dfName}) does not contain a value.\")\n return \n \n if (isinstance(df_value,pandas.DataFrame) == False): # Not a Pandas dataframe\n errormsg(\"The variable ({dfName}) is not a Pandas dataframe.\")\n return \n\n sql = [] \n columns = dict(df_value.dtypes)\n sql.append(f'CREATE TABLE {table} (')\n datatypes = []\n comma = \"\"\n for column in columns:\n datatype = columns[column]\n if (datatype == \"object\"):\n datapoint = df_value[column][0]\n if (isinstance(datapoint,datetime.datetime)):\n type = \"TIMESTAMP\"\n elif (isinstance(datapoint,datetime.time)):\n type = \"TIME\"\n elif (isinstance(datapoint,datetime.date)):\n type = \"DATE\"\n elif (isinstance(datapoint,float)):\n if (flag_float == True):\n type = \"FLOAT\"\n else:\n type = \"DECFLOAT\"\n elif (isinstance(datapoint,int)):\n if (flag_integer == True):\n type = \"BIGINT\"\n else:\n type = \"INTEGER\"\n elif (isinstance(datapoint,str)):\n maxlength = df_value[column].apply(str).apply(len).max()\n type = f\"VARCHAR({maxlength})\"\n else:\n type = \"CLOB\"\n elif (datatype == \"int64\"):\n if (flag_integer == True):\n type = \"BIGINT\"\n else:\n type = \"INTEGER\"\n elif (datatype == \"float64\"):\n if (flag_float == True):\n type = \"FLOAT\"\n else:\n type = \"DECFLOAT\"\n elif (datatype == \"datetime64\"):\n type = \"TIMESTAMP\"\n elif (datatype == \"bool\"):\n type = \"BINARY\"\n else:\n type = \"CLOB\"\n \n datatypes.append(type) \n\n if (flag_asis == False):\n if (isinstance(column,str) == False):\n column = str(column)\n identifier = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_\"\n column_name = column.strip().upper()\n new_name = \"\"\n for ch in column_name:\n if (ch not in identifier):\n new_name = new_name + \"_\"\n else:\n new_name = new_name + ch\n \n new_name = new_name.lstrip('_').rstrip('_')\n \n if (new_name == \"\" or new_name[0] not in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"):\n new_name = f'\"{column}\"'\n else:\n new_name = f'\"{column}\"'\n \n sql.append(f\" {new_name} {type}\")\n sql.append(\")\")\n\n sqlcmd = \"\"\n for i in range(0,len(sql)):\n if (i > 0 and i < len(sql)-2):\n comma = \",\"\n else:\n comma = \"\"\n sqlcmd = \"{}\\n{}{}\".format(sqlcmd,sql[i],comma)\n \n print(sqlcmd)\n %sql {sqlcmd}\n\n if (sqlcode != 0):\n return\n\n if (flag_withdata == True):\n \n autocommit = ibm_db.autocommit(hdbc)\n ibm_db.autocommit(hdbc,False)\n\n row_count = 0\n insert_sql = \"\"\n rows, cols = df_value.shape\n for row in range(0,rows):\n \n insert_row = \"\"\n for col in range(0, cols):\n \n value = df_value.iloc[row][col]\n \n if (datatypes[col] == \"CLOB\" or \"VARCHAR\" in datatypes[col]):\n value = str(value)\n value = addquotes(value,True)\n elif (datatypes[col] in (\"TIME\",\"DATE\",\"TIMESTAMP\")):\n value = str(value)\n value = addquotes(value,True)\n elif (datatypes[col] in (\"INTEGER\",\"DECFLOAT\",\"FLOAT\",\"BINARY\")):\n strvalue = str(value)\n if (\"NAN\" in strvalue.upper()):\n value = \"NULL\" \n else:\n value = str(value)\n value = addquotes(value,True)\n \n if (insert_row == \"\"):\n insert_row = f\"{value}\"\n else:\n insert_row = f\"{insert_row},{value}\"\n \n if (insert_sql == \"\"):\n insert_sql = f\"INSERT INTO {table} VALUES ({insert_row})\"\n else:\n 
insert_sql = f\"{insert_sql},({insert_row})\"\n \n row_count += 1\n if (row_count % 1000 == 0 or row_count == limit):\n result = ibm_db.exec_immediate(hdbc, insert_sql) # Run it \n if (result == False): # Error executing the code\n db2_error(False) \n return\n ibm_db.commit(hdbc)\n\n print(f\"\\r{row_count} of {rows} rows inserted.\",end=\"\")\n \n insert_sql = \"\"\n \n if (row_count == limit):\n break\n \n if (insert_sql != \"\"):\n result = ibm_db.exec_immediate(hdbc, insert_sql) # Run it \n if (result == False): # Error executing the code\n db2_error(False) \n ibm_db.commit(hdbc)\n\n ibm_db.autocommit(hdbc,autocommit)\n\n print(\"\\nInsert completed.\")\n \n return",
"_____no_output_____"
]
],
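[
[
"An illustrative invocation (the dataframe `sales_df` and the table name are hypothetical):\n\n```\n%sql using sales_df create table SALES_STAGING with data\n```\n\nThe column definitions are derived from the dataframe dtypes, and the rows are inserted in blocks of 1000 with a commit after each block.",
"_____no_output_____"
]
],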
[
[
"### SQL Parser\n\nThe calling format of this routine is:\n\n```\nsql_cmd, encoded_sql = sqlParser(sql_input)\n```\n\nThis code will look at the SQL string that has been passed to it and parse it into two values:\n- sql_cmd: First command in the list (so this may not be the actual SQL command)\n- encoded_sql: SQL with the parameters removed if there are any (replaced with ? markers)",
"_____no_output_____"
]
],
[
[
"def sqlParser(sqlin,local_ns):\n \n sql_cmd = \"\"\n encoded_sql = sqlin\n \n firstCommand = \"(?:^\\s*)([a-zA-Z]+)(?:\\s+.*|$)\"\n \n findFirst = re.match(firstCommand,sqlin)\n \n if (findFirst == None): # We did not find a match so we just return the empty string\n return sql_cmd, encoded_sql\n \n cmd = findFirst.group(1)\n sql_cmd = cmd.upper()\n\n #\n # Scan the input string looking for variables in the format :var. If no : is found just return.\n # Var must be alpha+number+_ to be valid\n #\n \n if (':' not in sqlin): # A quick check to see if parameters are in here, but not fool-proof! \n return sql_cmd, encoded_sql \n \n inVar = False \n inQuote = \"\" \n varName = \"\"\n encoded_sql = \"\"\n \n STRING = 0\n NUMBER = 1\n LIST = 2\n RAW = 3\n PANDAS = 5\n \n for ch in sqlin:\n if (inVar == True): # We are collecting the name of a variable\n if (ch.upper() in \"@_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789[]\"):\n varName = varName + ch\n continue\n else:\n if (varName == \"\"):\n encode_sql = encoded_sql + \":\"\n elif (varName[0] in ('[',']')):\n encoded_sql = encoded_sql + \":\" + varName\n else:\n if (ch == '.'): # If the variable name is stopped by a period, assume no quotes are used\n flag_quotes = False\n else:\n flag_quotes = True\n varValue, varType = getContents(varName,flag_quotes,local_ns)\n if (varType != PANDAS and varValue == None): \n encoded_sql = encoded_sql + \":\" + varName\n else:\n if (varType == STRING):\n encoded_sql = encoded_sql + varValue\n elif (varType == NUMBER):\n encoded_sql = encoded_sql + str(varValue)\n elif (varType == RAW):\n encoded_sql = encoded_sql + varValue\n elif (varType == PANDAS):\n insertsql = \"\"\n coltypes = varValue.dtypes\n rows, cols = varValue.shape\n for row in range(0,rows):\n insertrow = \"\"\n for col in range(0, cols):\n value = varValue.iloc[row][col]\n if (coltypes[col] == \"object\"):\n value = str(value)\n value = addquotes(value,True)\n else:\n strvalue = str(value)\n if (\"NAN\" in strvalue.upper()):\n value = \"NULL\" \n if (insertrow == \"\"):\n insertrow = f\"{value}\"\n else:\n insertrow = f\"{insertrow},{value}\"\n if (insertsql == \"\"):\n insertsql = f\"({insertrow})\"\n else:\n insertsql = f\"{insertsql},({insertrow})\" \n encoded_sql = encoded_sql + insertsql\n elif (varType == LIST):\n start = True\n for v in varValue:\n if (start == False):\n encoded_sql = encoded_sql + \",\"\n if (isinstance(v,int) == True): # Integer value \n encoded_sql = encoded_sql + str(v)\n elif (isinstance(v,float) == True):\n encoded_sql = encoded_sql + str(v)\n else:\n flag_quotes = True\n try:\n if (v.find('0x') == 0): # Just guessing this is a hex value at beginning\n encoded_sql = encoded_sql + v\n else:\n encoded_sql = encoded_sql + addquotes(v,flag_quotes) # String\n except:\n encoded_sql = encoded_sql + addquotes(str(v),flag_quotes) \n start = False\n\n encoded_sql = encoded_sql + ch\n varName = \"\"\n inVar = False \n elif (inQuote != \"\"):\n encoded_sql = encoded_sql + ch\n if (ch == inQuote): inQuote = \"\"\n elif (ch in (\"'\",'\"')):\n encoded_sql = encoded_sql + ch\n inQuote = ch\n elif (ch == \":\"): # This might be a variable\n varName = \"\"\n inVar = True\n else:\n encoded_sql = encoded_sql + ch\n \n if (inVar == True):\n varValue, varType = getContents(varName,True,local_ns) # We assume the end of a line is quoted\n if (varType != PANDAS and varValue == None): \n encoded_sql = encoded_sql + \":\" + varName \n else:\n if (varType == STRING):\n encoded_sql = encoded_sql + varValue\n elif (varType == NUMBER):\n encoded_sql = 
encoded_sql + str(varValue)\n elif (varType == PANDAS):\n insertsql = \"\"\n coltypes = varValue.dtypes\n rows, cols = varValue.shape\n for row in range(0,rows):\n insertrow = \"\"\n for col in range(0, cols):\n value = varValue.iloc[row][col]\n if (coltypes[col] == \"object\"):\n value = str(value)\n value = addquotes(value,True)\n else:\n strvalue = str(value)\n if (\"NAN\" in strvalue.upper()):\n value = \"NULL\" \n if (insertrow == \"\"):\n insertrow = f\"{value}\"\n else:\n insertrow = f\"{insertrow},{value}\"\n if (insertsql == \"\"):\n insertsql = f\"({insertrow})\"\n else:\n insertsql = f\"{insertsql},({insertrow})\" \n encoded_sql = encoded_sql + insertsql \n elif (varType == LIST):\n flag_quotes = True\n start = True\n for v in varValue:\n if (start == False):\n encoded_sql = encoded_sql + \",\"\n if (isinstance(v,int) == True): # Integer value \n encoded_sql = encoded_sql + str(v)\n elif (isinstance(v,float) == True):\n encoded_sql = encoded_sql + str(v)\n else:\n try:\n if (v.find('0x') == 0): # Just guessing this is a hex value\n encoded_sql = encoded_sql + v\n else:\n encoded_sql = encoded_sql + addquotes(v,flag_quotes) # String\n except:\n encoded_sql = encoded_sql + addquotes(str(v),flag_quotes) \n start = False\n\n return sql_cmd, encoded_sql",
"_____no_output_____"
]
],
[
[
"### Variable Contents Function\nThe calling format of this routine is:\n\n```\nvalue = getContents(varName,quote,name_space)\n```\n\nThis code will take the name of a variable as input and return the contents of that variable. If the variable is not found then the program will return None which is the equivalent to empty or null. Note that this function looks at the global variable pool for Python so it is possible that the wrong version of variable is returned if it is used in different functions. For this reason, any variables used in SQL statements should use a unique namimg convention if possible.\n\nThe other thing that this function does is replace single quotes with two quotes. The reason for doing this is that Db2 will convert two single quotes into one quote when dealing with strings. This avoids problems when dealing with text that contains multiple quotes within the string. Note that this substitution is done only for single quote characters since the double quote character is used by Db2 for naming columns that are case sensitive or contain special characters.\n\nIf the quote value is True, the field will have quotes around it. The name_space is the variables currently that are registered in Python.",
"_____no_output_____"
]
],
[
[
"def getContents(varName,flag_quotes,local_ns):\n \n #\n # Get the contents of the variable name that is passed to the routine. Only simple\n # variables are checked, i.e. arrays and lists are not parsed\n #\n \n STRING = 0\n NUMBER = 1\n LIST = 2\n RAW = 3\n DICT = 4\n PANDAS = 5\n \n try:\n value = eval(varName,None,local_ns) # globals()[varName] # eval(varName)\n except:\n return(None,STRING)\n \n if (isinstance(value,dict) == True): # Check to see if this is JSON dictionary\n return(addquotes(value,flag_quotes),STRING)\n\n elif(isinstance(value,list) == True): # List - tricky \n return(value,LIST)\n \n elif (isinstance(value,pandas.DataFrame) == True): # Pandas dataframe\n return(value,PANDAS)\n\n elif (isinstance(value,int) == True): # Integer value \n return(value,NUMBER)\n\n elif (isinstance(value,float) == True): # Float value\n return(value,NUMBER)\n\n else:\n try:\n # The pattern needs to be in the first position (0 in Python terms)\n if (value.find('0x') == 0): # Just guessing this is a hex value\n return(value,RAW)\n else:\n return(addquotes(value,flag_quotes),STRING) # String\n except:\n return(addquotes(str(value),flag_quotes),RAW)",
"_____no_output_____"
]
],
[
[
"### Add Quotes\nQuotes are a challenge when dealing with dictionaries and Db2. Db2 wants strings delimited with single quotes, while Dictionaries use double quotes. That wouldn't be a problems except imbedded single quotes within these dictionaries will cause things to fail. This routine attempts to double-quote the single quotes within the dicitonary.",
"_____no_output_____"
]
],
[
[
"def addquotes(inString,flag_quotes):\n \n if (isinstance(inString,dict) == True): # Check to see if this is JSON dictionary\n serialized = json.dumps(inString) \n else:\n serialized = inString\n\n # Replace single quotes with '' (two quotes) and wrap everything in single quotes\n if (flag_quotes == False):\n return(serialized)\n else:\n return(\"'\"+serialized.replace(\"'\",\"''\")+\"'\") # Convert single quotes to two single quotes",
"_____no_output_____"
]
],
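[
[
"A small sketch of the quoting behaviour (the input values are illustrative):\n\n```\naddquotes(\"O'Hara\", True)     # -> \"'O''Hara'\"  (safe to embed in a Db2 statement)\naddquotes('plain', False)     # -> 'plain'      (returned without any quoting)\n```",
"_____no_output_____"
]
],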
[
[
"### Create the SAMPLE Database Tables\nThe calling format of this routine is:\n\n```\ndb2_create_sample(quiet)\n```\n\nThere are a lot of examples that depend on the data within the SAMPLE database. If you are running these examples and the connection is not to the SAMPLE database, then this code will create the two (EMPLOYEE, DEPARTMENT) tables that are used by most examples. If the function finds that these tables already exist, then nothing is done. If the tables are missing then they will be created with the same data as in the SAMPLE database.\n\nThe quiet flag tells the program not to print any messages when the creation of the tables is complete.",
"_____no_output_____"
]
],
[
[
"def db2_create_sample(quiet):\n \n create_department = \"\"\"\n BEGIN\n DECLARE FOUND INTEGER; \n SET FOUND = (SELECT COUNT(*) FROM SYSIBM.SYSTABLES WHERE NAME='DEPARTMENT' AND CREATOR=CURRENT USER); \n IF FOUND = 0 THEN \n EXECUTE IMMEDIATE('CREATE TABLE DEPARTMENT(DEPTNO CHAR(3) NOT NULL, DEPTNAME VARCHAR(36) NOT NULL, \n MGRNO CHAR(6),ADMRDEPT CHAR(3) NOT NULL)'); \n EXECUTE IMMEDIATE('INSERT INTO DEPARTMENT VALUES \n (''A00'',''SPIFFY COMPUTER SERVICE DIV.'',''000010'',''A00''), \n (''B01'',''PLANNING'',''000020'',''A00''), \n (''C01'',''INFORMATION CENTER'',''000030'',''A00''), \n (''D01'',''DEVELOPMENT CENTER'',NULL,''A00''), \n (''D11'',''MANUFACTURING SYSTEMS'',''000060'',''D01''), \n (''D21'',''ADMINISTRATION SYSTEMS'',''000070'',''D01''), \n (''E01'',''SUPPORT SERVICES'',''000050'',''A00''), \n (''E11'',''OPERATIONS'',''000090'',''E01''), \n (''E21'',''SOFTWARE SUPPORT'',''000100'',''E01''), \n (''F22'',''BRANCH OFFICE F2'',NULL,''E01''), \n (''G22'',''BRANCH OFFICE G2'',NULL,''E01''), \n (''H22'',''BRANCH OFFICE H2'',NULL,''E01''), \n (''I22'',''BRANCH OFFICE I2'',NULL,''E01''), \n (''J22'',''BRANCH OFFICE J2'',NULL,''E01'')'); \n END IF;\n END\"\"\"\n \n %sql -d -q {create_department} \n \n create_employee = \"\"\"\n BEGIN\n DECLARE FOUND INTEGER; \n SET FOUND = (SELECT COUNT(*) FROM SYSIBM.SYSTABLES WHERE NAME='EMPLOYEE' AND CREATOR=CURRENT USER); \n IF FOUND = 0 THEN \n EXECUTE IMMEDIATE('CREATE TABLE EMPLOYEE(\n EMPNO CHAR(6) NOT NULL,\n FIRSTNME VARCHAR(12) NOT NULL,\n MIDINIT CHAR(1),\n LASTNAME VARCHAR(15) NOT NULL,\n WORKDEPT CHAR(3),\n PHONENO CHAR(4),\n HIREDATE DATE,\n JOB CHAR(8),\n EDLEVEL SMALLINT NOT NULL,\n SEX CHAR(1),\n BIRTHDATE DATE,\n SALARY DECIMAL(9,2),\n BONUS DECIMAL(9,2),\n COMM DECIMAL(9,2)\n )');\n EXECUTE IMMEDIATE('INSERT INTO EMPLOYEE VALUES\n (''000010'',''CHRISTINE'',''I'',''HAAS'' ,''A00'',''3978'',''1995-01-01'',''PRES '',18,''F'',''1963-08-24'',152750.00,1000.00,4220.00),\n (''000020'',''MICHAEL'' ,''L'',''THOMPSON'' ,''B01'',''3476'',''2003-10-10'',''MANAGER '',18,''M'',''1978-02-02'',94250.00,800.00,3300.00),\n (''000030'',''SALLY'' ,''A'',''KWAN'' ,''C01'',''4738'',''2005-04-05'',''MANAGER '',20,''F'',''1971-05-11'',98250.00,800.00,3060.00),\n (''000050'',''JOHN'' ,''B'',''GEYER'' ,''E01'',''6789'',''1979-08-17'',''MANAGER '',16,''M'',''1955-09-15'',80175.00,800.00,3214.00),\n (''000060'',''IRVING'' ,''F'',''STERN'' ,''D11'',''6423'',''2003-09-14'',''MANAGER '',16,''M'',''1975-07-07'',72250.00,500.00,2580.00),\n (''000070'',''EVA'' ,''D'',''PULASKI'' ,''D21'',''7831'',''2005-09-30'',''MANAGER '',16,''F'',''2003-05-26'',96170.00,700.00,2893.00),\n (''000090'',''EILEEN'' ,''W'',''HENDERSON'' ,''E11'',''5498'',''2000-08-15'',''MANAGER '',16,''F'',''1971-05-15'',89750.00,600.00,2380.00),\n (''000100'',''THEODORE'' ,''Q'',''SPENSER'' ,''E21'',''0972'',''2000-06-19'',''MANAGER '',14,''M'',''1980-12-18'',86150.00,500.00,2092.00),\n (''000110'',''VINCENZO'' ,''G'',''LUCCHESSI'' ,''A00'',''3490'',''1988-05-16'',''SALESREP'',19,''M'',''1959-11-05'',66500.00,900.00,3720.00),\n (''000120'',''SEAN'' ,'' '',''O`CONNELL'' ,''A00'',''2167'',''1993-12-05'',''CLERK '',14,''M'',''1972-10-18'',49250.00,600.00,2340.00),\n (''000130'',''DELORES'' ,''M'',''QUINTANA'' ,''C01'',''4578'',''2001-07-28'',''ANALYST '',16,''F'',''1955-09-15'',73800.00,500.00,1904.00),\n (''000140'',''HEATHER'' ,''A'',''NICHOLLS'' ,''C01'',''1793'',''2006-12-15'',''ANALYST '',18,''F'',''1976-01-19'',68420.00,600.00,2274.00),\n (''000150'',''BRUCE'' ,'' '',''ADAMSON'' 
,''D11'',''4510'',''2002-02-12'',''DESIGNER'',16,''M'',''1977-05-17'',55280.00,500.00,2022.00),\n (''000160'',''ELIZABETH'',''R'',''PIANKA'' ,''D11'',''3782'',''2006-10-11'',''DESIGNER'',17,''F'',''1980-04-12'',62250.00,400.00,1780.00),\n (''000170'',''MASATOSHI'',''J'',''YOSHIMURA'' ,''D11'',''2890'',''1999-09-15'',''DESIGNER'',16,''M'',''1981-01-05'',44680.00,500.00,1974.00),\n (''000180'',''MARILYN'' ,''S'',''SCOUTTEN'' ,''D11'',''1682'',''2003-07-07'',''DESIGNER'',17,''F'',''1979-02-21'',51340.00,500.00,1707.00),\n (''000190'',''JAMES'' ,''H'',''WALKER'' ,''D11'',''2986'',''2004-07-26'',''DESIGNER'',16,''M'',''1982-06-25'',50450.00,400.00,1636.00),\n (''000200'',''DAVID'' ,'' '',''BROWN'' ,''D11'',''4501'',''2002-03-03'',''DESIGNER'',16,''M'',''1971-05-29'',57740.00,600.00,2217.00),\n (''000210'',''WILLIAM'' ,''T'',''JONES'' ,''D11'',''0942'',''1998-04-11'',''DESIGNER'',17,''M'',''2003-02-23'',68270.00,400.00,1462.00),\n (''000220'',''JENNIFER'' ,''K'',''LUTZ'' ,''D11'',''0672'',''1998-08-29'',''DESIGNER'',18,''F'',''1978-03-19'',49840.00,600.00,2387.00),\n (''000230'',''JAMES'' ,''J'',''JEFFERSON'' ,''D21'',''2094'',''1996-11-21'',''CLERK '',14,''M'',''1980-05-30'',42180.00,400.00,1774.00),\n (''000240'',''SALVATORE'',''M'',''MARINO'' ,''D21'',''3780'',''2004-12-05'',''CLERK '',17,''M'',''2002-03-31'',48760.00,600.00,2301.00),\n (''000250'',''DANIEL'' ,''S'',''SMITH'' ,''D21'',''0961'',''1999-10-30'',''CLERK '',15,''M'',''1969-11-12'',49180.00,400.00,1534.00),\n (''000260'',''SYBIL'' ,''P'',''JOHNSON'' ,''D21'',''8953'',''2005-09-11'',''CLERK '',16,''F'',''1976-10-05'',47250.00,300.00,1380.00),\n (''000270'',''MARIA'' ,''L'',''PEREZ'' ,''D21'',''9001'',''2006-09-30'',''CLERK '',15,''F'',''2003-05-26'',37380.00,500.00,2190.00),\n (''000280'',''ETHEL'' ,''R'',''SCHNEIDER'' ,''E11'',''8997'',''1997-03-24'',''OPERATOR'',17,''F'',''1976-03-28'',36250.00,500.00,2100.00),\n (''000290'',''JOHN'' ,''R'',''PARKER'' ,''E11'',''4502'',''2006-05-30'',''OPERATOR'',12,''M'',''1985-07-09'',35340.00,300.00,1227.00),\n (''000300'',''PHILIP'' ,''X'',''SMITH'' ,''E11'',''2095'',''2002-06-19'',''OPERATOR'',14,''M'',''1976-10-27'',37750.00,400.00,1420.00),\n (''000310'',''MAUDE'' ,''F'',''SETRIGHT'' ,''E11'',''3332'',''1994-09-12'',''OPERATOR'',12,''F'',''1961-04-21'',35900.00,300.00,1272.00),\n (''000320'',''RAMLAL'' ,''V'',''MEHTA'' ,''E21'',''9990'',''1995-07-07'',''FIELDREP'',16,''M'',''1962-08-11'',39950.00,400.00,1596.00),\n (''000330'',''WING'' ,'' '',''LEE'' ,''E21'',''2103'',''2006-02-23'',''FIELDREP'',14,''M'',''1971-07-18'',45370.00,500.00,2030.00),\n (''000340'',''JASON'' ,''R'',''GOUNOT'' ,''E21'',''5698'',''1977-05-05'',''FIELDREP'',16,''M'',''1956-05-17'',43840.00,500.00,1907.00),\n (''200010'',''DIAN'' ,''J'',''HEMMINGER'' ,''A00'',''3978'',''1995-01-01'',''SALESREP'',18,''F'',''1973-08-14'',46500.00,1000.00,4220.00),\n (''200120'',''GREG'' ,'' '',''ORLANDO'' ,''A00'',''2167'',''2002-05-05'',''CLERK '',14,''M'',''1972-10-18'',39250.00,600.00,2340.00),\n (''200140'',''KIM'' ,''N'',''NATZ'' ,''C01'',''1793'',''2006-12-15'',''ANALYST '',18,''F'',''1976-01-19'',68420.00,600.00,2274.00),\n (''200170'',''KIYOSHI'' ,'' '',''YAMAMOTO'' ,''D11'',''2890'',''2005-09-15'',''DESIGNER'',16,''M'',''1981-01-05'',64680.00,500.00,1974.00),\n (''200220'',''REBA'' ,''K'',''JOHN'' ,''D11'',''0672'',''2005-08-29'',''DESIGNER'',18,''F'',''1978-03-19'',69840.00,600.00,2387.00),\n (''200240'',''ROBERT'' ,''M'',''MONTEVERDE'',''D21'',''3780'',''2004-12-05'',''CLERK 
'',17,''M'',''1984-03-31'',37760.00,600.00,2301.00),\n (''200280'',''EILEEN'' ,''R'',''SCHWARTZ'' ,''E11'',''8997'',''1997-03-24'',''OPERATOR'',17,''F'',''1966-03-28'',46250.00,500.00,2100.00),\n (''200310'',''MICHELLE'' ,''F'',''SPRINGER'' ,''E11'',''3332'',''1994-09-12'',''OPERATOR'',12,''F'',''1961-04-21'',35900.00,300.00,1272.00),\n (''200330'',''HELENA'' ,'' '',''WONG'' ,''E21'',''2103'',''2006-02-23'',''FIELDREP'',14,''F'',''1971-07-18'',35370.00,500.00,2030.00),\n (''200340'',''ROY'' ,''R'',''ALONZO'' ,''E21'',''5698'',''1997-07-05'',''FIELDREP'',16,''M'',''1956-05-17'',31840.00,500.00,1907.00)'); \n END IF;\n END\"\"\"\n \n %sql -d -q {create_employee} \n \n if (quiet == False): success(\"Sample tables [EMPLOYEE, DEPARTMENT] created.\")",
"_____no_output_____"
]
],
[
[
"### Check option\nThis function will return the original string with the option removed, and a flag or true or false of the value is found.\n\n```\nargs, flag = checkOption(option_string, option, false_value, true_value)\n```\n\nOptions are specified with a -x where x is the character that we are searching for. It may actually be more than one character long like -pb/-pi/etc... The false and true values are optional. By default these are the boolean values of T/F but for some options it could be a character string like ';' versus '@' for delimiters.",
"_____no_output_____"
]
],
[
[
"def checkOption(args_in, option, vFalse=False, vTrue=True):\n \n args_out = args_in.strip()\n found = vFalse\n \n if (args_out != \"\"):\n if (args_out.find(option) >= 0):\n args_out = args_out.replace(option,\" \")\n args_out = args_out.strip()\n found = vTrue\n\n return args_out, found",
"_____no_output_____"
]
],
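[
[
"A short sketch of the option handling (the SQL text is illustrative):\n\n```\nargs, quiet = checkOption('-q drop table sales', '-q')\n# args  -> 'drop table sales'\n# quiet -> True\n```",
"_____no_output_____"
]
],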
[
[
"### Plot Data\nThis function will plot the data that is returned from the answer set. The plot value determines how we display the data. 1=Bar, 2=Pie, 3=Line, 4=Interactive.\n\n```\nplotData(flag_plot, hdbi, sql, parms)\n```\n\nThe hdbi is the ibm_db_sa handle that is used by pandas dataframes to run the sql. The parms contains any of the parameters required to run the query.",
"_____no_output_____"
]
],
[
[
"def plotData(hdbi, sql):\n \n try:\n df = pandas.read_sql(sql,hdbi)\n \n except Exception as err:\n db2_error(False)\n return\n \n \n if df.empty:\n errormsg(\"No results returned\")\n return\n \n col_count = len(df.columns)\n\n if flag([\"-pb\",\"-bar\"]): # Plot 1 = bar chart\n \n if (col_count in (1,2,3)):\n \n if (col_count == 1):\n \n df.index = df.index + 1\n _ = df.plot(kind='bar');\n _ = plt.plot();\n \n elif (col_count == 2):\n \n xlabel = df.columns.values[0]\n ylabel = df.columns.values[1]\n df.plot(kind='bar',x=xlabel,y=ylabel);\n _ = plt.plot();\n \n else:\n \n values = df.columns.values[2]\n columns = df.columns.values[0]\n index = df.columns.values[1]\n pivoted = pandas.pivot_table(df, values=values, columns=columns, index=index) \n _ = pivoted.plot.bar(); \n \n else:\n errormsg(\"Can't determine what columns to plot\")\n return\n \n elif flag([\"-pp\",\"-pie\"]): # Plot 2 = pie chart\n \n if (col_count in (1,2)): \n \n if (col_count == 1):\n df.index = df.index + 1\n yname = df.columns.values[0]\n _ = df.plot(kind='pie',y=yname); \n else: \n xlabel = df.columns.values[0]\n xname = df[xlabel].tolist()\n yname = df.columns.values[1]\n _ = df.plot(kind='pie',y=yname,labels=xname);\n \n plt.show();\n \n else:\n errormsg(\"Can't determine what columns to plot\")\n return\n \n elif flag([\"-pl\",\"-line\"]): # Plot 3 = line chart\n \n if (col_count in (1,2,3)): \n \n if (col_count == 1):\n df.index = df.index + 1 \n _ = df.plot(kind='line'); \n elif (col_count == 2): \n xlabel = df.columns.values[0]\n ylabel = df.columns.values[1]\n _ = df.plot(kind='line',x=xlabel,y=ylabel) ; \n else: \n values = df.columns.values[2]\n columns = df.columns.values[0]\n index = df.columns.values[1]\n pivoted = pandas.pivot_table(df, values=values, columns=columns, index=index)\n _ = pivoted.plot();\n \n plt.show();\n \n else:\n errormsg(\"Can't determine what columns to plot\")\n return\n else:\n return",
"_____no_output_____"
]
],
[
[
"### Find a Procedure\nThis routine will check to see if a procedure exists with the SCHEMA/NAME (or just NAME if no schema is supplied) and returns the number of answer sets returned. Possible values are 0, 1 (or greater) or None. If None is returned then we can't find the procedure anywhere.",
"_____no_output_____"
]
],
[
[
"def findProc(procname):\n \n global _hdbc, _hdbi, _connected, _runtime\n \n # Split the procedure name into schema.procname if appropriate\n upper_procname = procname.upper()\n schema, proc = split_string(upper_procname,\".\") # Expect schema.procname\n if (proc == None):\n proc = schema\n\n # Call ibm_db.procedures to see if the procedure does exist\n schema = \"%\"\n\n try:\n stmt = ibm_db.procedures(_hdbc, None, schema, proc) \n if (stmt == False): # Error executing the code\n errormsg(\"Procedure \" + procname + \" not found in the system catalog.\")\n return None\n\n result = ibm_db.fetch_tuple(stmt)\n resultsets = result[5]\n if (resultsets >= 1): resultsets = 1\n return resultsets\n \n except Exception as err:\n errormsg(\"Procedure \" + procname + \" not found in the system catalog.\")\n return None",
"_____no_output_____"
]
],
[
[
"### Parse Call Arguments\nThis code will parse a SQL call #name(parm1,...) and return the name and the parameters in the call.",
"_____no_output_____"
]
],
[
[
"def parseCallArgs(macro):\n \n quoteChar = \"\"\n inQuote = False\n inParm = False\n ignore = False\n name = \"\"\n parms = []\n parm = ''\n \n sqlin = macro.replace(\"\\n\",\"\")\n sqlin.lstrip()\n \n for ch in sqlin:\n if (inParm == False):\n # We hit a blank in the name, so ignore everything after the procedure name until a ( is found\n if (ch == \" \"): \n ignore == True\n elif (ch == \"(\"): # Now we have parameters to send to the stored procedure\n inParm = True\n else:\n if (ignore == False): name = name + ch # The name of the procedure (and no blanks)\n else:\n if (inQuote == True):\n if (ch == quoteChar):\n inQuote = False \n else:\n parm = parm + ch\n elif (ch in (\"\\\"\",\"\\'\",\"[\")): # Do we have a quote\n if (ch == \"[\"):\n quoteChar = \"]\"\n else:\n quoteChar = ch\n inQuote = True\n elif (ch == \")\"):\n if (parm != \"\"):\n parms.append(parm)\n parm = \"\"\n break\n elif (ch == \",\"):\n if (parm != \"\"):\n parms.append(parm) \n else:\n parms.append(\"null\")\n \n parm = \"\"\n\n else:\n parm = parm + ch\n \n if (inParm == True):\n if (parm != \"\"):\n parms.append(parm_value) \n \n return(name,parms)",
"_____no_output_____"
]
],
[
[
"### Get Columns\nGiven a statement handle, determine what the column names are or the data types.",
"_____no_output_____"
]
],
[
[
"def getColumns(stmt):\n \n columns = []\n types = []\n colcount = 0\n try:\n colname = ibm_db.field_name(stmt,colcount)\n coltype = ibm_db.field_type(stmt,colcount)\n while (colname != False):\n columns.append(colname)\n types.append(coltype)\n colcount += 1\n colname = ibm_db.field_name(stmt,colcount)\n coltype = ibm_db.field_type(stmt,colcount) \n return columns,types \n \n except Exception as err:\n db2_error(False)\n return None",
"_____no_output_____"
]
],
[
[
"### Call a Procedure\nThe CALL statement is used for execution of a stored procedure. The format of the CALL statement is:\n```\nCALL PROC_NAME(x,y,z,...)\n```\nProcedures allow for the return of answer sets (cursors) as well as changing the contents of the parameters being passed to the procedure. In this implementation, the CALL function is limited to returning one answer set (or nothing). If you want to use more complex stored procedures then you will have to use the native python libraries.",
"_____no_output_____"
]
],
[
[
"def parseCall(hdbc, inSQL, local_ns):\n \n global _hdbc, _hdbi, _connected, _runtime, _environment\n \n # Check to see if we are connected first\n if (_connected == False): # Check if you are connected \n db2_doConnect()\n if _connected == False: return None\n \n remainder = inSQL.strip()\n procName, procArgs = parseCallArgs(remainder[5:]) # Assume that CALL ... is the format\n \n resultsets = findProc(procName)\n if (resultsets == None): return None\n \n argvalues = []\n \n if (len(procArgs) > 0): # We have arguments to consider\n for arg in procArgs:\n varname = arg\n if (len(varname) > 0):\n if (varname[0] == \":\"):\n checkvar = varname[1:]\n varvalue = getContents(checkvar,True,local_ns)\n if (varvalue == None):\n errormsg(\"Variable \" + checkvar + \" is not defined.\")\n return None\n argvalues.append(varvalue)\n else:\n if (varname.upper() == \"NULL\"):\n argvalues.append(None)\n else:\n argvalues.append(varname)\n else:\n argvalues.append(None)\n\n \n try:\n\n if (len(procArgs) > 0):\n argtuple = tuple(argvalues)\n result = ibm_db.callproc(_hdbc,procName,argtuple)\n stmt = result[0]\n else:\n result = ibm_db.callproc(_hdbc,procName)\n stmt = result\n \n if (resultsets != 0 and stmt != None): \n\n columns, types = getColumns(stmt)\n if (columns == None): return None\n \n rows = []\n rowlist = ibm_db.fetch_tuple(stmt)\n while ( rowlist ) :\n row = []\n colcount = 0\n for col in rowlist:\n try:\n if (types[colcount] in [\"int\",\"bigint\"]):\n row.append(int(col))\n elif (types[colcount] in [\"decimal\",\"real\"]):\n row.append(float(col))\n elif (types[colcount] in [\"date\",\"time\",\"timestamp\"]):\n row.append(str(col))\n else:\n row.append(col)\n except:\n row.append(col)\n colcount += 1\n rows.append(row)\n rowlist = ibm_db.fetch_tuple(stmt)\n \n if flag([\"-r\",\"-array\"]):\n rows.insert(0,columns)\n if len(procArgs) > 0:\n allresults = []\n allresults.append(rows)\n for x in result[1:]:\n allresults.append(x)\n return allresults # rows,returned_results\n else:\n return rows\n else:\n df = pandas.DataFrame.from_records(rows,columns=columns)\n if flag(\"-grid\") or _settings['display'] == 'GRID':\n if (_environment['qgrid'] == False):\n with pandas.option_context('display.max_rows', None, 'display.max_columns', None): \n pdisplay(df)\n else:\n try:\n pdisplay(qgrid.show_grid(df))\n except:\n errormsg(\"Grid cannot be used to display data with duplicate column names. Use option -a or %sql OPTION DISPLAY PANDAS instead.\")\n \n return \n else:\n if flag([\"-a\",\"-all\"]) or _settings[\"maxrows\"] == -1 : # All of the rows\n with pandas.option_context('display.max_rows', None, 'display.max_columns', None): \n pdisplay(df)\n else:\n return df\n \n else:\n if len(procArgs) > 0:\n allresults = []\n for x in result[1:]:\n allresults.append(x)\n return allresults # rows,returned_results\n else:\n return None\n \n except Exception as err:\n db2_error(False)\n return None",
"_____no_output_____"
]
],
[
[
"### Parse Prepare/Execute\nThe PREPARE statement is used for repeated execution of a SQL statement. The PREPARE statement has the format:\n```\nstmt = PREPARE SELECT EMPNO FROM EMPLOYEE WHERE WORKDEPT=? AND SALARY<?\n```\nThe SQL statement that you want executed is placed after the PREPARE statement with the location of variables marked with ? (parameter) markers. The variable stmt contains the prepared statement that need to be passed to the EXECUTE statement. The EXECUTE statement has the format:\n```\nEXECUTE :x USING z, y, s \n```\nThe first variable (:x) is the name of the variable that you assigned the results of the prepare statement. The values after the USING clause are substituted into the prepare statement where the ? markers are found. \n\nIf the values in USING clause are variable names (z, y, s), a **link** is created to these variables as part of the execute statement. If you use the variable subsitution form of variable name (:z, :y, :s), the **contents** of the variable are placed into the USING clause. Normally this would not make much of a difference except when you are dealing with binary strings or JSON strings where the quote characters may cause some problems when subsituted into the statement. ",
"_____no_output_____"
]
],
[
[
"def parsePExec(hdbc, inSQL):\n \n import ibm_db \n global _stmt, _stmtID, _stmtSQL, sqlcode\n \n cParms = inSQL.split()\n parmCount = len(cParms)\n if (parmCount == 0): return(None) # Nothing to do but this shouldn't happen\n \n keyword = cParms[0].upper() # Upper case the keyword\n \n if (keyword == \"PREPARE\"): # Prepare the following SQL\n uSQL = inSQL.upper()\n found = uSQL.find(\"PREPARE\")\n sql = inSQL[found+7:].strip()\n\n try:\n pattern = \"\\?\\*[0-9]+\"\n findparm = re.search(pattern,sql)\n while findparm != None:\n found = findparm.group(0)\n count = int(found[2:])\n markers = ('?,' * count)[:-1]\n sql = sql.replace(found,markers)\n findparm = re.search(pattern,sql)\n \n stmt = ibm_db.prepare(hdbc,sql) # Check error code here\n if (stmt == False): \n db2_error(False)\n return(False)\n \n stmttext = str(stmt).strip()\n stmtID = stmttext[33:48].strip()\n \n if (stmtID in _stmtID) == False:\n _stmt.append(stmt) # Prepare and return STMT to caller\n _stmtID.append(stmtID)\n else:\n stmtIX = _stmtID.index(stmtID)\n _stmt[stmtiX] = stmt\n \n return(stmtID)\n \n except Exception as err:\n print(err)\n db2_error(False)\n return(False)\n\n if (keyword == \"EXECUTE\"): # Execute the prepare statement\n if (parmCount < 2): return(False) # No stmtID available\n \n stmtID = cParms[1].strip()\n if (stmtID in _stmtID) == False:\n errormsg(\"Prepared statement not found or invalid.\")\n return(False)\n\n stmtIX = _stmtID.index(stmtID)\n stmt = _stmt[stmtIX]\n\n try: \n\n if (parmCount == 2): # Only the statement handle available\n result = ibm_db.execute(stmt) # Run it\n elif (parmCount == 3): # Not quite enough arguments\n errormsg(\"Missing or invalid USING clause on EXECUTE statement.\")\n sqlcode = -99999\n return(False)\n else:\n using = cParms[2].upper()\n if (using != \"USING\"): # Bad syntax again\n errormsg(\"Missing USING clause on EXECUTE statement.\")\n sqlcode = -99999\n return(False)\n \n uSQL = inSQL.upper()\n found = uSQL.find(\"USING\")\n parmString = inSQL[found+5:].strip()\n parmset = splitargs(parmString)\n \n if (len(parmset) == 0):\n errormsg(\"Missing parameters after the USING clause.\")\n sqlcode = -99999\n return(False)\n \n parms = []\n\n parm_count = 0\n \n CONSTANT = 0\n VARIABLE = 1\n const = [0]\n const_cnt = 0\n \n for v in parmset:\n \n parm_count = parm_count + 1\n \n if (v[1] == True or v[2] == True): # v[1] true if string, v[2] true if num\n \n parm_type = CONSTANT \n const_cnt = const_cnt + 1\n if (v[2] == True):\n if (isinstance(v[0],int) == True): # Integer value \n sql_type = ibm_db.SQL_INTEGER\n elif (isinstance(v[0],float) == True): # Float value\n sql_type = ibm_db.SQL_DOUBLE\n else:\n sql_type = ibm_db.SQL_INTEGER\n else:\n sql_type = ibm_db.SQL_CHAR\n \n const.append(v[0])\n\n \n else:\n \n parm_type = VARIABLE\n \n # See if the variable has a type associated with it varname@type\n \n varset = v[0].split(\"@\")\n parm_name = varset[0]\n \n parm_datatype = \"char\"\n\n # Does the variable exist?\n if (parm_name not in globals()):\n errormsg(\"SQL Execute parameter \" + parm_name + \" not found\")\n sqlcode = -99999\n return(false) \n \n if (len(varset) > 1): # Type provided\n parm_datatype = varset[1]\n\n if (parm_datatype == \"dec\" or parm_datatype == \"decimal\"):\n sql_type = ibm_db.SQL_DOUBLE\n elif (parm_datatype == \"bin\" or parm_datatype == \"binary\"):\n sql_type = ibm_db.SQL_BINARY\n elif (parm_datatype == \"int\" or parm_datatype == \"integer\"):\n sql_type = ibm_db.SQL_INTEGER\n else:\n sql_type = ibm_db.SQL_CHAR\n \n try:\n if 
(parm_type == VARIABLE):\n result = ibm_db.bind_param(stmt, parm_count, globals()[parm_name], ibm_db.SQL_PARAM_INPUT, sql_type)\n else:\n result = ibm_db.bind_param(stmt, parm_count, const[const_cnt], ibm_db.SQL_PARAM_INPUT, sql_type)\n \n except:\n result = False\n \n if (result == False):\n errormsg(\"SQL Bind on variable \" + parm_name + \" failed.\")\n sqlcode = -99999\n return(false) \n \n result = ibm_db.execute(stmt) # ,tuple(parms))\n \n if (result == False): \n errormsg(\"SQL Execute failed.\") \n return(False)\n \n if (ibm_db.num_fields(stmt) == 0): return(True) # Command successfully completed\n \n return(fetchResults(stmt))\n \n except Exception as err:\n db2_error(False)\n return(False)\n \n return(False)\n \n return(False) ",
"_____no_output_____"
]
],
[
[
"### Fetch Result Set\nThis code will take the stmt handle and then produce a result set of rows as either an array (`-r`,`-array`) or as an array of json records (`-json`).",
"_____no_output_____"
]
],
[
[
"def fetchResults(stmt):\n \n global sqlcode\n \n rows = []\n columns, types = getColumns(stmt)\n \n # By default we assume that the data will be an array\n is_array = True\n \n # Check what type of data we want returned - array or json\n if (flag([\"-r\",\"-array\"]) == False):\n # See if we want it in JSON format, if not it remains as an array\n if (flag(\"-json\") == True):\n is_array = False\n \n # Set column names to lowercase for JSON records\n if (is_array == False):\n columns = [col.lower() for col in columns] # Convert to lowercase for each of access\n \n # First row of an array has the column names in it\n if (is_array == True):\n rows.append(columns)\n \n result = ibm_db.fetch_tuple(stmt)\n rowcount = 0\n while (result):\n \n rowcount += 1\n \n if (is_array == True):\n row = []\n else:\n row = {}\n \n colcount = 0\n for col in result:\n try:\n if (types[colcount] in [\"int\",\"bigint\"]):\n if (is_array == True):\n row.append(int(col))\n else:\n row[columns[colcount]] = int(col)\n elif (types[colcount] in [\"decimal\",\"real\"]):\n if (is_array == True):\n row.append(float(col))\n else:\n row[columns[colcount]] = float(col)\n elif (types[colcount] in [\"date\",\"time\",\"timestamp\"]):\n if (is_array == True):\n row.append(str(col))\n else:\n row[columns[colcount]] = str(col)\n else:\n if (is_array == True):\n row.append(col)\n else:\n row[columns[colcount]] = col\n \n except:\n if (is_array == True):\n row.append(col)\n else:\n row[columns[colcount]] = col\n \n colcount += 1\n \n rows.append(row)\n result = ibm_db.fetch_tuple(stmt)\n \n if (rowcount == 0): \n sqlcode = 100 \n else:\n sqlcode = 0\n \n return rows\n ",
"_____no_output_____"
]
],
[
[
"### Parse Commit\nThere are three possible COMMIT verbs that can bs used:\n- COMMIT [WORK] - Commit the work in progress - The WORK keyword is not checked for\n- ROLLBACK - Roll back the unit of work\n- AUTOCOMMIT ON/OFF - Are statements committed on or off?\n\nThe statement is passed to this routine and then checked.",
"_____no_output_____"
]
],
[
[
"def parseCommit(sql):\n \n global _hdbc, _hdbi, _connected, _runtime, _stmt, _stmtID, _stmtSQL\n\n if (_connected == False): return # Nothing to do if we are not connected\n \n cParms = sql.split()\n if (len(cParms) == 0): return # Nothing to do but this shouldn't happen\n \n keyword = cParms[0].upper() # Upper case the keyword\n \n if (keyword == \"COMMIT\"): # Commit the work that was done\n try:\n result = ibm_db.commit (_hdbc) # Commit the connection\n if (len(cParms) > 1):\n keyword = cParms[1].upper()\n if (keyword == \"HOLD\"):\n return\n \n del _stmt[:]\n del _stmtID[:]\n\n except Exception as err:\n db2_error(False)\n \n return\n \n if (keyword == \"ROLLBACK\"): # Rollback the work that was done\n try:\n result = ibm_db.rollback(_hdbc) # Rollback the connection\n del _stmt[:]\n del _stmtID[:] \n\n except Exception as err:\n db2_error(False)\n \n return\n \n if (keyword == \"AUTOCOMMIT\"): # Is autocommit on or off\n if (len(cParms) > 1): \n op = cParms[1].upper() # Need ON or OFF value\n else:\n return\n \n try:\n if (op == \"OFF\"):\n ibm_db.autocommit(_hdbc, False)\n elif (op == \"ON\"):\n ibm_db.autocommit (_hdbc, True)\n return \n \n except Exception as err:\n db2_error(False)\n return \n \n return",
"_____no_output_____"
]
],
[
[
"### Set Flags\nThis code will take the input SQL block and update the global flag list. The global flag list is just a list of options that are set at the beginning of a code block. The absence of a flag means it is false. If it exists it is true.",
"_____no_output_____"
]
],
[
[
"def setFlags(inSQL):\n \n global _flags\n \n _flags = [] # Delete all of the current flag settings\n \n pos = 0\n end = len(inSQL)-1\n inFlag = False\n ignore = False\n outSQL = \"\"\n flag = \"\"\n \n while (pos <= end):\n ch = inSQL[pos]\n if (ignore == True): \n outSQL = outSQL + ch\n else:\n if (inFlag == True):\n if (ch != \" \"):\n flag = flag + ch\n else:\n _flags.append(flag)\n inFlag = False\n else:\n if (ch == \"-\"):\n flag = \"-\"\n inFlag = True\n elif (ch == ' '):\n outSQL = outSQL + ch\n else:\n outSQL = outSQL + ch\n ignore = True\n pos += 1\n \n if (inFlag == True):\n _flags.append(flag)\n \n return outSQL",
"_____no_output_____"
]
],
[
[
"### Check to see if flag Exists\nThis function determines whether or not a flag exists in the global flag array. Absence of a value means it is false. The parameter can be a single value, or an array of values.",
"_____no_output_____"
]
],
[
[
"def flag(inflag):\n \n global _flags\n\n if isinstance(inflag,list):\n for x in inflag:\n if (x in _flags):\n return True\n return False\n else:\n if (inflag in _flags):\n return True\n else:\n return False",
"_____no_output_____"
]
],
[
[
"### Generate a list of SQL lines based on a delimiter\nNote that this function will make sure that quotes are properly maintained so that delimiters inside of quoted strings do not cause errors.",
"_____no_output_____"
]
],
[
[
"def splitSQL(inputString, delimiter):\n \n pos = 0\n arg = \"\"\n results = []\n quoteCH = \"\"\n \n inSQL = inputString.strip()\n if (len(inSQL) == 0): return(results) # Not much to do here - no args found\n \n while pos < len(inSQL):\n ch = inSQL[pos]\n pos += 1\n if (ch in ('\"',\"'\")): # Is this a quote characters?\n arg = arg + ch # Keep appending the characters to the current arg\n if (ch == quoteCH): # Is this quote character we are in\n quoteCH = \"\"\n elif (quoteCH == \"\"): # Create the quote\n quoteCH = ch\n else:\n None\n elif (quoteCH != \"\"): # Still in a quote\n arg = arg + ch\n elif (ch == delimiter): # Is there a delimiter?\n results.append(arg)\n arg = \"\"\n else:\n arg = arg + ch\n \n if (arg != \"\"):\n results.append(arg)\n \n return(results)",
"_____no_output_____"
]
],
[
[
"### Main %sql Magic Definition\nThe main %sql Magic logic is found in this section of code. This code will register the Magic command and allow Jupyter notebooks to interact with Db2 by using this extension.\n",
"_____no_output_____"
]
],
[
[
"@magics_class\nclass DB2(Magics):\n \n @needs_local_scope \n @line_cell_magic\n def sql(self, line, cell=None, local_ns=None):\n \n # Before we event get started, check to see if you have connected yet. Without a connection we \n # can't do anything. You may have a connection request in the code, so if that is true, we run those,\n # otherwise we connect immediately\n \n # If your statement is not a connect, and you haven't connected, we need to do it for you\n \n global _settings, _environment\n global _hdbc, _hdbi, _connected, _runtime, sqlstate, sqlerror, sqlcode, sqlelapsed\n \n # If you use %sql (line) we just run the SQL. If you use %%SQL the entire cell is run.\n \n flag_cell = False\n flag_output = False\n sqlstate = \"0\"\n sqlerror = \"\"\n sqlcode = 0\n sqlelapsed = 0\n \n start_time = time.time()\n end_time = time.time()\n \n # Macros gets expanded before anything is done\n \n SQL1 = setFlags(line.strip()) \n SQL1 = checkMacro(SQL1) # Update the SQL if any macros are in there\n SQL2 = cell \n \n if flag(\"-sampledata\"): # Check if you only want sample data loaded\n if (_connected == False):\n if (db2_doConnect() == False):\n errormsg('A CONNECT statement must be issued before issuing SQL statements.')\n return \n \n db2_create_sample(flag([\"-q\",\"-quiet\"]))\n return \n \n if SQL1 == \"?\" or flag([\"-h\",\"-help\"]): # Are you asking for help\n sqlhelp()\n return\n \n if len(SQL1) == 0 and SQL2 == None: return # Nothing to do here\n \n # Check for help\n\n if SQL1.upper() == \"? CONNECT\": # Are you asking for help on CONNECT\n connected_help()\n return \n \n sqlType,remainder = sqlParser(SQL1,local_ns) # What type of command do you have?\n \n if (sqlType == \"CONNECT\"): # A connect request \n parseConnect(SQL1,local_ns)\n return \n elif (sqlType == \"USING\"): # You want to use a dataframe to create a table?\n createDF(_hdbc,SQL1,local_ns)\n return\n elif (sqlType == \"DEFINE\"): # Create a macro from the body\n result = setMacro(SQL2,remainder)\n return\n elif (sqlType == \"OPTION\"):\n setOptions(SQL1)\n return \n elif (sqlType == 'COMMIT' or sqlType == 'ROLLBACK' or sqlType == 'AUTOCOMMIT'):\n parseCommit(remainder)\n return\n elif (sqlType == \"PREPARE\"):\n pstmt = parsePExec(_hdbc, remainder)\n return(pstmt)\n elif (sqlType == \"EXECUTE\"):\n result = parsePExec(_hdbc, remainder)\n return(result) \n elif (sqlType == \"CALL\"):\n result = parseCall(_hdbc, remainder, local_ns)\n return(result)\n else:\n pass \n \n sql = SQL1\n \n if (sql == \"\"): sql = SQL2\n \n if (sql == \"\"): return # Nothing to do here\n \n if (_connected == False):\n if (db2_doConnect() == False):\n errormsg('A CONNECT statement must be issued before issuing SQL statements.')\n return \n \n if _settings[\"maxrows\"] == -1: # Set the return result size\n pandas.reset_option('display.max_rows')\n else:\n pandas.options.display.max_rows = _settings[\"maxrows\"]\n \n runSQL = re.sub('.*?--.*$',\"\",sql,flags=re.M)\n remainder = runSQL.replace(\"\\n\",\" \") \n if flag([\"-d\",\"-delim\"]):\n sqlLines = splitSQL(remainder,\"@\")\n else:\n sqlLines = splitSQL(remainder,\";\")\n flag_cell = True\n \n # For each line figure out if you run it as a command (db2) or select (sql)\n\n for sqlin in sqlLines: # Run each command\n \n sqlin = checkMacro(sqlin) # Update based on any macros\n\n sqlType, sql = sqlParser(sqlin,local_ns) # Parse the SQL \n if (sql.strip() == \"\"): continue\n if flag([\"-e\",\"-echo\"]): debug(sql,False)\n \n if flag(\"-t\"):\n cnt = sqlTimer(_hdbc, _settings[\"runtime\"], sql) # 
Given the sql and parameters, clock the time\n if (cnt >= 0): print(\"Total iterations in %s second(s): %s\" % (_settings[\"runtime\"],cnt)) \n return(cnt)\n \n elif flag([\"-pb\",\"-bar\",\"-pp\",\"-pie\",\"-pl\",\"-line\"]): # We are plotting some results \n \n plotData(_hdbi, sql) # Plot the data and return\n return\n \n else:\n \n try: # See if we have an answer set\n stmt = ibm_db.prepare(_hdbc,sql)\n if (ibm_db.num_fields(stmt) == 0): # No, so we just execute the code\n result = ibm_db.execute(stmt) # Run it \n if (result == False): # Error executing the code\n db2_error(flag([\"-q\",\"-quiet\"])) \n continue\n \n rowcount = ibm_db.num_rows(stmt) \n \n if (rowcount == 0 and flag([\"-q\",\"-quiet\"]) == False):\n errormsg(\"No rows found.\") \n \n continue # Continue running\n \n elif flag([\"-r\",\"-array\",\"-j\",\"-json\"]): # raw, json, format json\n row_count = 0\n resultSet = []\n try:\n result = ibm_db.execute(stmt) # Run it\n if (result == False): # Error executing the code\n db2_error(flag([\"-q\",\"-quiet\"])) \n return\n \n if flag(\"-j\"): # JSON single output\n row_count = 0\n json_results = []\n while( ibm_db.fetch_row(stmt) ):\n row_count = row_count + 1\n jsonVal = ibm_db.result(stmt,0)\n jsonDict = json.loads(jsonVal)\n json_results.append(jsonDict)\n flag_output = True \n \n if (row_count == 0): sqlcode = 100\n return(json_results)\n \n else:\n return(fetchResults(stmt))\n \n except Exception as err:\n db2_error(flag([\"-q\",\"-quiet\"]))\n return\n \n else:\n \n try:\n df = pandas.read_sql(sql,_hdbi)\n \n except Exception as err:\n db2_error(False)\n return\n \n if (len(df) == 0):\n sqlcode = 100\n if (flag([\"-q\",\"-quiet\"]) == False): \n errormsg(\"No rows found\")\n continue \n \n flag_output = True\n if flag(\"-grid\") or _settings['display'] == 'GRID': # Check to see if we can display the results\n if (_environment['qgrid'] == False):\n with pandas.option_context('display.max_rows', None, 'display.max_columns', None): \n print(df.to_string())\n else:\n try:\n pdisplay(qgrid.show_grid(df))\n except:\n errormsg(\"Grid cannot be used to display data with duplicate column names. Use option -a or %sql OPTION DISPLAY PANDAS instead.\")\n return \n else:\n if flag([\"-a\",\"-all\"]) or _settings[\"maxrows\"] == -1 : # All of the rows\n pandas.options.display.max_rows = None\n pandas.options.display.max_columns = None\n return df # print(df.to_string())\n else:\n pandas.options.display.max_rows = _settings[\"maxrows\"]\n pandas.options.display.max_columns = None\n return df # pdisplay(df) # print(df.to_string())\n \n except:\n db2_error(flag([\"-q\",\"-quiet\"]))\n continue # return\n \n end_time = time.time()\n sqlelapsed = end_time - start_time\n if (flag_output == False and flag([\"-q\",\"-quiet\"]) == False): print(\"Command completed.\")\n \n# Register the Magic extension in Jupyter \nip = get_ipython() \nip.register_magics(DB2)\nload_settings()\n \nsuccess(\"Db2 Extensions Loaded.\")",
"_____no_output_____"
]
],
[
[
"## Pre-defined Macros\nThese macros are used to simulate the LIST TABLES and DESCRIBE commands that are available from within the Db2 command line.",
"_____no_output_____"
]
],
[
[
"%%sql define LIST\n#\n# The LIST macro is used to list all of the tables in the current schema or for all schemas\n#\nvar syntax Syntax: LIST TABLES [FOR ALL | FOR SCHEMA name]\n# \n# Only LIST TABLES is supported by this macro\n#\nif {^1} <> 'TABLES'\n exit {syntax}\nendif\n\n#\n# This SQL is a temporary table that contains the description of the different table types\n#\nWITH TYPES(TYPE,DESCRIPTION) AS (\n VALUES\n ('A','Alias'),\n ('G','Created temporary table'),\n ('H','Hierarchy table'),\n ('L','Detached table'),\n ('N','Nickname'),\n ('S','Materialized query table'),\n ('T','Table'),\n ('U','Typed table'),\n ('V','View'),\n ('W','Typed view')\n)\nSELECT TABNAME, TABSCHEMA, T.DESCRIPTION FROM SYSCAT.TABLES S, TYPES T\n WHERE T.TYPE = S.TYPE \n\n#\n# Case 1: No arguments - LIST TABLES\n#\nif {argc} == 1\n AND OWNER = CURRENT USER\n ORDER BY TABNAME, TABSCHEMA\n return\nendif \n\n#\n# Case 2: Need 3 arguments - LIST TABLES FOR ALL\n#\nif {argc} == 3\n if {^2}&{^3} == 'FOR&ALL'\n ORDER BY TABNAME, TABSCHEMA\n return\n endif\n exit {syntax}\nendif\n\n#\n# Case 3: Need FOR SCHEMA something here\n#\nif {argc} == 4\n if {^2}&{^3} == 'FOR&SCHEMA'\n AND TABSCHEMA = '{^4}'\n ORDER BY TABNAME, TABSCHEMA\n return\n else\n exit {syntax}\n endif\nendif\n\n#\n# Nothing matched - Error\n#\nexit {syntax}",
"_____no_output_____"
],
[
"%%sql define describe\n#\n# The DESCRIBE command can either use the syntax DESCRIBE TABLE <name> or DESCRIBE TABLE SELECT ...\n#\nvar syntax Syntax: DESCRIBE [TABLE name | SELECT statement] \n#\n# Check to see what count of variables is... Must be at least 2 items DESCRIBE TABLE x or SELECT x\n#\nif {argc} < 2\n exit {syntax}\nendif\n\nCALL ADMIN_CMD('{*0}');",
"_____no_output_____"
]
],
[
[
"Set the table formatting to left align a table in a cell. By default, tables are centered in a cell. Remove this cell if you don't want to change Jupyter notebook formatting for tables. In addition, we skip this code if you are running in a shell environment rather than a Jupyter notebook",
"_____no_output_____"
]
],
[
[
"#%%html\n#<style>\n# table {margin-left: 0 !important; text-align: left;}\n#</style>",
"_____no_output_____"
]
],
[
[
"#### Credits: IBM 2021, George Baklarz [[email protected]]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d049dd792a0a0a825d09daf449fc958358b543c8 | 11,284 | ipynb | Jupyter Notebook | sagemaker-debugger/tensorflow_profiling/tf-resnet-profiling-multi-gpu-multi-node.ipynb | Amirosimani/amazon-sagemaker-examples | bc35e7a9da9e2258e77f98098254c2a8e308041a | [
"Apache-2.0"
] | 2,610 | 2020-10-01T14:14:53.000Z | 2022-03-31T18:02:31.000Z | sagemaker-debugger/tensorflow_profiling/tf-resnet-profiling-multi-gpu-multi-node.ipynb | Amirosimani/amazon-sagemaker-examples | bc35e7a9da9e2258e77f98098254c2a8e308041a | [
"Apache-2.0"
] | 1,959 | 2020-09-30T20:22:42.000Z | 2022-03-31T23:58:37.000Z | sagemaker-debugger/tensorflow_profiling/tf-resnet-profiling-multi-gpu-multi-node.ipynb | Amirosimani/amazon-sagemaker-examples | bc35e7a9da9e2258e77f98098254c2a8e308041a | [
"Apache-2.0"
] | 2,052 | 2020-09-30T22:11:46.000Z | 2022-03-31T23:02:51.000Z | 31.431755 | 347 | 0.625753 | [
[
[
"# Profiling TensorFlow Multi GPU Multi Node Training Job with Amazon SageMaker Debugger\n\nThis notebook will walk you through creating a TensorFlow training job with the SageMaker Debugger profiling feature enabled. It will create a multi GPU multi node training using Horovod. \n",
"_____no_output_____"
],
[
"### (Optional) Install SageMaker and SMDebug Python SDKs\nTo use the new Debugger profiling features released in December 2020, ensure that you have the latest versions of SageMaker and SMDebug SDKs installed. Use the following cell to update the libraries and restarts the Jupyter kernel to apply the updates.",
"_____no_output_____"
]
],
[
[
"import sys\nimport IPython\ninstall_needed = False # should only be True once\nif install_needed:\n print(\"installing deps and restarting kernel\")\n !{sys.executable} -m pip install -U sagemaker smdebug\n IPython.Application.instance().kernel.do_shutdown(True)",
"_____no_output_____"
]
],
[
[
"## 1. Create a Training Job with Profiling Enabled<a class=\"anchor\" id=\"option-1\"></a>\n\nYou will use the standard [SageMaker Estimator API for Tensorflow](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/sagemaker.tensorflow.html#tensorflow-estimator) to create training jobs. To enable profiling, create a `ProfilerConfig` object and pass it to the `profiler_config` parameter of the `TensorFlow` estimator.",
"_____no_output_____"
],
[
"### Define parameters for distributed training",
"_____no_output_____"
],
[
"This parameter tells SageMaker how to configure and run horovod. If you want to use more than 4 GPUs per node then change the process_per_host paramter accordingly.",
"_____no_output_____"
]
],
[
[
"distributions = {\n \"mpi\": {\n \"enabled\": True,\n \"processes_per_host\": 4,\n \"custom_mpi_options\": \"-verbose -x HOROVOD_TIMELINE=./hvd_timeline.json -x NCCL_DEBUG=INFO -x OMPI_MCA_btl_vader_single_copy_mechanism=none\",\n }\n}",
"_____no_output_____"
]
],
[
[
"### Configure rules\nWe specify the following rules:\n- loss_not_decreasing: checks if loss is decreasing and triggers if the loss has not decreased by a certain persentage in the last few iterations\n- LowGPUUtilization: checks if GPU is under-utilizated \n- ProfilerReport: runs the entire set of performance rules and create a final output report with further insights and recommendations.",
"_____no_output_____"
]
],
[
[
"from sagemaker.debugger import Rule, ProfilerRule, rule_configs\n\nrules = [\n Rule.sagemaker(rule_configs.loss_not_decreasing()),\n ProfilerRule.sagemaker(rule_configs.LowGPUUtilization()),\n ProfilerRule.sagemaker(rule_configs.ProfilerReport()),\n]",
"_____no_output_____"
]
],
[
[
"### Specify a profiler configuration\nThe following configuration will capture system metrics at 500 milliseconds. The system metrics include utilization per CPU, GPU, memory utilization per CPU, GPU as well I/O and network.\n\nDebugger will capture detailed profiling information from step 5 to step 15. This information includes Horovod metrics, dataloading, preprocessing, operators running on CPU and GPU.",
"_____no_output_____"
]
],
[
[
"from sagemaker.debugger import ProfilerConfig, FrameworkProfile\n\nprofiler_config = ProfilerConfig(\n system_monitor_interval_millis=500,\n framework_profile_params=FrameworkProfile(\n local_path=\"/opt/ml/output/profiler/\", start_step=5, num_steps=10\n ),\n)",
"_____no_output_____"
]
],
[
[
"### Get the image URI\nThe image that we will is dependent on the region that you are running this notebook in.",
"_____no_output_____"
]
],
[
[
"import boto3\n\nsession = boto3.session.Session()\nregion = session.region_name\n\nimage_uri = f\"763104351884.dkr.ecr.{region}.amazonaws.com/tensorflow-training:2.3.1-gpu-py37-cu110-ubuntu18.04\"",
"_____no_output_____"
]
],
[
[
"### Define estimator\n\nTo enable profiling, you need to pass the Debugger profiling configuration (`profiler_config`), a list of Debugger rules (`rules`), and the image URI (`image_uri`) to the estimator. Debugger enables monitoring and profiling while the SageMaker estimator requests a training job.",
"_____no_output_____"
]
],
[
[
"import sagemaker\nfrom sagemaker.tensorflow import TensorFlow\n\nestimator = TensorFlow(\n role=sagemaker.get_execution_role(),\n image_uri=image_uri,\n instance_count=2,\n instance_type=\"ml.p3.8xlarge\",\n entry_point=\"tf-hvd-train.py\",\n source_dir=\"entry_point\",\n profiler_config=profiler_config,\n distribution=distributions,\n rules=rules,\n)",
"_____no_output_____"
]
],
[
[
"### Start training job\n\nThe following `estimator.fit()` with `wait=False` argument initiates the training job in the background. You can proceed to run the dashboard or analysis notebooks.",
"_____no_output_____"
]
],
[
[
"estimator.fit(wait=False)",
"_____no_output_____"
]
],
[
[
"## 2. Analyze Profiling Data\n\nCopy outputs of the following cell (`training_job_name` and `region`) to run the analysis notebooks `profiling_generic_dashboard.ipynb`, `analyze_performance_bottlenecks.ipynb`, and `profiling_interactive_analysis.ipynb`.",
"_____no_output_____"
]
],
[
[
"training_job_name = estimator.latest_training_job.name\nprint(f\"Training jobname: {training_job_name}\")\nprint(f\"Region: {region}\")",
"_____no_output_____"
]
],
[
[
"While the training is still in progress you can visualize the performance data in SageMaker Studio or in the notebook.\nDebugger provides utilities to plot system metrics in form of timeline charts or heatmaps. Checkout out the notebook \n[profiling_interactive_analysis.ipynb](analysis_tools/profiling_interactive_analysis.ipynb) for more details. In the following code cell we plot the total CPU and GPU utilization as timeseries charts. To visualize other metrics such as I/O, memory, network you simply need to extend the list passed to `select_dimension` and `select_events`.",
"_____no_output_____"
],
[
"### Install the SMDebug client library to use Debugger analysis tools",
"_____no_output_____"
]
],
[
[
"import pip\n\n\ndef import_or_install(package):\n try:\n __import__(package)\n except ImportError:\n pip.main([\"install\", package])\n\n\nimport_or_install(\"smdebug\")",
"_____no_output_____"
]
],
[
[
"### Access the profiling data using the SMDebug `TrainingJob` utility class",
"_____no_output_____"
]
],
[
[
"from smdebug.profiler.analysis.notebook_utils.training_job import TrainingJob\n\ntj = TrainingJob(training_job_name, region)\ntj.wait_for_sys_profiling_data_to_be_available()",
"_____no_output_____"
]
],
[
[
"### Plot time line charts\n\nThe following code shows how to use the SMDebug `TrainingJob` object, refresh the object if new event files are available, and plot time line charts of CPU and GPU usage.",
"_____no_output_____"
]
],
[
[
"from smdebug.profiler.analysis.notebook_utils.timeline_charts import TimelineCharts\n\nsystem_metrics_reader = tj.get_systems_metrics_reader()\nsystem_metrics_reader.refresh_event_file_list()\n\nview_timeline_charts = TimelineCharts(\n system_metrics_reader,\n framework_metrics_reader=None,\n select_dimensions=[\"CPU\", \"GPU\"],\n select_events=[\"total\"],\n)",
"_____no_output_____"
]
],
[
[
"## 3. Download Debugger Profiling Report",
"_____no_output_____"
],
[
"The `ProfilerReport()` rule creates an html report `profiler-report.html` with a summary of builtin rules and recommenades of next steps. You can find this report in your S3 bucket. ",
"_____no_output_____"
]
],
[
[
"rule_output_path = estimator.output_path + estimator.latest_training_job.job_name + \"/rule-output\"\nprint(f\"You will find the profiler report in {rule_output_path}\")",
"_____no_output_____"
]
],
[
[
"For more information about how to download and open the Debugger profiling report, see [SageMaker Debugger Profiling Report](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-profiling-report.html) in the SageMaker developer guide.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d049de995d52c42a30f7368dd6208185de9fbe53 | 117,819 | ipynb | Jupyter Notebook | lab2/Lab 2.ipynb | KHYehor/MachineLearning | 726466445241773e50caf45299d71541b0c377d2 | [
"MIT"
] | null | null | null | lab2/Lab 2.ipynb | KHYehor/MachineLearning | 726466445241773e50caf45299d71541b0c377d2 | [
"MIT"
] | null | null | null | lab2/Lab 2.ipynb | KHYehor/MachineLearning | 726466445241773e50caf45299d71541b0c377d2 | [
"MIT"
] | null | null | null | 149.70648 | 53,756 | 0.828338 | [
[
[
"## Лабораторная работа 2 - Линейная и полиномиальная регрессия.",
"_____no_output_____"
],
[
"Одна из множества задач, которой занимается современная физика это поиск материала для изготовления сверхпроводника, работающего при комнатной температуре. Кроме теоретических методов есть и подход со стороны статистики, который подразумевает анализ базы данных материалов для нахождения зависимости критической температуры от других физических характеристик. Именно этим Вы и займетесь.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"В файле **data.csv** содержится весь датасет.",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv('data.csv')\ndata",
"_____no_output_____"
]
],
[
[
"Итого имеем 21 тысячу строк и 169 колонок, из которых первые 167 - признаки, колонка **critical_temp** содержит величину, которую надо предсказать. Колонка **material** - содержит химическую формулу материала, ее можно отбросить.\n\nВыполним предобработку данных и разобъем на тренировочную и тестовую выборки:",
"_____no_output_____"
]
],
[
[
"# X - last two columns cut.\n# Y - pre last column.\nx, y = data.values[:, :-2].astype(np.float32), data.values[:, -2:-1].astype(np.float32)\n\nnp.random.seed(1337)\nis_train = np.random.uniform(size=(x.shape[0],)) < 0.95\n\nx_train, y_train = x[is_train], y[is_train]\nx_test, y_test = x[~is_train], y[~is_train]\n\nprint(f'Train samples: {len(x_train)}')\nprint(f'Test samples: {len(x_test)}')",
"Train samples: 20210\nTest samples: 1053\n"
]
],
[
[
"Реализуйте методы с пометкой `#TODO` класса PolynomialRegression:\n\nМетод `preprocess` должен выполнять следующее преобразование:\n$$\n\\begin{array}{l}\nX=\\begin{bmatrix}\nx_{i,j}\n\\end{bmatrix}_{m\\times n}\\\\\npreprocess( X) =\\begin{bmatrix}\n1 & x_{1,1} & \\dotsc & x_{1,1} & x^{2}_{1,1} & \\dotsc & x^{2}_{1,1} & \\dotsc & x^{p}_{1,1} & \\dotsc & x^{p}_{1,1}\\\\\n1 & x_{2,1} & \\dotsc & x_{2,n} & x^{2}_{2,1} & \\dotsc & x^{2}_{2,n} & \\dotsc & x^{p}_{2,1} & \\dotsc & x^{p}_{2,n}\\\\\n\\vdots & & & & & & & & & & \\\\\n1 & x_{m,1} & \\dotsc & x_{m,n} & x^{2}_{m,1} & \\dotsc & x^{2}_{m,n} & \\dotsc & x^{p}_{m,1} & \\dotsc & x^{p}_{m,n}\n\\end{bmatrix}_{m,N}\n\\end{array}\n$$где p - степень полинома (`self.poly_deg` в коде).\nТаким образом, preprocess добавляет полиномиальные признаки к $X$.\n\nМетод `J` должен вычислять оценочную функцию регрессии:\n$$\nJ( \\theta ) =MSE( Y,\\ h_{\\theta }( X)) +\\alpha _{1}\\sum ^{N}_{i=1}\\sum ^{k}_{j=1} |\\hat{\\theta }_{i,j} |+\\alpha _{2}\\sum ^{N}_{i=1}\\sum ^{k}_{j=1}\\hat{\\theta }^{2}_{i,j}\n$$\nМетод `grad` должен вычислять градиент $\\frac{\\partial J}{\\partial \\theta }$:\n$$\n{\\displaystyle \\frac{\\partial J}{\\partial \\theta }} =-{\\displaystyle \\frac{2}{m}} X^{T} (Y-X\\theta )+\\begin{bmatrix}\n0 & & & \\\\\n & 1 & & \\\\\n & & \\ddots & \\\\\n & & & 1\n\\end{bmatrix} \\times ( \\alpha _{1} sign(\\theta )+2\\alpha _{2} \\theta )\n$$\nМетод `moments` должен возвращать вектор-строки $\\mu,\\sigma$ для среднего и стандартного отклонения каждой колонки. Помните, что колонку с единицами не нужно нормализировать, так что соответствующие среднее и стандартное отколонение для нее укажите равными 0 и 1 соответственно. Можно использовать функции \n[np.mean](https://numpy.org/doc/stable/reference/generated/numpy.mean.html) и [np.std](https://numpy.org/doc/stable/reference/generated/numpy.std.html).\n\nМетод `normalize` должен выполнять нормализацию $X$ на основе статистик $\\mu,\\sigma$, что вернул метод **moments**. Для того чтобы избежать деления на 0, можете к $\\sigma$ прибавить маленькую величину, например $10^{-8}$.\n\nМетод `get_batch` должен возвращать матрицы $X_b, Y_b$ из произвольно выбранных $b$ элементов выборки ($b$ в коде - `self.batch_size`).\n\nМетод `fit` выполняет оптимизацию $J(\\theta)$. Для лучшей сходимости реализуйте алгоритм оптимизации **Momentum**:\n$$\n\\begin{array}{l}\nv_t = \\gamma v_{t-1} + \\alpha\\nabla J(\\theta_{t-1})\\\\\n\\theta_t = \\theta_{t-1} - v_t\n\\end{array}\n$$\nгде $\\gamma$ установите равным $0.9$ (можете поэкспериментировать с другими величиными), $v_1=[0]_{N,k}$.",
"_____no_output_____"
]
],
[
[
"class PolynomialRegression:\n def __init__(\n self,\n alpha1,\n alpha2,\n poly_deg,\n learning_rate,\n batch_size,\n train_steps\n ):\n self.alpha1 = alpha1\n self.alpha2 = alpha2\n self.poly_deg = poly_deg\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.train_steps = train_steps\n \n def preprocess(self, x):\n # Create first one column.\n ones = [np.ones(shape=(x.shape[0], 1))]\n # Polynomic scale.\n powers = [x ** i for i in range(1, self.poly_deg + 1)]\n # Unite into one.\n result = np.concatenate(ones + powers, axis=1)\n return result\n \n def normalize(self, x):\n return (x - self.mu) / (self.sigma + 1e-8)\n \n def moments(self, x):\n # Arttimetic average (a + b + ... + z) / n.\n mu = np.mean(x, axis=0)\n # Standart deviation.\n sigma = np.std(x, axis=0)\n mu[0] = 0\n sigma[0] = 1\n return mu, sigma\n \n def J(self, x, y, theta):\n # Theta is not multiply with first (ones) column.\n circumcized_theta = theta[1::]\n # Mean squared error.\n mse = ((y - np.dot(x, theta)) ** 2).mean(axis=None)\n # Module sum of theta (alpha1).\n l1 = self.alpha1 * np.sum(np.abs(circumcized_theta), axis=None)\n # Quadro sum of theta (alpha2).\n l2 = self.alpha2 * np.sum(circumcized_theta ** 2, axis=None)\n return mse + l1 + l2\n \n def grad(self, x, y, theta):\n # Create ones matrix.\n diag = np.eye(x.shape[1], x.shape[1])\n # Init first element as 0.\n diag[0][0] = 0\n # Left assign.\n l1l2 = self.alpha1 * np.sign(theta) + 2 * self.alpha2 * theta\n return (-2/x.shape[0]) * x.T @ (y - (x @ theta)) + (diag @ l1l2)\n \n def get_batch(self, x, y):\n # Return random values.\n i = np.random.default_rng().choice(x.shape[0], self.batch_size, replace=False)\n return x[i], y[i]\n \n def fit(self, x, y):\n ## Trasform source data to polynom regression.\n x = self.preprocess(x)\n (m, N), (_, k) = x.shape, y.shape\n \n # Calculate mu and standart deviation.\n self.mu, self.sigma = self.moments(x)\n # Normalize using average values.\n x = self.normalize(x)\n \n try:\n assert np.allclose(x[:, 1:].mean(axis=0), 0, atol=1e-3)\n assert np.all((np.abs(x[:, 1:].std(axis=0)) < 1e-2) | (np.abs(x[:, 1:].std(axis=0) - 1) < 1e-2))\n except AssertionError as e:\n print('Something wrong with normalization')\n raise e\n # Random x & y.\n x_batch, y_batch = self.get_batch(x, y)\n try:\n assert x_batch.shape[0] == self.batch_size\n assert y_batch.shape[0] == self.batch_size\n except AssertionError as e:\n print('Something wrong with get_batch')\n raise e\n \n theta = np.zeros(shape=(N, k))\n v_1 = np.zeros(shape=(N, k))\n v_t = v_1\n for step in range(self.train_steps):\n x_batch, y_batch = self.get_batch(x, y)\n theta_grad = self.grad(x_batch, y_batch, theta)\n\n v_t = 0.9 * v_t + self.learning_rate * theta_grad\n theta = theta - v_t\n \n self.theta = theta\n \n return self\n\n def predict(self, x):\n x = self.preprocess(x)\n x = self.normalize(x)\n return x @ self.theta\n \n def score(self, x, y):\n y_pred = self.predict(x)\n return np.abs(y - y_pred).mean()",
"_____no_output_____"
],
[
"reg = PolynomialRegression(0, 0, 1, 1e-3, 1024, 1000).fit(x_train, y_train)\nprint(f'Test MAE: {reg.score(x_test, y_test)}')",
"Test MAE: 12.593122309572655\n"
]
],
[
[
"Полученный MAE на тестовой выборке должен быть приблизительно равен $12.5$.",
"_____no_output_____"
],
[
"Выполните поиск оптимальных параметров регуляризации $\\alpha_1,\\alpha_2$ по отдельности (то есть устанавливаете один параметр равным нулю и ищете второй, потом наоборот) и старшей степени полиномиальной регрессии (`poly_deg`). Обратите внимание, что поиск параметра регуляризации следует искать на логарифмической шкале. То есть, например, список кандидатов может быть задан как: `10 ** np.linspace(-5, -1, 5)`, что даст вам величины $10^{-5},10^{-4},10^{-3},10^{-2},10^{-1}$.\nПри надобности, можете отрегулировать оптимальный `batch_size`, `learning_rate`, `training_steps`.\n\nРезультаты представьте в виде графиков по примеру ниже.\n\nДополнительные баллы будут начислены за выполнение поиска оптимальных параметров $\\alpha_1,\\alpha_2$ вместе. В таком случае результаты представьте при помощи [plt.matshow](https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.matshow.html).",
"_____no_output_____"
]
],
[
[
"a1 = 10 ** np.linspace(-9, -1, 9)\na2 = 10 ** np.linspace(-9, -1, 9)\nfig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(20, 10))\nfig.suptitle('Poly deg. = 5')\nax1.set_xlabel('Alpha 1')\nax1.set_ylabel('Score')\nax1.set_xscale('log')\nax1.plot([a1i for a1i in a1], [PolynomialRegression(a1i, 0, 1, 1e-3, 1024, 1000).fit(x_train, y_train).score(x_test, y_test) for a1i in a1])\nax2.set_xlabel('Alpha 2')\nax2.set_ylabel('Score')\nax2.set_xscale('log')\nax2.plot([a2i for a2i in a2], [PolynomialRegression(0, a2i, 1, 1e-3, 1024, 1000).fit(x_train, y_train).score(x_test, y_test) for a2i in a2])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Визуализируйте зависимость предсказанной критической температуры от истинной для лучшей модели:",
"_____no_output_____"
]
],
[
[
"reg = PolynomialRegression(1e-5, 1e-5, 5, 1e-3, 1024, 1000).fit(x_train, y_train)\ny_test_pred = reg.predict(x_test)\nprint(f'Test MAE: {reg.score(x_test, y_test)}')\n\nplt.figure(figsize=(10, 10))\nplt.scatter(y_test[:, 0], y_test_pred[:, 0], marker='.', c='r')\nplt.xlabel('True Y')\nplt.ylabel('Predicted Y')\nplt.show()",
"Test MAE: 11.26531342526687\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d049dec99b95715dc24851727baac9ce1aba9709 | 479,295 | ipynb | Jupyter Notebook | source/examples/basics/geocoding/geocoding_level_state_us.ipynb | ASmirnov-HORIS/lets-plot-docs | fb15e81ca0f03d54539c098ce4ee725f03a03d2f | [
"MIT"
] | null | null | null | source/examples/basics/geocoding/geocoding_level_state_us.ipynb | ASmirnov-HORIS/lets-plot-docs | fb15e81ca0f03d54539c098ce4ee725f03a03d2f | [
"MIT"
] | null | null | null | source/examples/basics/geocoding/geocoding_level_state_us.ipynb | ASmirnov-HORIS/lets-plot-docs | fb15e81ca0f03d54539c098ce4ee725f03a03d2f | [
"MIT"
] | 1 | 2021-06-30T10:05:13.000Z | 2021-06-30T10:05:13.000Z | 2,648.038674 | 472,716 | 0.729678 | [
[
[
"from lets_plot.geo_data import *\nfrom lets_plot import *\nLetsPlot.setup_html()",
"The geodata is provided by © OpenStreetMap contributors and is made available here under the Open Database License (ODbL).\n"
],
[
"gdf = geocode_states().countries('US-48').inc_res().get_boundaries()\n\nggplot() + \\\n geom_map(data=gdf, color='white', fill='#636363', \\\n tooltips=layer_tooltips().line('@{found name}')) + \\\n theme(axis_title='blank', axis_text='blank', axis_ticks='blank', axis_line='blank') + \\\n ggsize(800, 600) + ggtitle('US States') + \\\n theme(panel_grid='blank')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
d049e1b2aca762f4ec88b837e20c02e2b7ed1bf4 | 31,091 | ipynb | Jupyter Notebook | notebooks/consistency_of_importance_over_different_initializations.ipynb | atseng95/fourier_attribution_priors | 53f668e315621e4f64f9e11a403f2ea80529eb29 | [
"MIT"
] | 8 | 2020-11-19T02:58:55.000Z | 2021-09-10T14:11:29.000Z | notebooks/consistency_of_importance_over_different_initializations.ipynb | atseng95/fourier_attribution_priors | 53f668e315621e4f64f9e11a403f2ea80529eb29 | [
"MIT"
] | null | null | null | notebooks/consistency_of_importance_over_different_initializations.ipynb | atseng95/fourier_attribution_priors | 53f668e315621e4f64f9e11a403f2ea80529eb29 | [
"MIT"
] | 1 | 2020-09-26T00:49:25.000Z | 2020-09-26T00:49:25.000Z | 39.20681 | 175 | 0.582837 | [
[
[
"import sys\nimport os\nsys.path.append(os.path.abspath(\"../src/\"))\nimport extract.data_loading as data_loading\nimport extract.compute_predictions as compute_predictions\nimport extract.compute_shap as compute_shap\nimport extract.compute_ism as compute_ism\nimport model.util as model_util\nimport model.profile_models as profile_models\nimport model.binary_models as binary_models\nimport plot.viz_sequence as viz_sequence\nimport torch\nimport numpy as np\nimport scipy.stats\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as font_manager\nimport json\nimport tqdm\ntqdm.tqdm_notebook() # It is necessary to call this before the tqdm.notebook submodule is available",
"_____no_output_____"
],
[
"font_manager.fontManager.ttflist.extend(\n font_manager.createFontList(\n font_manager.findSystemFonts(fontpaths=\"/users/amtseng/modules/fonts\")\n )\n)\nplot_params = {\n \"axes.titlesize\": 22,\n \"axes.labelsize\": 20,\n \"legend.fontsize\": 18,\n \"xtick.labelsize\": 16,\n \"ytick.labelsize\": 16,\n \"font.family\": \"Roboto\",\n \"font.weight\": \"bold\"\n}\nplt.rcParams.update(plot_params)",
"_____no_output_____"
]
],
[
[
"### Define paths for the model and data of interest",
"_____no_output_____"
]
],
[
[
"model_type = \"profile\"",
"_____no_output_____"
],
[
"# Shared paths/constants\nreference_fasta = \"/users/amtseng/genomes/hg38.fasta\"\nchrom_sizes = \"/users/amtseng/genomes/hg38.canon.chrom.sizes\"\ndata_base_path = \"/users/amtseng/att_priors/data/processed/\"\nmodel_base_path = \"/users/amtseng/att_priors/models/trained_models/%s/\" % model_type\nchrom_set = [\"chr1\"]\ninput_length = 1346 if model_type == \"profile\" else 1000\nprofile_length = 1000",
"_____no_output_____"
],
[
"# SPI1\ncondition_name = \"SPI1\"\nfiles_spec_path = os.path.join(data_base_path, \"ENCODE_TFChIP/%s/config/SPI1/SPI1_training_paths.json\" % model_type)\nnum_tasks = 4\nnum_strands = 2\ntask_index = None\ncontrols = \"matched\"\nif model_type == \"profile\":\n model_class = profile_models.ProfilePredictorWithMatchedControls\nelse:\n model_class = binary_models.BinaryPredictor\nnoprior_model_base_path = os.path.join(model_base_path, \"SPI1/\")\nprior_model_base_path = os.path.join(model_base_path, \"SPI1_prior/\")\npeak_retention = \"all\"",
"_____no_output_____"
],
[
"# GATA2\ncondition_name = \"GATA2\"\nfiles_spec_path = os.path.join(data_base_path, \"ENCODE_TFChIP/%s/config/GATA2/GATA2_training_paths.json\" % model_type)\nnum_tasks = 3\nnum_strands = 2\ntask_index = None\ncontrols = \"matched\"\nif model_type == \"profile\":\n model_class = profile_models.ProfilePredictorWithMatchedControls\nelse:\n model_class = binary_models.BinaryPredictor\nnoprior_model_base_path = os.path.join(model_base_path, \"GATA2/\")\nprior_model_base_path = os.path.join(model_base_path, \"GATA2_prior/\")\npeak_retention = \"all\"",
"_____no_output_____"
],
[
"# K562\ncondition_name = \"K562\"\nfiles_spec_path = os.path.join(data_base_path, \"ENCODE_DNase/%s/config/K562/K562_training_paths.json\" % model_type)\nnum_tasks = 1\nnum_strands = 1\ntask_index = None\ncontrols = \"shared\"\nif model_type == \"profile\":\n model_class = profile_models.ProfilePredictorWithSharedControls\nelse:\n model_class = binary_models.BinaryPredictor\nnoprior_model_base_path = os.path.join(model_base_path, \"K562/\")\nprior_model_base_path = os.path.join(model_base_path, \"K562_prior/\")\npeak_retention = \"all\"",
"_____no_output_____"
],
[
"# BPNet\ncondition_name = \"BPNet\"\nreference_fasta = \"/users/amtseng/genomes/mm10.fasta\"\nchrom_sizes = \"/users/amtseng/genomes/mm10.canon.chrom.sizes\"\nfiles_spec_path = os.path.join(data_base_path, \"BPNet_ChIPseq/%s/config/BPNet_training_paths.json\" % model_type)\nnum_tasks = 3\nnum_strands = 2\ntask_index = None\ncontrols = \"shared\"\nif model_type == \"profile\":\n model_class = profile_models.ProfilePredictorWithSharedControls\nelse:\n model_class = binary_models.BinaryPredictor\nnoprior_model_base_path = os.path.join(model_base_path, \"BPNet/\")\nprior_model_base_path = os.path.join(model_base_path, \"BPNet_prior/\")\npeak_retention = \"all\"",
"_____no_output_____"
]
],
[
[
"### Get all runs/epochs with random initializations",
"_____no_output_____"
]
],
[
[
"def import_metrics_json(model_base_path, run_num):\n \"\"\"\n Looks in {model_base_path}/{run_num}/metrics.json and returns the contents as a\n Python dictionary. Returns None if the path does not exist.\n \"\"\"\n path = os.path.join(model_base_path, str(run_num), \"metrics.json\")\n if not os.path.exists(path):\n return None\n with open(path, \"r\") as f:\n return json.load(f)",
"_____no_output_____"
],
[
"def get_model_paths(\n model_base_path, metric_name=\"val_prof_corr_losses\",\n reduce_func=(lambda values: np.mean(values)), compare_func=(lambda x, y: x < y),\n print_found_values=True\n):\n \"\"\"\n Looks in `model_base_path` and for each run, returns the full path to\n the best epoch. By default, the best epoch in a run is determined by\n the lowest validation profile loss.\n \"\"\"\n # Get the metrics, ignoring empty or nonexistent metrics.json files\n metrics = {run_num : import_metrics_json(model_base_path, run_num) for run_num in os.listdir(model_base_path)}\n metrics = {key : val for key, val in metrics.items() if val} # Remove empties\n \n model_paths, metric_vals = [], []\n for run_num in sorted(metrics.keys(), key=lambda x: int(x)):\n try:\n # Find the best epoch within that run\n best_epoch_in_run, best_val_in_run = None, None\n for i, subarr in enumerate(metrics[run_num][metric_name][\"values\"]):\n val = reduce_func(subarr)\n if best_val_in_run is None or compare_func(val, best_val_in_run):\n best_epoch_in_run, best_val_in_run = i + 1, val\n model_path = os.path.join(model_base_path, run_num, \"model_ckpt_epoch_%d.pt\" % best_epoch_in_run)\n model_paths.append(model_path)\n metric_vals.append(best_val_in_run)\n if print_found_values:\n print(\"\\tRun %s, epoch %d: %6.2f\" % (run_num, best_epoch_in_run, best_val_in_run))\n except Exception:\n print(\"Warning: Was not able to compute values for run %s\" % run_num)\n continue\n return model_paths, metric_vals\n ",
"_____no_output_____"
],
[
"metric_name = \"val_prof_corr_losses\" if model_type == \"profile\" else \"val_corr_losses\"",
"_____no_output_____"
],
[
"noprior_model_paths, noprior_metric_vals = get_model_paths(noprior_model_base_path, metric_name=metric_name)\nprior_model_paths, prior_metric_vals = get_model_paths(prior_model_base_path, metric_name=metric_name)",
"_____no_output_____"
],
[
"torch.set_grad_enabled(True)\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\ndef restore_model(model_path):\n model = model_util.restore_model(model_class, model_path)\n model.eval()\n model = model.to(device)\n return model",
"_____no_output_____"
]
],
[
[
"### Data preparation\nCreate an input data loader, that maps coordinates or bin indices to data needed for the model",
"_____no_output_____"
]
],
[
[
"if model_type == \"profile\":\n input_func = data_loading.get_profile_input_func(\n files_spec_path, input_length, profile_length, reference_fasta\n )\n pos_examples = data_loading.get_positive_profile_coords(\n files_spec_path, chrom_set=chrom_set\n )\nelse:\n input_func = data_loading.get_binary_input_func(\n files_spec_path, input_length, reference_fasta\n )\n pos_examples = data_loading.get_positive_binary_bins(\n files_spec_path, chrom_set=chrom_set\n )",
"_____no_output_____"
]
],
[
[
"### Compute importances",
"_____no_output_____"
]
],
[
[
"# Pick a sample of 100 random coordinates/bins\nnum_samples = 100\nrng = np.random.RandomState(20200318)\nsample = pos_examples[rng.choice(len(pos_examples), size=num_samples, replace=False)]",
"_____no_output_____"
],
[
"# For profile models, add a random jitter to avoid center-bias\nif model_type == \"profile\":\n jitters = np.random.randint(-128, 128 + 1, size=len(sample))\n sample[:, 1] = sample[:, 1] + jitters\n sample[:, 2] = sample[:, 2] + jitters",
"_____no_output_____"
],
[
"def compute_gradients(model_paths, sample):\n \"\"\"\n Given a list of paths to M models and a list of N coordinates or bins, computes\n the input gradients over all models, returning an M x N x I x 4 array of\n gradient values and an N x I x 4 array of one-hot encoded sequence.\n \"\"\"\n num_models, num_samples = len(model_paths), len(sample)\n \n all_input_grads = np.empty((num_models, num_samples, input_length, 4))\n all_one_hot_seqs = np.empty((num_samples, input_length, 4))\n \n for i in tqdm.notebook.trange(num_models):\n model = restore_model(model_paths[i])\n \n if model_type == \"profile\":\n results = compute_predictions.get_profile_model_predictions( \n model, sample, num_tasks, input_func, controls=controls, \n return_losses=False, return_gradients=True, show_progress=False \n )\n else:\n results = compute_predictions.get_binary_model_predictions( \n model, sample, input_func, \n return_losses=False, return_gradients=True, show_progress=False \n )\n\n all_input_grads[i] = results[\"input_grads\"]\n if i == 0:\n all_one_hot_seqs = results[\"input_seqs\"]\n return all_input_grads, all_one_hot_seqs",
"_____no_output_____"
],
[
"def compute_shap_scores(model_paths, sample, batch_size=128):\n \"\"\"\n Given a list of paths to M models and a list of N coordinates or bins, computes\n the SHAP scores over all models, returning an M x N x I x 4 array of\n SHAP scores and an N x I x 4 array of one-hot encoded sequence.\n \"\"\"\n num_models, num_samples = len(model_paths), len(sample)\n \n num_batches = int(np.ceil(num_samples / batch_size))\n \n all_shap_scores = np.empty((num_models, num_samples, input_length, 4))\n all_one_hot_seqs = np.empty((num_samples, input_length, 4))\n \n for i in tqdm.notebook.trange(num_models):\n model = restore_model(model_paths[i])\n \n if model_type == \"profile\":\n shap_explainer = compute_shap.create_profile_explainer(\n model, input_length, profile_length, num_tasks, num_strands, controls,\n task_index=task_index\n )\n else:\n shap_explainer = compute_shap.create_binary_explainer(\n model, input_length, task_index=task_index\n )\n\n for j in range(num_batches):\n batch_slice = slice(j * batch_size, (j + 1) * batch_size)\n batch = sample[batch_slice]\n \n if model_type == \"profile\":\n input_seqs, profiles = input_func(sample)\n shap_scores = shap_explainer(\n input_seqs, cont_profs=profiles[:, num_tasks:], hide_shap_output=True\n )\n else:\n input_seqs, _, _ = input_func(sample)\n shap_scores = shap_explainer(\n input_seqs, hide_shap_output=True\n )\n \n all_shap_scores[i, batch_slice] = shap_scores\n if i == 0:\n all_one_hot_seqs[batch_slice] = input_seqs\n return all_shap_scores, all_one_hot_seqs",
"_____no_output_____"
],
[
"# Compute the importance scores and 1-hot seqs\nimp_type = (\"DeepSHAP scores\", \"input gradients\")[0]\nimp_func = compute_shap_scores if imp_type == \"DeepSHAP scores\" else compute_gradients\nnoprior_scores, _ = imp_func(noprior_model_paths, sample)\nprior_scores, one_hot_seqs = imp_func(prior_model_paths, sample)",
"_____no_output_____"
]
],
[
[
"### Compute similarity",
"_____no_output_____"
]
],
[
[
"def cont_jaccard(seq_1, seq_2):\n \"\"\"\n Takes two gradient sequences (I x 4 arrays) and computes a similarity between\n them, using a continuous Jaccard metric.\n \"\"\"\n # L1-normalize\n norm_1 = np.sum(np.abs(seq_1), axis=1, keepdims=True)\n norm_2 = np.sum(np.abs(seq_2), axis=1, keepdims=True)\n norm_1[norm_1 == 0] = 1\n norm_2[norm_2 == 0] = 1\n seq_1 = seq_1 / norm_1\n seq_2 = seq_2 / norm_2\n \n ab_1, ab_2 = np.abs(seq_1), np.abs(seq_2)\n inter = np.sum(np.minimum(ab_1, ab_2) * np.sign(seq_1) * np.sign(seq_2), axis=1)\n union = np.sum(np.maximum(ab_1, ab_2), axis=1)\n zero_mask = union == 0\n inter[zero_mask] = 0\n union[zero_mask] = 1\n return np.sum(inter / union)",
"_____no_output_____"
],
[
"def cosine_sim(seq_1, seq_2):\n \"\"\"\n Takes two gradient sequences (I x 4 arrays) and computes a similarity between\n them, using a cosine similarity.\n \"\"\"\n seq_1, seq_2 = np.ravel(seq_1), np.ravel(seq_2)\n dot = np.sum(seq_1 * seq_2)\n mag_1, mag_2 = np.sqrt(np.sum(seq_1 * seq_1)), np.sqrt(np.sum(seq_2 * seq_2))\n return dot / (mag_1 * mag_2) if mag_1 * mag_2 else 0",
"_____no_output_____"
],
[
"def compute_similarity_matrix(imp_scores, sim_func=cosine_sim):\n \"\"\"\n Given the M x N x I x 4 importance scores returned by `compute_gradients`\n or `compute_shap_scores`, computes an N x M x M similarity matrix of\n similarity across models (i.e. each coordinate gets a similarity matrix\n across different models). By default uses cosine similarity.\n \"\"\"\n num_models, num_coords = imp_scores.shape[0], imp_scores.shape[1]\n \n sim_mats = np.empty((num_coords, num_models, num_models))\n for i in tqdm.notebook.trange(num_coords):\n for j in range(num_models):\n sim_mats[i, j, j] = 0\n for k in range(j):\n sim_score = sim_func(imp_scores[j][i], imp_scores[k][i])\n sim_mats[i, j, k] = sim_score\n sim_mats[i, k, j] = sim_score\n return sim_mats",
"_____no_output_____"
],
[
"sim_type = (\"Cosine\", \"Continuous Jaccard\")[1]\nsim_func = cosine_sim if sim_type == \"Cosine\" else cont_jaccard\nnoprior_sim_matrix = compute_similarity_matrix(noprior_scores, sim_func=sim_func)\nprior_sim_matrix = compute_similarity_matrix(prior_scores, sim_func=sim_func)",
"_____no_output_____"
],
[
"# Plot some examples of poor consistency, particularly ones that showed an improvement\nnum_to_show = 100\ncenter_view_length = 200\nplot_zoom = True\nmidpoint = input_length // 2\nstart = midpoint - (center_view_length // 2)\nend = start + center_view_length\ncenter_slice = slice(550, 800)\n\nnoprior_sim_matrix_copy = noprior_sim_matrix.copy()\nfor i in range(len(noprior_sim_matrix_copy)):\n noprior_sim_matrix_copy[i][np.diag_indices(noprior_sim_matrix.shape[1])] = np.inf # Put infinity in diagonal\ndiffs = np.max(prior_sim_matrix, axis=(1, 2)) - np.min(noprior_sim_matrix_copy, axis=(1, 2))\nbest_example_inds = np.flip(np.argsort(diffs))[:num_to_show]\n\nbest_example_inds = [7] #, 38]\n\nfor sample_index in best_example_inds:\n noprior_model_ind_1, noprior_model_ind_2 = np.unravel_index(np.argmin(np.ravel(noprior_sim_matrix_copy[sample_index])), noprior_sim_matrix[sample_index].shape)\n prior_model_ind_1, prior_model_ind_2 = np.unravel_index(np.argmax(np.ravel(prior_sim_matrix[sample_index])), prior_sim_matrix[sample_index].shape)\n \n noprior_model_ind_1, noprior_model_ind_2 = 5, 17\n prior_model_ind_1, prior_model_ind_2 = 13, 17\n print(\"Sample index: %d\" % sample_index)\n if model_type == \"binary\":\n bin_index = sample[sample_index]\n coord = input_func(np.array([bin_index]))[2][0]\n print(\"Coordinate: %s (bin %d)\" % (str(coord), bin_index))\n else:\n coord = sample[sample_index]\n print(\"Coordinate: %s\" % str(coord))\n print(\"Model indices without prior: %d vs %d\" % (noprior_model_ind_1, noprior_model_ind_2))\n plt.figure(figsize=(20, 2))\n plt.plot(np.sum(noprior_scores[noprior_model_ind_1, sample_index] * one_hot_seqs[sample_index], axis=1), color=\"coral\")\n plt.show()\n if plot_zoom:\n viz_sequence.plot_weights(noprior_scores[noprior_model_ind_1, sample_index, center_slice], subticks_frequency=1000)\n viz_sequence.plot_weights(noprior_scores[noprior_model_ind_1, sample_index, center_slice] * one_hot_seqs[sample_index, center_slice], subticks_frequency=1000)\n plt.figure(figsize=(20, 2))\n plt.plot(np.sum(noprior_scores[noprior_model_ind_2, sample_index] * one_hot_seqs[sample_index], axis=1), color=\"coral\")\n plt.show()\n if plot_zoom:\n viz_sequence.plot_weights(noprior_scores[noprior_model_ind_2, sample_index, center_slice], subticks_frequency=1000)\n viz_sequence.plot_weights(noprior_scores[noprior_model_ind_2, sample_index, center_slice] * one_hot_seqs[sample_index, center_slice], subticks_frequency=1000)\n \n print(\"Model indices with prior: %d vs %d\" % (prior_model_ind_1, prior_model_ind_2))\n plt.figure(figsize=(20, 2))\n plt.plot(np.sum(prior_scores[prior_model_ind_1, sample_index] * one_hot_seqs[sample_index], axis=1), color=\"slateblue\")\n plt.show()\n if plot_zoom:\n viz_sequence.plot_weights(prior_scores[prior_model_ind_1, sample_index, center_slice], subticks_frequency=1000)\n viz_sequence.plot_weights(prior_scores[prior_model_ind_1, sample_index, center_slice] * one_hot_seqs[sample_index, center_slice], subticks_frequency=1000)\n plt.figure(figsize=(20, 2))\n plt.plot(np.sum(prior_scores[prior_model_ind_2, sample_index] * one_hot_seqs[sample_index], axis=1), color=\"slateblue\")\n plt.show()\n if plot_zoom:\n viz_sequence.plot_weights(prior_scores[prior_model_ind_2, sample_index, center_slice], subticks_frequency=1000)\n viz_sequence.plot_weights(prior_scores[prior_model_ind_2, sample_index, center_slice] * one_hot_seqs[sample_index, center_slice], subticks_frequency=1000)",
"_____no_output_____"
],
[
"sample_index = 7\n\nfor i in range(30):\n print(i)\n plt.figure(figsize=(20, 2))\n plt.plot(np.sum(noprior_scores[i, sample_index] * one_hot_seqs[sample_index], axis=1), color=\"coral\")\n plt.show()\nfor i in range(30):\n print(i)\n plt.figure(figsize=(20, 2))\n plt.plot(np.sum(prior_scores[i, sample_index] * one_hot_seqs[sample_index], axis=1), color=\"coral\")\n plt.show()",
"_____no_output_____"
],
[
"noprior_avg_sims, prior_avg_sims = [], []\nbin_num = 30\nfor i in range(num_samples):\n noprior_avg_sims.append(np.mean(noprior_sim_matrix[i][np.tril_indices(len(noprior_model_paths), k=-1)]))\n prior_avg_sims.append(np.mean(prior_sim_matrix[i][np.tril_indices(len(prior_model_paths), k=-1)]))\nnoprior_avg_sims, prior_avg_sims = np.array(noprior_avg_sims), np.array(prior_avg_sims)\nall_vals = np.concatenate([noprior_avg_sims, prior_avg_sims])\nbins = np.linspace(np.min(all_vals), np.max(all_vals), bin_num)\nfig, ax = plt.subplots(figsize=(16, 8))\nax.hist(noprior_avg_sims, bins=bins, color=\"coral\", label=\"No prior\", alpha=0.7)\nax.hist(prior_avg_sims, bins=bins, color=\"slateblue\", label=\"With Fourier prior\", alpha=0.7)\nplt.legend()\nplt.title(\n (\"Mean pairwise similarities of %s between different random initializations\" % imp_type) +\n (\"\\n%s %s models\" % (condition_name, model_type)) +\n \"\\nComputed over %d/%d models without/with Fourier prior on %d randomly drawn test peaks\" % (len(noprior_model_paths), len(prior_model_paths), num_samples)\n)\nplt.xlabel(\"%s similarity\" % sim_type)\n\nprint(\"Average similarity without priors: %f\" % np.nanmean(noprior_avg_sims))\nprint(\"Average similarity with priors: %f\" % np.nanmean(prior_avg_sims))\nprint(\"Standard error without priors: %f\" % scipy.stats.sem(noprior_avg_sims, nan_policy=\"omit\"))\nprint(\"Standard error with priors: %f\" % scipy.stats.sem(prior_avg_sims, nan_policy=\"omit\"))\nw, p = scipy.stats.wilcoxon(noprior_avg_sims, prior_avg_sims, alternative=\"less\")\nprint(\"One-sided Wilcoxon test: w = %f, p = %f\" % (w, p))",
"_____no_output_____"
],
[
"avg_sim_diffs = prior_avg_sims - noprior_avg_sims\nplt.figure(figsize=(16, 8))\nplt.hist(avg_sim_diffs, bins=30, color=\"mediumorchid\")\nplt.title(\n (\"Paired difference of %s similarity between different random initializations\" % imp_type) +\n (\"\\n%s %s models\" % (condition_name, model_type)) +\n \"\\nComputed over %d/%d models without/with Fourier prior on %d randomly drawn test peaks\" % (len(noprior_model_paths), len(prior_model_paths), num_samples)\n)\nplt.xlabel(\"Average similarity difference: with Fourier prior - no prior\")",
"_____no_output_____"
],
[
"def get_bias(sim_matrix):\n num_examples, num_models, _ = sim_matrix.shape\n bias_vals = []\n for i in range(num_models):\n avg = np.sum(sim_matrix[:, i]) / (num_examples * (num_models - 1))\n bias_vals.append(avg)\n print(\"%d: %f\" % (i + 1, avg))\n return bias_vals",
"_____no_output_____"
],
[
"print(\"Model-specific bias without priors\")\nnoprior_bias_vals = get_bias(noprior_sim_matrix)\nprint(\"Model-specific bias with priors\")\nprior_bias_vals = get_bias(prior_sim_matrix)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, 2, figsize=(10, 5))\nfig.suptitle(\"Model-specific average Jaccard similarity vs model performance\")\nax[0].scatter(noprior_bias_vals, np.array(noprior_metric_vals)[noprior_keep_mask])\nax[0].set_title(\"No priors\")\nax[1].scatter(prior_bias_vals, np.array(prior_metric_vals)[prior_keep_mask])\nax[1].set_title(\"With priors\")\nplt.grid(False)\nfig.text(0.5, 0.04, \"Average Jaccard similarity with other models over all samples\", ha=\"center\", va=\"center\")\nfig.text(0.06, 0.5, \"Model profile validation loss\", ha=\"center\", va=\"center\", rotation=\"vertical\")",
"_____no_output_____"
],
[
"# Compute some simple bounds on the expected consistency using\n# the \"no-prior\" scores\n\nrng = np.random.RandomState(1234)\n\ndef shuf_none(track):\n # Do nothing\n return track\n\ndef shuf_bases(track):\n # Shuffle the importances across each base dimension separately,\n # but keep positions intact\n inds = np.random.rand(*track.shape).argsort(axis=1) # Each row is 0,1,2,3 in random order\n return np.take_along_axis(track, inds, axis=1)\n\ndef shuf_pos(track):\n # Shuffle the importances across the positions, but keep the base\n # importances at each position intact\n shuf = np.copy(track)\n rng.shuffle(shuf)\n return shuf\n\ndef shuf_all(track):\n # Shuffle the importances across positions and bases\n return np.ravel(track)[rng.permutation(track.size)].reshape(track.shape)\n\nfor shuf_type, shuf_func in [\n (\"no\", shuf_none), (\"base\", shuf_bases), (\"position\", shuf_pos), (\"all\", shuf_all)\n]:\n sims = []\n for i in tqdm.notebook.trange(noprior_scores.shape[0]):\n for j in range(noprior_scores.shape[1]):\n track = noprior_scores[i, j]\n track_shuf = shuf_func(track)\n sims.append(sim_func(track, track_shuf))\n fig, ax = plt.subplots()\n ax.hist(sims, bins=30)\n ax.set_title(\"%s similarity with %s shuffing\" % (sim_type, shuf_type))\n plt.show()\n print(\"Mean: %f\" % np.mean(sims))\n print(\"Standard deviation: %f\" % np.std(sims))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d049e580cb0d39372de58abe8f832ae4ed6549cc | 214,591 | ipynb | Jupyter Notebook | notebooks/lightGBM_v01-Copy1.ipynb | mnm-signate/analysys_titanic | 567839537f8fa01ec8225b7c3b2feedc731cf029 | [
"MIT"
] | null | null | null | notebooks/lightGBM_v01-Copy1.ipynb | mnm-signate/analysys_titanic | 567839537f8fa01ec8225b7c3b2feedc731cf029 | [
"MIT"
] | null | null | null | notebooks/lightGBM_v01-Copy1.ipynb | mnm-signate/analysys_titanic | 567839537f8fa01ec8225b7c3b2feedc731cf029 | [
"MIT"
] | null | null | null | 36.833333 | 1,034 | 0.336314 | [
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#init\" data-toc-modified-id=\"init-1\"><span class=\"toc-item-num\">1 </span>init</a></span></li><li><span><a href=\"#モデリング\" data-toc-modified-id=\"モデリング-2\"><span class=\"toc-item-num\">2 </span>モデリング</a></span><ul class=\"toc-item\"><li><span><a href=\"#べースモデル\" data-toc-modified-id=\"べースモデル-2.1\"><span class=\"toc-item-num\">2.1 </span>べースモデル</a></span></li><li><span><a href=\"#グリッドサーチ\" data-toc-modified-id=\"グリッドサーチ-2.2\"><span class=\"toc-item-num\">2.2 </span>グリッドサーチ</a></span></li></ul></li><li><span><a href=\"#本番モデル\" data-toc-modified-id=\"本番モデル-3\"><span class=\"toc-item-num\">3 </span>本番モデル</a></span></li><li><span><a href=\"#ランダムフォレスト\" data-toc-modified-id=\"ランダムフォレスト-4\"><span class=\"toc-item-num\">4 </span>ランダムフォレスト</a></span></li><li><span><a href=\"#work\" data-toc-modified-id=\"work-5\"><span class=\"toc-item-num\">5 </span>work</a></span></li></ul></div>",
"_____no_output_____"
],
[
"# init",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"pwd",
"_____no_output_____"
],
[
"cd ../",
"C:\\00_work\\01_project\\analysys_titanic\n"
],
[
"cd data",
"C:\\00_work\\01_project\\analysys_titanic\\data\n"
],
[
"cd raw ",
"C:\\00_work\\01_project\\analysys_titanic\\data\\raw\n"
],
[
"pdf_train = pd.read_csv(\"train.tsv\", sep=\"\\t\")\npdf_train.T",
"_____no_output_____"
],
[
"pdf_test = pd.read_csv(\"test.tsv\", sep=\"\\t\")\npdf_test.T",
"_____no_output_____"
],
[
"pdf_test.describe()\n",
"_____no_output_____"
],
[
"for str_col in [\"C%s\"%i for i in range(1,7)]:\n print(\"# \"+str_col)\n v_cnt = pdf_test[str_col].value_counts(dropna=None)\n print(v_cnt)\n print()",
"# C1\n421256035 832746\n2581703001 251297\n3651044092 62265\n2686811630 42737\n2387281372 35030\n2541535031 29339\n1111213504 17837\n1045405899 17621\n2068315619 6214\nName: C1, dtype: int64\n\n# C2\n3874378935 349875\n1862037199 323055\n2589684548 141041\n1088910726 101124\n1537671376 87011\n1504830175 30904\n1950756866 15653\n71293199 13956\n1479325723 12964\n2985342882 10908\n1306122108 9705\n421256035 8994\n727692042 7774\n102346738 7252\n1854140495 7097\n3831111254 7032\n3954817824 6764\n3034198634 5596\n2377199148 5355\n364377489 5011\n712451701 4893\n3150909154 4869\n749836023 4832\n1940966173 4681\n3350677597 4473\n330730226 4202\n3111151614 4169\n2084850680 4032\n4212447203 3977\n1035815364 3969\n ... \n860314843 11\n363130183 10\n1104751085 10\n3353718864 10\n2413235131 10\n1654771868 9\n228048336 9\n2451220701 8\n2533511588 8\n2757594487 8\n2001839996 8\n2472604797 4\n1720137283 4\n1984785060 3\n2872626515 3\n1346837276 3\n1326182378 2\n1806924515 2\n2877371725 2\n2778970491 2\n2048015571 1\n49702558 1\n2428347877 1\n265977211 1\n2554446569 1\n1096934960 1\n1697129879 1\n2453773661 1\n1929708017 1\n2795284148 1\nName: C2, Length: 178, dtype: int64\n\n# C3\n2448089184 197430\n1892769125 180326\n98956388 86206\n3260269773 76239\n1998340283 73844\n1658216424 69975\n189706718 60680\n330630449 53494\n3950375036 41675\n3788069208 29631\n575044823 27564\n3862505502 24885\n3542634185 23270\n3889790948 21650\n1367884996 16679\n2225505109 12964\n781073294 11810\n1025908077 10908\n3653461831 10708\n2560571521 8994\n3377311081 8503\n2673245129 8166\n795321583 8048\n3093387819 6646\n1526405953 6635\n1853807563 5213\n1224055469 5200\n3664490197 5179\n2780865838 4929\n140916896 4885\n ... \n1772149368 4\n3404861148 4\n2232386765 3\n37584204 3\n377353736 3\n4022433484 3\n191235966 2\n3668968220 2\n2926245736 2\n3263006221 2\n230134730 2\n2839737601 2\n2177228600 1\n3437234351 1\n2658490502 1\n3498436490 1\n4293804481 1\n1614334011 1\n3693598308 1\n3457749051 1\n4110645563 1\n4041631916 1\n2031985494 1\n1942921800 1\n1403799000 1\n2298492792 1\n1389540541 1\n2651132136 1\n2711384551 1\n1428959068 1\nName: C3, Length: 347, dtype: int64\n\n# C4\n2411205997 681911\n2298910535 326329\n1332932160 170103\n1825710598 94775\n400122826 14725\n3954817824 7104\n1404944795 139\nName: C4, dtype: int64\n\n# C5\n2321672976 472702\n476085660 123957\n1410458339 111344\n181964554 45283\n1309926075 41407\n2456844420 32575\n3202148071 21729\n3072929459 20429\n2141561259 17904\n2078330963 14462\n446821382 14071\n677318072 13935\n1845092915 13185\n265977211 10357\n1033636479 10128\n1107442947 10016\n2866892650 9832\n1238839469 8348\n2330764261 7880\n1852834532 7659\n3578008125 7071\n3800848110 7057\n1500791674 7035\n1512150701 6803\n1798740705 6792\n405013202 6558\n518945081 6456\n4028731966 6125\n692665223 6045\n4041648880 5866\n ... 
\n3771951677 346\n2407591390 334\n3480022372 277\n3827743566 274\n4153084877 273\n3013209392 264\n571532436 242\n652467194 220\n2476349751 206\n2784295826 203\n1387348656 176\n1214424339 150\n4272797277 146\n494458745 124\n1256974341 99\n1049113504 93\n1674701931 89\n2881538594 89\n2233076873 76\n2308021301 73\n2207460318 73\n738834268 67\n2409789027 65\n2168444827 34\n1277233889 13\n3679904194 10\n1884425930 4\n1223643474 3\n2133676728 1\n980845682 1\nName: C5, Length: 224, dtype: int64\n\n# C6\n1509930964 510855\n3668889797 260254\n2068230268 192493\n3111151614 110606\n1479325723 56414\n147733495 40407\n3684013252 32480\n1769722553 26826\n1238839469 21853\n3016268466 13248\n2798684489 9850\n1410751928 7413\n4161523369 6300\n2141877921 5691\n1991667031 220\n1802588585 176\nName: C6, dtype: int64\n\n"
],
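[
"# Added sketch, not in the original notebook: get_C_dummie below relies on lst_srt_tgt_cols and\n# dct_C_vals, which are not defined in any of the cells shown here. A minimal plausible definition,\n# assuming the intent is to one-hot encode the categorical columns C1-C6 on their observed values:\nlst_srt_tgt_cols = [\"C%s\" % i for i in range(1, 7)]\ndct_C_vals = {col: pdf_train[col].value_counts().index.tolist() for col in lst_srt_tgt_cols}",
"_____no_output_____"
],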
[
"def get_C_dummie(df):\n dct_dummie = {}\n\n for tgt_col in lst_srt_tgt_cols:\n psr_tgt_col = df[tgt_col]\n dct_dummie[tgt_col] = {}\n\n for tgt_val in dct_C_vals[tgt_col]:\n dummie = psr_tgt_col.apply(lambda x: 1 if x == tgt_val else 0)\n dct_dummie[tgt_col][tgt_col + \"_%s\"%tgt_val] = dummie\n \n _df = df.copy()\n\n for tgt_col in dct_dummie.keys():\n dummies = pd.DataFrame(dct_dummie[tgt_col])\n _df = pd.concat([_df, dummies], axis=1)\n \n else:\n lst_str_drop_tgt_str = [\"C%s\"%i for i in range(1,7)]\n# lst_str_drop_tgt_int = [\"I%s\"%i for i in range(11,15)]\n _df = _df.drop(lst_str_drop_tgt_str + [\"id\"],1)\n# _df = _df.drop(lst_str_drop_tgt_str + lst_str_drop_tgt_int + [\"id\"],1)\n \n return _df\n",
"_____no_output_____"
],
[
"pdf_clns_train = get_C_dummie(pdf_train)\npdf_clns_test = get_C_dummie(pdf_test)\n",
"_____no_output_____"
],
[
"pdf_clns_train.T",
"_____no_output_____"
],
[
"pdf_clns_test.T",
"_____no_output_____"
]
],
[
[
"# モデリング",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import ParameterGrid, KFold, train_test_split\nimport lightgbm as lgb\n",
"_____no_output_____"
]
],
[
[
"## べースモデル",
"_____no_output_____"
]
],
[
[
"def cross_valid_lgb(X,y,param,n_splits,random_state=1234, num_boost_round=1000,early_stopping_rounds=5):\n from sklearn.metrics import roc_auc_score\n from sklearn.model_selection import StratifiedKFold, train_test_split\n import lightgbm as lgb\n\n kf = StratifiedKFold(n_splits=n_splits, shuffle=True,random_state=random_state)\n\n lst_auc = []\n\n for train_index, test_index in kf.split(X,y):\n learn_X, learn_y, test_X, test_y = X.iloc[train_index], y.iloc[train_index], X.iloc[test_index], y.iloc[test_index]\n\n train_X, valid_X, train_y, valid_y = train_test_split(learn_X, learn_y,test_size=0.3, random_state=random_state)\n\n lgb_train = lgb.Dataset(train_X, train_y)\n lgb_valid = lgb.Dataset(valid_X, valid_y)\n\n gbm = lgb.train(\n params,\n lgb_train,\n num_boost_round=num_boost_round,\n valid_sets=lgb_valid,\n early_stopping_rounds=early_stopping_rounds,\n verbose_eval = False\n )\n\n pred = gbm.predict(test_X)\n\n auc = roc_auc_score(y_true=test_y, y_score=pred)\n\n lst_auc.append(auc)\n\n auc_mean = np.mean(lst_auc)\n \n return auc_mean\n",
"_____no_output_____"
],
[
"def cross_lgb(X,y,param,n_splits,random_state=1234, num_boost_round=1000,early_stopping_rounds=5):\n from sklearn.metrics import roc_auc_score\n from sklearn.model_selection import StratifiedKFold, train_test_split\n import lightgbm as lgb\n\n kf = StratifiedKFold(n_splits=n_splits, shuffle=True,random_state=random_state)\n\n lst_model = []\n\n for train_index, test_index in kf.split(X,y):\n learn_X, learn_y, test_X, test_y = X.iloc[train_index], y.iloc[train_index], X.iloc[test_index], y.iloc[test_index]\n\n# train_X, valid_X, train_y, valid_y = train_test_split(learn_X, learn_y,test_size=0.3, random_state=random_state)\n\n lgb_train = lgb.Dataset(learn_X, learn_y)\n lgb_valid = lgb.Dataset(test_X, test_y)\n\n gbm = lgb.train(\n params,\n lgb_train,\n num_boost_round=num_boost_round,\n valid_sets=lgb_valid,\n early_stopping_rounds=early_stopping_rounds,\n verbose_eval = False\n )\n\n lst_model.append(gbm)\n \n return lst_model\n",
"_____no_output_____"
]
],
[
[
"## グリッドサーチ",
"_____no_output_____"
]
],
[
[
"grid = {\n 'boosting_type': ['goss'],\n 'objective': ['binary'],\n 'metric': ['auc'],\n \n# 'num_leaves':[31 + i for i in range(-10, 11)],\n# 'min_data_in_leaf':[20 + i for i in range(-10, 11)],\n 'num_leaves':[31],\n 'min_data_in_leaf':[20],\n 'max_depth':[-1]\n}\n",
"_____no_output_____"
],
[
"X = pdf_clns_train.drop(\"click\",1)\ny = pdf_clns_train.click\n\nscore_grid = []\nbest_param = {}\nbest_auc = 0\n\nfor param in list(ParameterGrid(grid)):\n auc_mean = cross_valid_lgb(\n X,\n y,\n param=param,n_splits=3,\n random_state=1234, \n num_boost_round=1000,\n early_stopping_rounds=5\n )\n \n score_grid.append([param,auc_mean])\n \n if auc_mean >= best_auc:\n best_auc = auc_mean\n best_param = param\n",
"_____no_output_____"
],
[
"print(best_auc,best_param)\n",
"0.8083715191616564 {'boosting_type': 'goss', 'max_depth': -1, 'metric': 'auc', 'min_data_in_leaf': 20, 'num_leaves': 31, 'objective': 'binary'}\n"
]
],
[
[
"# 本番モデル",
"_____no_output_____"
]
],
[
[
"lst_model = cross_lgb(X,y,param=best_param,n_splits=5,random_state=1234, num_boost_round=1000,early_stopping_rounds=5)",
"_____no_output_____"
],
[
"lst_model",
"_____no_output_____"
],
[
"lst_pred = []\n\nfor mdl in lst_model:\n pred = mdl.predict(pdf_clns_test)\n lst_pred.append(pred)",
"_____no_output_____"
],
[
"nparr_preds = np.array(lst_pred)\nmean_pred = nparr_preds.mean(0)\nmean_pred",
"_____no_output_____"
],
[
"pdf_submit = pd.DataFrame({\n \"id\":pdf_test.id,\n \"score\":mean_pred\n})\npdf_submit.T",
"_____no_output_____"
],
[
"pdf_submit.to_csv(\"submit_v02_lgb5.csv\", index=False, header=False)",
"_____no_output_____"
]
],
[
[
"# ランダムフォレスト",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier",
"_____no_output_____"
],
[
"X = X.fillna(0)\n",
"_____no_output_____"
],
[
"clf = RandomForestClassifier()\nclf.fit(X,y)\n",
"_____no_output_____"
],
[
"pdf_clns_test = pdf_clns_test.fillna(0)\n\npred = clf.predict_proba(pdf_clns_test)\npred\n",
"_____no_output_____"
],
[
"pdf_submit_rf = pd.DataFrame({\n \"id\":pdf_test.id,\n \"score\":pred[:,1]\n})\npdf_submit_rf.T\n",
"_____no_output_____"
],
[
"pdf_submit_rf.to_csv(\"submit_rf.csv\", index=False, header=False)",
"_____no_output_____"
]
],
[
[
"# work",
"_____no_output_____"
]
],
[
[
"params = {\n 'boosting_type': 'goss',\n 'objective': 'binary',\n 'metric': 'auc',\n\n 'learning_rate': 0.1,\n 'num_leaves': 23,\n 'min_data_in_leaf': 1,\n}\n\ngbm = lgb.train(\n params,\n lds_train,\n num_boost_round=1000,\n valid_sets=lds_test,\n early_stopping_rounds=5,\n)\n",
"_____no_output_____"
],
[
"dct_C_vals",
"_____no_output_____"
],
[
"pdf_train.C1",
"_____no_output_____"
],
[
"params = {\n 'boosting_type': 'goss',\n 'objective': 'binary',\n 'metric': 'auc',\n 'verbose': 0,\n\n 'learning_rate': 0.1,\n 'num_leaves': 23,\n 'min_data_in_leaf': 1\n}\n\nkf = KFold(n_splits=3, shuffle=True,random_state=1234)\n\n# score_grid = []\n\nlst_auc = []\n\nfor train_index, test_index in kf.split(pdf_train):\n pdf_train_kf, pdf_test_kf = pdf_clns_train.iloc[train_index], pdf_clns_train.iloc[test_index]\n \n train, valid = train_test_split(pdf_train_kf,test_size=0.3, random_state=1234)\n\n lgb_train = lgb.Dataset(train.drop(\"click\",1), train[\"click\"])\n lgb_valid = lgb.Dataset(valid.drop(\"click\",1), valid[\"click\"])\n# lgb_test = lgb.Dataset(pdf_test_kf.drop(\"click\",1), pdf_test_kf[\"click\"])\n pdf_test_X = pdf_test_kf.drop(\"click\",1)\n pdf_test_y = pdf_test_kf[\"click\"]\n \n gbm = lgb.train(\n params,\n lgb_train,\n num_boost_round=10,\n valid_sets=lgb_valid,\n early_stopping_rounds=5,\n )\n \n pred = gbm.predict(pdf_test_X)\n \n auc = roc_auc_score(y_true=pdf_test_y, y_score=pred)\n \n lst_auc.append(auc)\n \nauc_mean = np.mean(lst_auc)\n",
"[1]\tvalid_0's auc: 0.720192\nTraining until validation scores don't improve for 5 rounds.\n[2]\tvalid_0's auc: 0.726811\n[3]\tvalid_0's auc: 0.731667\n[4]\tvalid_0's auc: 0.737238\n[5]\tvalid_0's auc: 0.739277\n[6]\tvalid_0's auc: 0.740883\n[7]\tvalid_0's auc: 0.742216\n[8]\tvalid_0's auc: 0.743704\n[9]\tvalid_0's auc: 0.744488\n[10]\tvalid_0's auc: 0.745019\nDid not meet early stopping. Best iteration is:\n[10]\tvalid_0's auc: 0.745019\n[1]\tvalid_0's auc: 0.718608\nTraining until validation scores don't improve for 5 rounds.\n[2]\tvalid_0's auc: 0.728747\n[3]\tvalid_0's auc: 0.730248\n[4]\tvalid_0's auc: 0.733139\n[5]\tvalid_0's auc: 0.735163\n[6]\tvalid_0's auc: 0.736818\n[7]\tvalid_0's auc: 0.739096\n[8]\tvalid_0's auc: 0.740416\n[9]\tvalid_0's auc: 0.739959\n[10]\tvalid_0's auc: 0.741348\nDid not meet early stopping. Best iteration is:\n[10]\tvalid_0's auc: 0.741348\n[1]\tvalid_0's auc: 0.721096\nTraining until validation scores don't improve for 5 rounds.\n[2]\tvalid_0's auc: 0.729421\n[3]\tvalid_0's auc: 0.736833\n[4]\tvalid_0's auc: 0.738976\n[5]\tvalid_0's auc: 0.739032\n[6]\tvalid_0's auc: 0.739811\n[7]\tvalid_0's auc: 0.741738\n[8]\tvalid_0's auc: 0.745557\n[9]\tvalid_0's auc: 0.745884\n[10]\tvalid_0's auc: 0.748055\nDid not meet early stopping. Best iteration is:\n[10]\tvalid_0's auc: 0.748055\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
d049e606481c356a83758027f20634fde8f11743 | 24,559 | ipynb | Jupyter Notebook | Course_1_Part_4_Lesson_2_Notebook.ipynb | poojan-dalal/fashion-MNIST | 273959f9ac64b9900dce9bcc71667c7c03988521 | [
"MIT"
] | null | null | null | Course_1_Part_4_Lesson_2_Notebook.ipynb | poojan-dalal/fashion-MNIST | 273959f9ac64b9900dce9bcc71667c7c03988521 | [
"MIT"
] | null | null | null | Course_1_Part_4_Lesson_2_Notebook.ipynb | poojan-dalal/fashion-MNIST | 273959f9ac64b9900dce9bcc71667c7c03988521 | [
"MIT"
] | 1 | 2021-09-08T12:12:39.000Z | 2021-09-08T12:12:39.000Z | 37.042232 | 540 | 0.544648 | [
[
[
"<a href=\"https://colab.research.google.com/github/poojan-dalal/fashion-MNIST/blob/master/Course_1_Part_4_Lesson_2_Notebook.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)",
"_____no_output_____"
]
],
[
[
"The Fashion MNIST data is available directly in the tf.keras datasets API. You load it like this:",
"_____no_output_____"
]
],
[
[
"mnist = tf.keras.datasets.fashion_mnist",
"_____no_output_____"
]
],
[
[
"Calling load_data on this object will give you two sets of two lists, these will be the training and testing values for the graphics that contain the clothing items and their labels.\n",
"_____no_output_____"
]
],
[
[
"(training_images, training_labels), (test_images, test_labels) = mnist.load_data()",
"_____no_output_____"
]
],
[
[
"What does these values look like? Let's print a training image, and a training label to see...Experiment with different indices in the array. For example, also take a look at index 42...that's a a different boot than the one at index 0\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nnp.set_printoptions(linewidth=200)\nimport matplotlib.pyplot as plt\nplt.imshow(training_images[0])\nprint(training_labels[0])\nprint(training_images[0])",
"_____no_output_____"
]
],
[
[
"You'll notice that all of the values in the number are between 0 and 255. If we are training a neural network, for various reasons it's easier if we treat all values as between 0 and 1, a process called '**normalizing**'...and fortunately in Python it's easy to normalize a list like this without looping. You do it like this:",
"_____no_output_____"
]
],
[
[
"training_images = training_images / 255.0\ntest_images = test_images / 255.0",
"_____no_output_____"
]
],
[
[
"Now you might be wondering why there are 2 sets...training and testing -- remember we spoke about this in the intro? The idea is to have 1 set of data for training, and then another set of data...that the model hasn't yet seen...to see how good it would be at classifying values. After all, when you're done, you're going to want to try it out with data that it hadn't previously seen!",
"_____no_output_____"
],
[
"Let's now design the model. There's quite a few new concepts here, but don't worry, you'll get the hang of them. ",
"_____no_output_____"
]
],
[
[
"model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), \n tf.keras.layers.Dense(128, activation=tf.nn.relu), \n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])",
"_____no_output_____"
]
],
[
[
"**Sequential**: That defines a SEQUENCE of layers in the neural network\n\n**Flatten**: Remember earlier where our images were a square, when you printed them out? Flatten just takes that square and turns it into a 1 dimensional set.\n\n**Dense**: Adds a layer of neurons\n\nEach layer of neurons need an **activation function** to tell them what to do. There's lots of options, but just use these for now. \n\n**Relu** effectively means \"If X>0 return X, else return 0\" -- so what it does it it only passes values 0 or greater to the next layer in the network.\n\n**Softmax** takes a set of values, and effectively picks the biggest one, so, for example, if the output of the last layer looks like [0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05], it saves you from fishing through it looking for the biggest value, and turns it into [0,0,0,0,1,0,0,0,0] -- The goal is to save a lot of coding!\n",
"_____no_output_____"
],
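[
"# Optional illustration (an added sketch, not part of the original lesson): apply the Relu and\n# Softmax ideas described above to small arrays, so you can see what they do to actual numbers.\nimport numpy as np\nx = np.array([-2.0, -0.5, 0.0, 1.5, 3.0])\nprint(np.maximum(x, 0)) # relu: anything below 0 becomes 0, everything else passes through unchanged\nscores = np.array([0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05])\nexps = np.exp(scores - np.max(scores)) # subtract the max first for numerical stability\nprint(np.round(exps / exps.sum(), 3)) # softmax: the 9.5 entry ends up with (almost) all the weight",
"_____no_output_____"
],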
[
"The next thing to do, now the model is defined, is to actually build it. You do this by compiling it with an optimizer and loss function as before -- and then you train it by calling **model.fit ** asking it to fit your training data to your training labels -- i.e. have it figure out the relationship between the training data and its actual labels, so in future if you have data that looks like the training data, then it can make a prediction for what that data would look like. ",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer = tf.optimizers.Adam(),\n loss = 'sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(training_images, training_labels, epochs=5)",
"_____no_output_____"
]
],
[
[
"Once it's done training -- you should see an accuracy value at the end of the final epoch. It might look something like 0.9098. This tells you that your neural network is about 91% accurate in classifying the training data. I.E., it figured out a pattern match between the image and the labels that worked 91% of the time. Not great, but not bad considering it was only trained for 5 epochs and done quite quickly.\n\nBut how would it work with unseen data? That's why we have the test images. We can call model.evaluate, and pass in the two sets, and it will report back the loss for each. Let's give it a try:",
"_____no_output_____"
]
],
[
[
"model.evaluate(test_images, test_labels)",
"_____no_output_____"
]
],
[
[
"For me, that returned a accuracy of about .8838, which means it was about 88% accurate. As expected it probably would not do as well with *unseen* data as it did with data it was trained on! As you go through this course, you'll look at ways to improve this. \n\nTo explore further, try the below exercises:\n",
"_____no_output_____"
],
[
"###Exercise 1:\nFor this first exercise run the below code: It creates a set of classifications for each of the test images, and then prints the first entry in the classifications. The output, after you run it is a list of numbers. Why do you think this is, and what do those numbers represent? ",
"_____no_output_____"
]
],
[
[
"classifications = model.predict(test_images)\n\nprint(classifications[0])",
"_____no_output_____"
]
],
[
[
"Hint: try running print(test_labels[0]) -- and you'll get a 9. Does that help you understand why this list looks the way it does? ",
"_____no_output_____"
]
],
[
[
"print(test_labels[0])",
"_____no_output_____"
]
],
[
[
"##Exercise 2: \nLet's now look at the layers in your model. Experiment with different values for the dense layer with 512 neurons. What different results do you get for loss, training time etc? Why do you think that's the case? \n",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(1024, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])",
"_____no_output_____"
]
],
[
[
"##Exercise 3: \n\nWhat would happen if you remove the Flatten() layer. Why do you think that's the case? \n\nYou get an error about the shape of the data. It may seem vague right now, but it reinforces the rule of thumb that the first layer in your network should be the same shape as your data. Right now our data is 28x28 images, and 28 layers of 28 neurons would be infeasible, so it makes more sense to 'flatten' that 28,28 into a 784x1. Instead of wriitng all the code to handle that ourselves, we add the Flatten() layer at the begining, and when the arrays are loaded into the model later, they'll automatically be flattened for us.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([#tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])",
"_____no_output_____"
]
],
[
[
"##Exercise 4: \n\nConsider the final (output) layers. Why are there 10 of them? What would happen if you had a different amount than 10? For example, try training the network with 5\n\nYou get an error as soon as it finds an unexpected value. Another rule of thumb -- the number of neurons in the last layer should match the number of classes you are classifying for. In this case it's the digits 0-9, so there are 10 of them, hence you should have 10 neurons in your final layer.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation=tf.nn.relu),\n tf.keras.layers.Dense(5, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])",
"_____no_output_____"
]
],
[
[
"##Exercise 5: \n\nConsider the effects of additional layers in the network. What will happen if you add another layer between the one with 512 and the final layer with 10. \n\nAns: There isn't a significant impact -- because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary. ",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])",
"_____no_output_____"
]
],
[
[
"#Exercise 6: \n\nConsider the impact of training for more or less epochs. Why do you think that would be the case? \n\nTry 15 epochs -- you'll probably get a model with a much better loss than the one with 5\nTry 30 epochs -- you might see the loss value stops decreasing, and sometimes increases. This is a side effect of something called 'overfitting' which you can learn about [somewhere] and it's something you need to keep an eye out for when training neural networks. There's no point in wasting your time training if you aren't improving your loss, right! :)",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=30)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[34])\nprint(test_labels[34])",
"_____no_output_____"
]
],
[
[
"#Exercise 7: \n\nBefore you trained, you normalized the data, going from values that were 0-255 to values that were 0-1. What would be the impact of removing that? Here's the complete code to give it a try. Why do you think you get different results? ",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\nmnist = tf.keras.datasets.mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\ntraining_images=training_images/255.0\ntest_images=test_images/255.0\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy')\nmodel.fit(training_images, training_labels, epochs=5)\nmodel.evaluate(test_images, test_labels)\nclassifications = model.predict(test_images)\nprint(classifications[0])\nprint(test_labels[0])",
"_____no_output_____"
]
],
[
[
"#Exercise 8: \n\nEarlier when you trained for extra epochs you had an issue where your loss might change. It might have taken a bit of time for you to wait for the training to do that, and you might have thought 'wouldn't it be nice if I could stop the training when I reach a desired value?' -- i.e. 95% accuracy might be enough for you, and if you reach that after 3 epochs, why sit around waiting for it to finish a lot more epochs....So how would you fix that? Like any other program...you have callbacks! Let's see them in action...",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\n\nclass myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n if(logs.get('loss')<0.4):\n print(\"\\nReached 60% accuracy so cancelling training!\")\n self.model.stop_training = True\n\ncallbacks = myCallback()\nmnist = tf.keras.datasets.fashion_mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\ntraining_images=training_images/255.0\ntest_images=test_images/255.0\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy')\nmodel.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d049e66b16bf1a6ad75e0f962a6456aab35db31a | 3,585 | ipynb | Jupyter Notebook | 01_Workshop-master/Chapter06/Exercise94/Exercise94.ipynb | Anna-MarieTomm/Learn_Python_with_Anna-Marie | e1d7b0f95674a91b1f30acd8923e0fc54f823182 | [
"MIT"
] | null | null | null | 01_Workshop-master/Chapter06/Exercise94/Exercise94.ipynb | Anna-MarieTomm/Learn_Python_with_Anna-Marie | e1d7b0f95674a91b1f30acd8923e0fc54f823182 | [
"MIT"
] | null | null | null | 01_Workshop-master/Chapter06/Exercise94/Exercise94.ipynb | Anna-MarieTomm/Learn_Python_with_Anna-Marie | e1d7b0f95674a91b1f30acd8923e0fc54f823182 | [
"MIT"
] | null | null | null | 21.088235 | 70 | 0.463598 | [
[
[
"### Configure through code.\nRestart the kernel here.",
"_____no_output_____"
]
],
[
[
"import logging\nimport sys\nroot_logger = logging.getLogger()\nhandler = logging.StreamHandler(sys.stdout)\nformatter = logging.Formatter(\"%(levelname)s: %(message)s\")\nhandler.setFormatter(formatter)\nroot_logger.addHandler(handler)\nroot_logger.setLevel(\"INFO\")\n\nlogging.info(\"Hello logging world\")",
"INFO: Hello logging world\n"
]
],
[
[
"### Configure with dictConfig.\nRestart the kernel here.",
"_____no_output_____"
]
],
[
[
"import logging\nfrom logging.config import dictConfig\n\ndictConfig({\n \"version\": 1,\n \"formatters\": {\n \"short\":{\n \"format\": \"%(levelname)s: %(message)s\",\n }\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n \"stream\": \"ext://sys.stdout\",\n \"level\": \"DEBUG\",\n }\n },\n \"loggers\": {\n \"\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\"\n } \n }\n})\nlogging.info(\"Hello logging world\")",
"INFO: Hello logging world\n"
]
],
[
[
"### Configure with basicConfig.\nRestart the kernel here.",
"_____no_output_____"
]
],
[
[
"import sys\nimport logging\nlogging.basicConfig(\n level=\"INFO\",\n format=\"%(levelname)s: %(message)s\",\n stream=sys.stdout\n)\nlogging.info(\"Hello there!\")",
"INFO: Hello there!\n"
]
],
[
[
"### Configure with fileconfig.\nRestart the kernel here.",
"_____no_output_____"
]
],
[
[
"import logging\nfrom logging.config import fileConfig\nfileConfig(\"logging-config.ini\")\nlogging.info(\"Hello there!\")",
"INFO: Hello there!\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d049e978f57fbb80e08a9090382c67534e863e6d | 14,740 | ipynb | Jupyter Notebook | AoC 2020/AoC 2020 - Day 10.ipynb | RubenFixit/AoC | af042a5d6ca230a767862b471400275d2258a116 | [
"MIT"
] | null | null | null | AoC 2020/AoC 2020 - Day 10.ipynb | RubenFixit/AoC | af042a5d6ca230a767862b471400275d2258a116 | [
"MIT"
] | null | null | null | AoC 2020/AoC 2020 - Day 10.ipynb | RubenFixit/AoC | af042a5d6ca230a767862b471400275d2258a116 | [
"MIT"
] | null | null | null | 36.942356 | 307 | 0.520488 | [
[
[
"# [Advent of Code 2020: Day 10](https://adventofcode.com/2020/day/10)",
"_____no_output_____"
],
[
"## \\-\\-\\- Day 10: Adapter Array \\-\\-\\-\n\nPatched into the aircraft's data port, you discover weather forecasts of a massive tropical storm. Before you can figure out whether it will impact your vacation plans, however, your device suddenly turns off!\n\nIts battery is dead.\n\nYou'll need to plug it in. There's only one problem: the charging outlet near your seat produces the wrong number of **jolts**. Always prepared, you make a list of all of the joltage adapters in your bag.\n\nEach of your joltage adapters is rated for a specific **output joltage** (your puzzle input). Any given adapter can take an input `1`, `2`, or `3` jolts **lower** than its rating and still produce its rated output joltage.\n\nIn addition, your device has a built\\-in joltage adapter rated for **`3` jolts higher** than the highest\\-rated adapter in your bag. (If your adapter list were `3`, `9`, and `6`, your device's built\\-in adapter would be rated for `12` jolts.)\n\nTreat the charging outlet near your seat as having an effective joltage rating of `0`.\n\nSince you have some time to kill, you might as well test all of your adapters. Wouldn't want to get to your resort and realize you can't even charge your device!\n\nIf you *use every adapter in your bag* at once, what is the distribution of joltage differences between the charging outlet, the adapters, and your device?\n\nFor example, suppose that in your bag, you have adapters with the following joltage ratings:\n\n```\n16\n10\n15\n5\n1\n11\n7\n19\n6\n12\n4\n\n```\n\nWith these adapters, your device's built\\-in joltage adapter would be rated for `19 + 3 = `**`22`** jolts, 3 higher than the highest\\-rated adapter.\n\nBecause adapters can only connect to a source 1\\-3 jolts lower than its rating, in order to use every adapter, you'd need to choose them like this:\n\n* The charging outlet has an effective rating of `0` jolts, so the only adapters that could connect to it directly would need to have a joltage rating of `1`, `2`, or `3` jolts. Of these, only one you have is an adapter rated `1` jolt (difference of **`1`**).\n* From your `1`\\-jolt rated adapter, the only choice is your `4`\\-jolt rated adapter (difference of **`3`**).\n* From the `4`\\-jolt rated adapter, the adapters rated `5`, `6`, or `7` are valid choices. 
However, in order to not skip any adapters, you have to pick the adapter rated `5` jolts (difference of **`1`**).\n* Similarly, the next choices would need to be the adapter rated `6` and then the adapter rated `7` (with difference of **`1`** and **`1`**).\n* The only adapter that works with the `7`\\-jolt rated adapter is the one rated `10` jolts (difference of **`3`**).\n* From `10`, the choices are `11` or `12`; choose `11` (difference of **`1`**) and then `12` (difference of **`1`**).\n* After `12`, only valid adapter has a rating of `15` (difference of **`3`**), then `16` (difference of **`1`**), then `19` (difference of **`3`**).\n* Finally, your device's built\\-in adapter is always 3 higher than the highest adapter, so its rating is `22` jolts (always a difference of **`3`**).\n\nIn this example, when using every adapter, there are **`7`** differences of 1 jolt and **`5`** differences of 3 jolts.\n\nHere is a larger example:\n\n```\n28\n33\n18\n42\n31\n14\n46\n20\n48\n47\n24\n23\n49\n45\n19\n38\n39\n11\n1\n32\n25\n35\n8\n17\n7\n9\n4\n2\n34\n10\n3\n\n```\n\nIn this larger example, in a chain that uses all of the adapters, there are **`22`** differences of 1 jolt and **`10`** differences of 3 jolts.\n\nFind a chain that uses all of your adapters to connect the charging outlet to your device's built\\-in adapter and count the joltage differences between the charging outlet, the adapters, and your device. **What is the number of 1\\-jolt differences multiplied by the number of 3\\-jolt differences?**",
"_____no_output_____"
]
],
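[
[
"# Quick illustrative check (an added sketch, not part of the original solution below): reproduce the\n# small worked example from the puzzle text with plain Python -- it should give 7 one-jolt and\n# 5 three-jolt differences.\nfrom collections import Counter\nadapters = sorted([16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4])\nchain = [0] + adapters + [adapters[-1] + 3] # charging outlet, all adapters, built-in adapter\nprint(Counter(b - a for a, b in zip(chain, chain[1:])))",
"_____no_output_____"
]
],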
[
[
"import unittest\nfrom IPython.display import Markdown, display\n\nfrom aoc_puzzle import AocPuzzle\n\nclass AdapterArray(AocPuzzle):\n \n def parse_data(self, raw_data):\n self.adapter_list = list(map(int, raw_data.split('\\n')))\n self.adapter_list.sort()\n self.adapter_list.insert(0,0)\n self.adapter_list.append(self.adapter_list[-1]+3)\n \n def calc_jolt_diff(self, output=False):\n jolt_diffs = {}\n for i in range(1,len(self.adapter_list)):\n adapter = self.adapter_list[i]\n prev_adapter = self.adapter_list[i-1]\n jdiff = adapter - prev_adapter\n if jdiff not in jolt_diffs:\n jolt_diffs[jdiff] = 1\n else:\n jolt_diffs[jdiff] += 1\n \n jolt_diff_product = jolt_diffs[1] * jolt_diffs[3]\n \n if output:\n display(Markdown(f'### Jolt diff product: `{jolt_diff_product}`')) \n return jolt_diff_product\n \n\nclass TestBasic(unittest.TestCase):\n\n def test_parse_data(self):\n in_data = '16\\n10\\n15\\n5\\n1\\n11\\n7\\n19\\n6\\n12\\n4'\n exp_out = [0, 1, 4, 5, 6, 7, 10, 11, 12, 15, 16, 19, 22]\n aa = AdapterArray(in_data)\n self.assertEqual(aa.adapter_list, exp_out)\n \n def test_puzzle(self):\n input_data = ['16\\n10\\n15\\n5\\n1\\n11\\n7\\n19\\n6\\n12\\n4','28\\n33\\n18\\n42\\n31\\n14\\n46\\n20\\n48\\n47\\n24\\n23\\n49\\n45\\n19\\n38\\n39\\n11\\n1\\n32\\n25\\n35\\n8\\n17\\n7\\n9\\n4\\n2\\n34\\n10\\n3']\n exp_output = [35,220]\n for in_data, exp_out in tuple(zip(input_data, exp_output)):\n aa = AdapterArray(in_data)\n self.assertEqual(aa.calc_jolt_diff(), exp_out)\n \nunittest.main(argv=[\"\"], exit=False)",
"..\n----------------------------------------------------------------------\nRan 2 tests in 0.002s\n\nOK\n"
],
[
"aa = AdapterArray(\"input/d10.txt\")\naa.calc_jolt_diff(output=True)",
"_____no_output_____"
]
],
[
[
"## --- Part Two ---\n\nTo completely determine whether you have enough adapters, you'll need to figure out how many different ways they can be arranged. Every arrangement needs to connect the charging outlet to your device. The previous rules about when adapters can successfully connect still apply.\n\nThe first example above (the one that starts with `16`, `10`, `15`) supports the following arrangements:\n\n```\n(0), 1, 4, 5, 6, 7, 10, 11, 12, 15, 16, 19, (22)\n(0), 1, 4, 5, 6, 7, 10, 12, 15, 16, 19, (22)\n(0), 1, 4, 5, 7, 10, 11, 12, 15, 16, 19, (22)\n(0), 1, 4, 5, 7, 10, 12, 15, 16, 19, (22)\n(0), 1, 4, 6, 7, 10, 11, 12, 15, 16, 19, (22)\n(0), 1, 4, 6, 7, 10, 12, 15, 16, 19, (22)\n(0), 1, 4, 7, 10, 11, 12, 15, 16, 19, (22)\n(0), 1, 4, 7, 10, 12, 15, 16, 19, (22)\n\n```\n\n(The charging outlet and your device's built-in adapter are shown in parentheses.) Given the adapters from the first example, the total number of arrangements that connect the charging outlet to your device is **`8`**.\n\nThe second example above (the one that starts with `28`, `33`, `18`) has many arrangements. Here are a few:\n\n```\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\n32, 33, 34, 35, 38, 39, 42, 45, 46, 47, 48, 49, (52)\n\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\n32, 33, 34, 35, 38, 39, 42, 45, 46, 47, 49, (52)\n\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\n32, 33, 34, 35, 38, 39, 42, 45, 46, 48, 49, (52)\n\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\n32, 33, 34, 35, 38, 39, 42, 45, 46, 49, (52)\n\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\n32, 33, 34, 35, 38, 39, 42, 45, 47, 48, 49, (52)\n\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\n46, 48, 49, (52)\n\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\n46, 49, (52)\n\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\n47, 48, 49, (52)\n\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\n47, 49, (52)\n\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\n48, 49, (52)\n\n```\n\nIn total, this set of adapters can connect the charging outlet to your device in **`19208`** distinct arrangements.\n\nYou glance back down at your bag and try to remember why you brought so many adapters; there must be **more than a trillion** valid ways to arrange them! Surely, there must be an efficient way to count the arrangements.\n\n**What is the total number of distinct ways you can arrange the adapters to connect the charging outlet to your device?**",
"_____no_output_____"
]
],
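[
[
"# Added sketch (not part of the original solution below): the arrangement count described above can\n# be computed with a small dynamic-programming recurrence -- the number of ways to reach an adapter\n# is the sum of the ways to reach the adapters 1, 2 or 3 jolts below it. The first example gives 8.\nways = {0: 1}\nadapters = sorted([16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4])\nfor a in adapters + [adapters[-1] + 3]:\n    ways[a] = ways.get(a - 1, 0) + ways.get(a - 2, 0) + ways.get(a - 3, 0)\nprint(ways[adapters[-1] + 3])",
"_____no_output_____"
]
],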
[
[
"class AdapterArray2(AdapterArray): \n \n def count_all_arrangements(self, output=False):\n arrangements_list = [1] \n for a_index in range(1,len(self.adapter_list)):\n adapter = self.adapter_list[a_index]\n arrangements = 0\n for pa_index in range(a_index):\n prev_adapter = self.adapter_list[pa_index]\n jdiff = adapter - prev_adapter\n if jdiff <= 3:\n arrangements += arrangements_list[pa_index]\n \n arrangements_list.append(arrangements) \n \n all_arrangements = arrangements_list[-1]\n if output:\n display(Markdown(f'### Total possible ways to arrange the adapters: `{all_arrangements}`'))\n return all_arrangements\n\n\nclass TestBasic(unittest.TestCase):\n \n def test_puzzle2(self):\n input_data = ['28\\n33\\n18\\n42\\n31\\n14\\n46\\n20\\n48\\n47\\n24\\n23\\n49\\n45\\n19\\n38\\n39\\n11\\n1\\n32\\n25\\n35\\n8\\n17\\n7\\n9\\n4\\n2\\n34\\n10\\n3','16\\n10\\n15\\n5\\n1\\n11\\n7\\n19\\n6\\n12\\n4']\n exp_output = [19208, 8]\n for in_data, exp_out in tuple(zip(input_data, exp_output)):\n aa = AdapterArray2(in_data)\n self.assertEqual(aa.count_all_arrangements(), exp_out)\n \nunittest.main(argv=[\"\"], exit=False)",
".\n----------------------------------------------------------------------\nRan 1 test in 0.001s\n\nOK\n"
],
[
"aa = AdapterArray2(\"input/d10.txt\")\naa.count_all_arrangements(output=True)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d04a0cd3822bfc3ef5d46ddc0ea2636d9829bc6b | 341,746 | ipynb | Jupyter Notebook | 05 Using PCSE WOFOST with a CGMS8 database.ipynb | CHNEWUAF/pcse_notebooks | 73092bfd4ae56a9b67a65efbf149ea8604385e12 | [
"MIT"
] | null | null | null | 05 Using PCSE WOFOST with a CGMS8 database.ipynb | CHNEWUAF/pcse_notebooks | 73092bfd4ae56a9b67a65efbf149ea8604385e12 | [
"MIT"
] | null | null | null | 05 Using PCSE WOFOST with a CGMS8 database.ipynb | CHNEWUAF/pcse_notebooks | 73092bfd4ae56a9b67a65efbf149ea8604385e12 | [
"MIT"
] | 1 | 2021-02-20T09:15:40.000Z | 2021-02-20T09:15:40.000Z | 610.260714 | 162,060 | 0.942943 | [
[
[
"<img style=\"float: right;\" src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOIAAAAjCAYAAACJpNbGAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAABR0RVh0Q3JlYXRpb24gVGltZQAzLzcvMTNND4u/AAAAHHRFWHRTb2Z0d2FyZQBBZG9iZSBGaXJld29ya3MgQ1M26LyyjAAACMFJREFUeJztnD1y20gWgD+6nJtzAsPhRqKL3AwqwQdYDpXDZfoEppNNTaWbmD7BUEXmI3EPMFCR2YI1UDQpdAPqBNzgvRZA/BGUZEnk9FeFIgj0z2ugX7/XP+jGer2mLv/8b6d+4Efgf/8KG0+Zn8XyXLx+bgEslqegcfzxSY3Irrx6bgEsFssBWsRGowGufwHAYtq7u+H6fUCOxTTWax4wBAbr+SRqNDKesOv3gN/133sW0yh927j1mucIaFWINl7PJ+OcvMcfW8Bol3iN44+mLIOsTCp3UJFfAETr+WRQcG8EOJpunEnTyDlYzycbeWr5xxq3jOF6PglK8ix9buv5xCsrAzBkMV1l5OwD/aJ4BXzV3+8F9z4gz/hTSbz8cxc84FuNvDc4VIsYA7+qohmGwAnycA194G22YqUYlZxv4vpN4AuwBv4oON5m8k3TVLnK4sYFcRyN86dWvCwnlCvFCeUVvwX8CkSZZ5eWs5mLJWE/VZThBMgpfirPk5J4f1SU4QsQ6LNP4+j9OkSUKdRiGlD87CWe3PcyR5PFdAhc1cz/joOziMoIeVF95GX1EGVY6bWhvsAeZQrm+kON80PDneD6PRbTi4LQpmJfsZieFaR1qXlXURh3y2BaBPyG63sspv0t6e+CKJTrf2YxHe8Qr6z8AXBdGbMoHgCTshgr4AiItfxljenPJGv5roCi+rGVw1TExTTWl99ThRsglfYHUnF7SMv+Bhjn4idxbhFLGiAu6gjXD3LuUBF5VzWi3CoAfMP1kxe7mNYZMT5DLFgf13eAXi3ZtvMOsUb3V3J5/mmqy+/66RbnTC1LFdfIu/kd8Qx2bTQeg2GBTPfiUF1TgHNE0QaIq/JDX9RKr/WBy/V8EhfEHWncWMO2EKV8S7UypYnYdE2r+o8gyj5MHXVYsZh+JnG7A+3LPQxR5g9II/UJ148ockmrybqm2+Qapo6gppwB8J7EM6jqaz8u0lhfkXgB58BKPam6rvEdh2kRARbTMa7/HXEfVqnW8hxxWwE+5+JJRTYd9CM90gxw/XFuMKMo/yTNDzUkLnbr6rCYnuH6N8igQ3CvNPJproDPuH6MKMd4Z5kMUjnrh98tn1if72/Ie729Vzq708L0YV3/HGmgB4iHsjOProhhd1lrEr4zaz/FvM4lolTnqWum/6jKmeuDmFb1jHylNg96hPQbhcU0wPVBXESvQI4W5aNshsK4jeOPhSOcOaThMVb48dhU8m2UlR+29ZHzrqyhLL0EaTROteGt67EYIsT6F1HXC/ikcvS00dl51PRwLaIwQtzCxGWRFnRMkT8v/SyAy8I+iliHJtDUsHHq7imipE42GtJanxdcB6mgQcm9MmKNs1m5F9MI13+n+cXZSEpAeV8mQgZqNkmU/HsuT7kf4PrGhXcK0h1SXv7iPKsJKCrDYvoV17+meMqhiDFlll7GEb4U3iseAf+k7mqksmU9qUoaj73E7TEtol3iZnks7Moai8WylUN3TS0WANbzyYv2rqxFtFheANYi7iGNRoPOrO2QGTQIu8vhU8vSmbWNDAHQD7vLYWfWbgFx2F3ee3FBZ9ZuIgMpTWAQdpeRXm9pPoPOrD3UMCtkQM4BRmF3ubG6ZZdxkOfCWsT9pU96CuX56KfOjeIFVC8Ar8NI0xuyOQJsVkWl8xzptQGPNY/6xFiLuL+0gIu0FVTrNESmbK7C7tLrzNpmPW0EeGF32UyFN19UnCAT4ZHGWWnYqDNrB4jViZBK/kbD9sLuMiBZSD8AVp1Z+0LD/NmZta+BIzOS3pm1xwBhd9kvkeEGUbQeqSmIdHhkXnGs5fIQRUxPV1x0Zm2zMuoq7C69rU/yBWAt4v7iAd86s/ZaDweZP+wBvwBOZ9b2SCrrmPzk+AWizA09j1QxMK4gZumcWKUWMvkdA56mfxN2l7GmHWk6V2F32Qi7yxaIsmnYHvkJ9zEQqAwBotQXwK2m0c+EN/Kk8zPTZiOkIWrp/xNTnpeOtYh7iFauN+k5W+0vXab6UsbyecAw229SxWiG3aVZ7NBCKrGHuneazy2iyBeIuxkjk9UDE1bzOtJ4IzbdwysNN0D6dnf9Rk3/iKSBWOnhUbASSWW+DbvLWM+HKreZ3O/r77gza5u842w6LxFrEfcTj+Jv3mK4q7Co63hE+fI6E94hUaT0cry+XushSuvoNZO2CdsCrlXJHDYVMUIUJso2BmhfL+wuV6rMvVR6AXnS1428XupaE7Hwnrqkg4cMGD0lr3NfpVegrUw1m2sN0+crNirEX1uTqiPbPoyI/QSKKmqA9I9aer+fcR2zxIj7GiMV+EYVIkZc3r5eH2rYI+0vnpBYIE/vGwUCdYM7s3agbqXJu58VIOwug86sfd2ZtSPNKwi7S9PHy4UnscCmXKuUZQRdsqbPwCHp2754pKYnW0akcZBO/x2df29XnvA//6iV8T3TSluBmOQlR+v5JNvaHixlDZRalRZifbZaAg3vIIrkmP6YVu6owI1M9x2r0vVIFCBGXNLS96Ph45IGY2ey6e1DY20UMaLGItUXoIhVvCv5tvDg2MWLqYNaoKBKWe6Z7gBR8OwAzZOyD4poBmtidlwt/gIxw/QHz0+oWKIoj19fRz8p3YOjoV8195F5l31ltZ5PfnluISyW+/IK6SPstRIiH/FaLHvLa2R+6F6f978AVsD7v0vf0HK4vNK9VfbVojSBceP4o/PcglgsD8GMmjaRbRCc1PEQIrbv45nlIfleIrs778XkrcWSZXMcXPZyqbvfxy7ckuyqHJPslJzH9c3We2ZRbx1O/07ziJbDI1FE2Qwp4n4DNzHJhkZF16+3bnwrCmi40U2eWoj7KZvobn7+YtKO1vPJVyyWPSZrER1kNU0TqfienpvlaWZR7oX+3tba6lxcX7MK3tNfo2RlpNc8tthsIFbAKYtpsA+TtRbLNp5/H4/EFXX0MOfbOGUxvbCKaDkEnl8Rq0jc1ayFjhFFjKwiWg6B/wNk+JCXXNBIXQAAAABJRU5ErkJggg==\">\n",
"_____no_output_____"
],
[
"\n# An Jupyter notebook for running PCSE/WOFOST on a CGMS8 database\n\nThis Jupyter notebook will demonstrate how to connect and read data from a CGMS8 database for a single grid. Next the data will be used to run a PCSE/WOFOST simulation for potential and water-limited conditions, the latter is done for all soil types present in the selected grid. Results are visualized and exported to an Excel file.\n\nNote that no attempt is made to *write* data to a CGMS8 database as writing data to a CGMS database can be tricky and slow. In our experience it is better to first dump simulation results to a CSV file and use specialized loading tools for loading data into the database such as [SQLLoader](http://www.oracle.com/technetwork/database/enterprise-edition/sql-loader-overview-095816.html) for ORACLE or [pgloader](http://pgloader.io/) for PostgreSQL databases.\n\nA dedicated package is now available for running WOFOST simulations using a CGMS database: [pyCGMS](https://github.com/ajwdewit/pycgms). The steps demonstrated in this notebook are implemented in as well in the pyCGMS package which provides a nicer interface to run simulations using a CGMS database.\n\n**Prerequisites for running this notebook**\n\nSeveral packages need to be installed for running PCSE/WOFOST on a CGMS8 database:\n\n 1. PCSE and its dependencies. See the [PCSE user guide](http://pcse.readthedocs.io/en/stable/installing.html) for more information;\n 2. The database client software for the database that will be used, this depends on your database of choice. For SQLite no client software is needed as it is included with python. For Oracle you will need the [Oracle client software](http://www.oracle.com/technetwork/database/features/instant-client/index-097480.html) as well as the [python bindings for the Oracle client (cx_Oracle)](http://sourceforge.net/projects/cx-oracle/files/)). See [here](https://wiki.python.org/moin/DatabaseInterfaces) for an overview of database connectors for python;\n 3. The `pandas` module for processing and visualizing WOFOST output;\n 4. The `matplotlib` module, although we will mainly use it through pandas; ",
"_____no_output_____"
],
[
"## Importing the relevant modules\n\nFirst the required modules need to be imported. These include the CGMS8 data providers for PCSE as well as other relevant modules.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport os, sys\ndata_dir = os.path.join(os.getcwd(), \"data\")\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\nimport sqlalchemy as sa\nimport pandas as pd\n\nimport pcse\nfrom pcse.db.cgms8 import GridWeatherDataProvider, AgroManagementDataProvider, SoilDataIterator, \\\n CropDataProvider, STU_Suitability, SiteDataProvider\nfrom pcse.models import Wofost71_WLP_FD, Wofost71_PP\nfrom pcse.util import DummySoilDataProvider, WOFOST71SiteDataProvider\nfrom pcse.base_classes import ParameterProvider\n\nprint(\"This notebook was built with:\")\nprint(\"python version: %s \" % sys.version)\nprint(\"PCSE version: %s\" % pcse.__version__)",
"This notebook was built with:\npython version: 3.6.7 |Anaconda, Inc.| (default, Oct 24 2018, 09:45:24) [MSC v.1912 64 bit (AMD64)] \nPCSE version: 5.4.1\n"
]
],
[
[
"## Building the connection to a CGMS8 database\n\nThe connection to the database will be made using SQLAlchemy. This requires a database URL to be provided, the format of this URL depends on the database of choice. See the SQLAlchemy documentation on [database URLs](http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls) for the different database URL formats.\n\nFor this example we will use a database that was created for Anhui province in China. This database can be downloaded [here](https://wageningenur4-my.sharepoint.com/:u:/g/personal/allard_dewit_wur_nl/EdwuayKW2IhOp6zCYElA0zsB3NGxcKjZc2zE_JGfVPv89Q?e=oEgI9R).",
"_____no_output_____"
]
],
[
[
"cgms8_db = \"d:/nobackup/CGMS8_Anhui/CGMS_Anhui_complete.db\"\ndbURL = \"sqlite:///%s\" % cgms8_db\nengine = sa.create_engine(dbURL)",
"_____no_output_____"
]
],
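[
[
"# Optional note (added, not from the original notebook): the database URL format depends on the\n# backend. A few hedged examples of what it could look like for other databases supported by\n# SQLAlchemy -- user, password, host and database names below are placeholders:\n# dbURL = \"postgresql://user:password@dbhost:5432/cgms8\"\n# dbURL = \"oracle+cx_oracle://user:password@dbhost:1521/sidname\"\n# The rest of the notebook stays the same: engine = sa.create_engine(dbURL)",
"_____no_output_____"
]
],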
[
[
"## Defining what should be simulated\n\nFor the simulation to run, some IDs must be provided that refer to the location (`grid_no`), crop type (`crop_no`) and year (`campaign_year`) for which the simulation should be carried out. These IDs refer to columns in the CGMS database that are used to define the relationships.",
"_____no_output_____"
]
],
[
[
"grid_no = 81159\ncrop_no = 1 # Winter-wheat\ncampaign_year = 2008\n# if input/output should be printed set show_results=True\nshow_results = True",
"_____no_output_____"
]
],
[
[
"## Retrieving data for the simulation from the database\n\n### Weather data\n\nWeather data will be derived from the GRID_WEATHER table in the database. By default, the entire time-series of weather data available for this grid cell will be fetched from the database.",
"_____no_output_____"
]
],
[
[
"weatherdata = GridWeatherDataProvider(engine, grid_no)\nprint(weatherdata)",
"Weather data provided by: GridWeatherDataProvider\n--------Description---------\nWeather data derived for grid_no: 81159\n----Site characteristics----\nElevation: 29.0\nLatitude: 33.170\nLongitude: 116.334\nData available for 1990-01-01 - 2013-11-03\nNumber of missing days: 0\n\n"
]
],
[
[
"### Agromanagement information\n\nAgromanagement in CGMS mainly refers to the cropping calendar for the given crop and location.",
"_____no_output_____"
]
],
[
[
"agromanagement = AgroManagementDataProvider(engine, grid_no, crop_no, campaign_year)\nagromanagement",
"_____no_output_____"
]
],
[
[
"### Soil information\n\nA CGMS grid cell can contain multiple soils which may or may not be suitable for a particular crop. Moreover, a complicating factor is the arrangement of soils in many soil maps which consist of *Soil Mapping Units* `(SMUs)` which are soil associations whose location on the map is known. Within an SMU, the actual soil types are known as *Soil Typological Units* `(STUs)` whose spatial delination is not known, only the percentage area within the SMU is known. \n\nTherefore, fetching soil information works in two steps:\n 1. First of all the `SoilDataIterator` will fetch all soil information for the given grid cell. It presents it as a list which contains all the SMUs that are present in the grid cell with their internal STU representation. The soil information is organized in such a way that the system can iterate over the different soils including information on soil physical properties as well as SMU area and STU percentage with the SMU.\n 2. Second, the `STU_Suitability` will contain all soils that are suitable for a given crop. The 'STU_NO' of each crop can be used to check if a particular STU is suitable for that crop.\n \nThe example grid cell used here only contains a single SMU/STU combination.",
"_____no_output_____"
]
],
[
[
"soil_iterator = SoilDataIterator(engine, grid_no)\nsoil_iterator",
"_____no_output_____"
],
[
"suitable_stu = STU_Suitability(engine, crop_no)",
"_____no_output_____"
]
],
[
[
"### Crop parameters\n\nCrop parameters are needed for parameterizing the crop simulation model. The `CropDataProvider` will retrieve them from the database for the given crop_no, grid_no and campaign_year.",
"_____no_output_____"
]
],
[
[
"cropd = CropDataProvider(engine, grid_no, crop_no, campaign_year)\nif show_results:\n print(cropd)",
"Crop parameter values for grid_no=81159, crop_no=1 (winter wheat), variety_no=55, campaign_year=2008 derived from sqlite:///d:/nobackup/CGMS8_Anhui/CGMS_Anhui_complete.db\n{'CFET': 1.0, 'CVL': 0.685, 'CVO': 0.709, 'CVR': 0.694, 'CVS': 0.662, 'DEPNR': 4.5, 'DLC': 10.0, 'DLO': 13.5, 'DVSEND': 2.0, 'EFFTB': [0.0, 0.45, 40.0, 0.45], 'IAIRDU': 0.0, 'IDSL': 1.0, 'KDIFTB': [0.0, 0.6, 2.0, 0.6], 'LAIEM': 0.138, 'PERDL': 0.03, 'Q10': 2.0, 'RDI': 10.0, 'RDMCR': 125.0, 'RGRLAI': 0.0082, 'RML': 0.03, 'RMO': 0.01, 'RMR': 0.015, 'RMS': 0.015, 'RRI': 1.2, 'SPA': 0.0, 'SPAN': 23.5, 'SSATB': [0.0, 0.0, 2.0, 0.0], 'TBASE': 0.0, 'TBASEM': 0.0, 'TDWI': 195.0, 'TEFFMX': 30.0, 'TSUM1': 794.0, 'TSUM2': 715.0, 'TSUMEM': 100.0, 'AMAXTB': [0.0, 29.4766, 1.0, 29.4766, 1.3, 29.4766, 2.0, 3.6856, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'DTSMTB': [0.0, 0.0, 25.0, 25.0, 45.0, 25.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'FLTB': [0.0, 0.65, 0.1, 0.65, 0.25, 0.7, 0.5, 0.5, 0.646, 0.3, 0.95, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'FOTB': [0.0, 0.0, 0.95, 0.0, 1.0, 1.0, 2.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'FRTB': [0.0, 0.5, 0.1, 0.5, 0.2, 0.4, 0.35, 0.22, 0.4, 0.17, 0.5, 0.13, 0.7, 0.07, 0.9, 0.03, 1.2, 0.0, 2.0, 0.0], 'FSTB': [0.0, 0.35, 0.1, 0.35, 0.25, 0.3, 0.5, 0.5, 0.646, 0.7, 0.95, 1.0, 1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'RDRRTB': [0.0, 0.0, 1.5, 0.0, 1.5001, 0.02, 2.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'RDRSTB': [0.0, 0.0, 1.5, 0.0, 1.5001, 0.02, 2.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'RFSETB': [0.0, 1.0, 2.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'SLATB': [0.0, 0.0021, 0.5, 0.0015, 2.0, 0.0015, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'TMNFTB': [-5.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'TMPFTB': [0.0, 0.01, 10.0, 0.6, 15.0, 1.0, 25.0, 1.0, 35.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VERNRTB': [], 'DVSI': 0.0, 'IOX': 0, 'CRPNAM': 'winter wheat'}\n"
]
],
[
[
"### Site parameters\n\nSite parameters are an ancillary class of parameters that are related to a given site. For example, an important parameter is the initial amount of moisture in the soil profile (WAV) and the Atmospheric CO$_2$ concentration (CO2). Site parameters will be fetched for each soil type within the soil iteration loop.",
"_____no_output_____"
],
[
"## Simulating with WOFOST\n\n### Place holders for storing simulation results",
"_____no_output_____"
]
],
[
[
"daily_results = {}\nsummary_results = {}",
"_____no_output_____"
]
],
[
[
"### Potential production",
"_____no_output_____"
]
],
[
[
"# For potential production we can provide site data directly\nsited = WOFOST71SiteDataProvider(CO2=360, WAV=25)\n# We do not need soildata for potential production so we provide some dummy values here\nsoild = DummySoilDataProvider()\n# Start WOFOST, run the simulation \nparameters = ParameterProvider(sitedata=sited, soildata=soild, cropdata=cropd)\nwofost = Wofost71_PP(parameters, weatherdata, agromanagement)\nwofost.run_till_terminate()\n# convert output to Pandas DataFrame and store it\ndaily_results['Potential'] = pd.DataFrame(wofost.get_output()).set_index(\"day\")\nsummary_results['Potential'] = wofost.get_summary_output()",
"_____no_output_____"
]
],
[
[
"### Water-limited production\n\nWater-limited simulations will be carried out for each soil type. First we will check that the soil type is suitable. Next we will retrieve the site data and run the simulation. Finally, we will collect the output and store the results. ",
"_____no_output_____"
]
],
[
[
"for smu_no, area, stu_no, percentage, soild in soil_iterator:\n # Check if this is a suitable STU\n if stu_no not in suitable_stu:\n continue\n # retrieve the site data for this soil type\n sited = SiteDataProvider(engine, grid_no, crop_no, campaign_year, stu_no)\n # Start WOFOST, run the simulation \n parameters = ParameterProvider(sitedata=sited, soildata=soild, cropdata=cropd)\n wofost = Wofost71_WLP_FD(parameters, weatherdata, agromanagement)\n wofost.run_till_terminate()\n # Store simulation results\n runid = \"smu_%s-stu_%s\" % (smu_no, stu_no)\n daily_results[runid] = pd.DataFrame(wofost.get_output()).set_index(\"day\")\n summary_results[runid] = wofost.get_summary_output()",
"_____no_output_____"
]
],
[
[
"## Visualizing and exporting simulation results\n\n### We can visualize the simulation results using pandas and matplotlib",
"_____no_output_____"
]
],
[
[
"# Generate a figure with 10 subplots\nfig, axes = plt.subplots(nrows=5, ncols=2, figsize=(12, 30))\n# Plot results\nfor runid, results in daily_results.items():\n for var, ax in zip(results, axes.flatten()):\n results[var].plot(ax=ax, title=var, label=runid)\n ax.set_title(var)\nfig.autofmt_xdate()\naxes[0][0].legend(loc='upper left')",
"_____no_output_____"
]
],
[
[
"### Exporting the simulation results\n\nA pandas DataFrame or panel can be easily export to a [variety of formats](http://pandas.pydata.org/pandas-docs/stable/io.html) including CSV, Excel or HDF5. First we convert the results to a Panel, next we will export to an Excel file. ",
"_____no_output_____"
]
],
[
[
"excel_fname = os.path.join(data_dir, \"output\", \"cgms8_wofost_results.xls\")\npanel = pd.Panel(daily_results)\npanel.to_excel(excel_fname)",
"_____no_output_____"
]
],
[
[
"## Simulating with a different start date waterbalance\nBy default CGMS starts the simulation when the crop is planted. Particularly in dry climates this can be problematic because the results become very sensitive to the initial value of the soil water balance. In such scenarios, it is more realistic to start the water balance with a dry soil profile well before the crop is planted and let the soil 'fill up' as a result of rainfall. \n\nTo enable this option, the column `GIVEN_STARTDATE_WATBAL` in the table `INITIAL_SOIL_WATER` should be set to the right starting date for each grid_no, crop_no, year and stu_no. Moreover, the other parameters in the table should be set to the appropriate values (particularly the initial soil moisture `WAV`).\n\nThe start date of the water balance should then be used to update the agromanagement data during the simulation loop, see the example below.",
"_____no_output_____"
]
],
[
[
"for smu_no, area, stu_no, percentage, soild in soil_iterator:\n # Check if this is a suitable STU\n if stu_no not in suitable_stu:\n continue\n # retrieve the site data for this soil type\n sited = SiteDataProvider(engine, grid_no, crop_no, campaign_year, stu_no)\n # update the campaign start date in the agromanagement data\n agromanagement.set_campaign_start_date(sited.start_date_waterbalance)\n # Start WOFOST, run the simulation \n parameters = ParameterProvider(sitedata=sited, soildata=soild, cropdata=cropd)\n wofost = Wofost71_WLP_FD(parameters, weatherdata, agromanagement)\n wofost.run_till_terminate()\n # Store simulation results\n runid = \"smu_%s-stu_%s\" % (smu_no, stu_no)\n daily_results[runid] = pd.DataFrame(wofost.get_output()).set_index(\"day\")\n summary_results[runid] = wofost.get_summary_output()",
"_____no_output_____"
]
],
[
[
"## Let's show the results \n\nAs you can see, the results from the simulation are slightly different because of a different start date of the water balance.\n\nNOTE: the dates on the x-axis are the same except for the soil moisture chart 'SM' where the water-limited simulation results start before potential results. This is a matplotlib problem.",
"_____no_output_____"
]
],
[
[
"# Generate a figure with 10 subplots\nfig, axes = plt.subplots(nrows=5, ncols=2, figsize=(12, 30))\n# Plot results\nfor runid, results in daily_results.items():\n for var, ax in zip(results, axes.flatten()):\n results[var].plot(ax=ax, title=var, label=runid)\nfig.autofmt_xdate()\naxes[0][0].legend(loc='upper left')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d04a1939a641831d030bcbe2675ecf9d10ed5fae | 16,121 | ipynb | Jupyter Notebook | 7_nlp_sentiment_transformer/7-6_Transformer.ipynb | Jinx-git/pytorch_advanced | 63a5bcdb03b7861b1edfa82f6ed7d40392ab25f6 | [
"MIT"
] | 1 | 2020-06-08T04:34:36.000Z | 2020-06-08T04:34:36.000Z | 7_nlp_sentiment_transformer/7-6_Transformer.ipynb | Jinx-git/pytorch_advanced | 63a5bcdb03b7861b1edfa82f6ed7d40392ab25f6 | [
"MIT"
] | null | null | null | 7_nlp_sentiment_transformer/7-6_Transformer.ipynb | Jinx-git/pytorch_advanced | 63a5bcdb03b7861b1edfa82f6ed7d40392ab25f6 | [
"MIT"
] | 2 | 2020-01-09T07:56:54.000Z | 2020-02-19T05:27:06.000Z | 29.417883 | 100 | 0.485516 | [
[
[
"# 7.6 Transformerモデル(分類タスク用)の実装\n\n- 本ファイルでは、クラス分類のTransformerモデルを実装します。\n\n",
"_____no_output_____"
],
[
"※ 本章のファイルはすべてUbuntuでの動作を前提としています。Windowsなど文字コードが違う環境での動作にはご注意下さい。",
"_____no_output_____"
],
[
"# 7.6 学習目標\n\n1.\tTransformerのモジュール構成を理解する\n2.\tLSTMやRNNを使用せずCNNベースのTransformerで自然言語処理が可能な理由を理解する\n3.\tTransformerを実装できるようになる\n\n",
"_____no_output_____"
],
[
"# 事前準備\n書籍の指示に従い、本章で使用するデータを用意します\n",
"_____no_output_____"
]
],
[
[
"import math\nimport numpy as np\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F \nimport torchtext",
"_____no_output_____"
],
[
"# Setup seeds\ntorch.manual_seed(1234)\nnp.random.seed(1234)\nrandom.seed(1234)",
"_____no_output_____"
],
[
"class Embedder(nn.Module):\n '''idで示されている単語をベクトルに変換します'''\n\n def __init__(self, text_embedding_vectors):\n super(Embedder, self).__init__()\n\n self.embeddings = nn.Embedding.from_pretrained(\n embeddings=text_embedding_vectors, freeze=True)\n # freeze=Trueによりバックプロパゲーションで更新されず変化しなくなります\n\n def forward(self, x):\n x_vec = self.embeddings(x)\n\n return x_vec\n",
"_____no_output_____"
],
[
"# 動作確認\n\n# 前節のDataLoaderなどを取得\nfrom utils.dataloader import get_IMDb_DataLoaders_and_TEXT\ntrain_dl, val_dl, test_dl, TEXT = get_IMDb_DataLoaders_and_TEXT(\n max_length=256, batch_size=24)\n\n# ミニバッチの用意\nbatch = next(iter(train_dl))\n\n# モデル構築\nnet1 = Embedder(TEXT.vocab.vectors)\n\n# 入出力\nx = batch.Text[0]\nx1 = net1(x) # 単語をベクトルに\n\nprint(\"入力のテンソルサイズ:\", x.shape)\nprint(\"出力のテンソルサイズ:\", x1.shape)\n",
"入力のテンソルサイズ: torch.Size([24, 256])\n出力のテンソルサイズ: torch.Size([24, 256, 300])\n"
],
[
"class PositionalEncoder(nn.Module):\n '''入力された単語の位置を示すベクトル情報を付加する'''\n\n def __init__(self, d_model=300, max_seq_len=256):\n super().__init__()\n\n self.d_model = d_model # 単語ベクトルの次元数\n\n # 単語の順番(pos)と埋め込みベクトルの次元の位置(i)によって一意に定まる値の表をpeとして作成\n pe = torch.zeros(max_seq_len, d_model)\n\n # GPUが使える場合はGPUへ送る、ここでは省略。実際に学習時には使用する\n # device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # pe = pe.to(device)\n\n for pos in range(max_seq_len):\n for i in range(0, d_model, 2):\n pe[pos, i] = math.sin(pos / (10000 ** ((2 * i)/d_model)))\n pe[pos, i + 1] = math.cos(pos /\n (10000 ** ((2 * (i + 1))/d_model)))\n\n # 表peの先頭に、ミニバッチ次元となる次元を足す\n self.pe = pe.unsqueeze(0)\n\n # 勾配を計算しないようにする\n self.pe.requires_grad = False\n\n def forward(self, x):\n\n # 入力xとPositonal Encodingを足し算する\n # xがpeよりも小さいので、大きくする\n ret = math.sqrt(self.d_model)*x + self.pe\n return ret\n",
"_____no_output_____"
],
[
"# 動作確認\n\n# モデル構築\nnet1 = Embedder(TEXT.vocab.vectors)\nnet2 = PositionalEncoder(d_model=300, max_seq_len=256)\n\n# 入出力\nx = batch.Text[0]\nx1 = net1(x) # 単語をベクトルに\nx2 = net2(x1)\n\nprint(\"入力のテンソルサイズ:\", x1.shape)\nprint(\"出力のテンソルサイズ:\", x2.shape)\n",
"入力のテンソルサイズ: torch.Size([24, 256, 300])\n出力のテンソルサイズ: torch.Size([24, 256, 300])\n"
],
[
"class Attention(nn.Module):\n '''Transformerは本当はマルチヘッドAttentionですが、\n 分かりやすさを優先しシングルAttentionで実装します'''\n\n def __init__(self, d_model=300):\n super().__init__()\n\n # SAGANでは1dConvを使用したが、今回は全結合層で特徴量を変換する\n self.q_linear = nn.Linear(d_model, d_model)\n self.v_linear = nn.Linear(d_model, d_model)\n self.k_linear = nn.Linear(d_model, d_model)\n\n # 出力時に使用する全結合層\n self.out = nn.Linear(d_model, d_model)\n\n # Attentionの大きさ調整の変数\n self.d_k = d_model\n\n def forward(self, q, k, v, mask):\n # 全結合層で特徴量を変換\n k = self.k_linear(k)\n q = self.q_linear(q)\n v = self.v_linear(v)\n\n # Attentionの値を計算する\n # 各値を足し算すると大きくなりすぎるので、root(d_k)で割って調整\n weights = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(self.d_k)\n\n # ここでmaskを計算\n mask = mask.unsqueeze(1)\n weights = weights.masked_fill(mask == 0, -1e9)\n\n # softmaxで規格化をする\n normlized_weights = F.softmax(weights, dim=-1)\n\n # AttentionをValueとかけ算\n output = torch.matmul(normlized_weights, v)\n\n # 全結合層で特徴量を変換\n output = self.out(output)\n\n return output, normlized_weights\n",
"_____no_output_____"
],
[
"class FeedForward(nn.Module):\n def __init__(self, d_model, d_ff=1024, dropout=0.1):\n '''Attention層から出力を単純に全結合層2つで特徴量を変換するだけのユニットです'''\n super().__init__()\n\n self.linear_1 = nn.Linear(d_model, d_ff)\n self.dropout = nn.Dropout(dropout)\n self.linear_2 = nn.Linear(d_ff, d_model)\n\n def forward(self, x):\n x = self.linear_1(x)\n x = self.dropout(F.relu(x))\n x = self.linear_2(x)\n return x\n",
"_____no_output_____"
],
[
"class TransformerBlock(nn.Module):\n def __init__(self, d_model, dropout=0.1):\n super().__init__()\n\n # LayerNormalization層\n # https://pytorch.org/docs/stable/nn.html?highlight=layernorm\n self.norm_1 = nn.LayerNorm(d_model)\n self.norm_2 = nn.LayerNorm(d_model)\n\n # Attention層\n self.attn = Attention(d_model)\n\n # Attentionのあとの全結合層2つ\n self.ff = FeedForward(d_model)\n\n # Dropout\n self.dropout_1 = nn.Dropout(dropout)\n self.dropout_2 = nn.Dropout(dropout)\n\n def forward(self, x, mask):\n # 正規化とAttention\n x_normlized = self.norm_1(x)\n output, normlized_weights = self.attn(\n x_normlized, x_normlized, x_normlized, mask)\n \n x2 = x + self.dropout_1(output)\n\n # 正規化と全結合層\n x_normlized2 = self.norm_2(x2)\n output = x2 + self.dropout_2(self.ff(x_normlized2))\n\n return output, normlized_weights\n",
"_____no_output_____"
],
[
"# 動作確認\n\n# モデル構築\nnet1 = Embedder(TEXT.vocab.vectors)\nnet2 = PositionalEncoder(d_model=300, max_seq_len=256)\nnet3 = TransformerBlock(d_model=300)\n\n# maskの作成\nx = batch.Text[0]\ninput_pad = 1 # 単語のIDにおいて、'<pad>': 1 なので\ninput_mask = (x != input_pad)\nprint(input_mask[0])\n\n# 入出力\nx1 = net1(x) # 単語をベクトルに\nx2 = net2(x1) # Positon情報を足し算\nx3, normlized_weights = net3(x2, input_mask) # Self-Attentionで特徴量を変換\n\nprint(\"入力のテンソルサイズ:\", x2.shape)\nprint(\"出力のテンソルサイズ:\", x3.shape)\nprint(\"Attentionのサイズ:\", normlized_weights.shape)\n",
"tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.uint8)\n入力のテンソルサイズ: torch.Size([24, 256, 300])\n出力のテンソルサイズ: torch.Size([24, 256, 300])\nAttentionのサイズ: torch.Size([24, 256, 256])\n"
],
[
"class ClassificationHead(nn.Module):\n '''Transformer_Blockの出力を使用し、最後にクラス分類させる'''\n\n def __init__(self, d_model=300, output_dim=2):\n super().__init__()\n\n # 全結合層\n self.linear = nn.Linear(d_model, output_dim) # output_dimはポジ・ネガの2つ\n\n # 重み初期化処理\n nn.init.normal_(self.linear.weight, std=0.02)\n nn.init.normal_(self.linear.bias, 0)\n\n def forward(self, x):\n x0 = x[:, 0, :] # 各ミニバッチの各文の先頭の単語の特徴量(300次元)を取り出す\n out = self.linear(x0)\n\n return out\n",
"_____no_output_____"
],
[
"# 動作確認\n\n# ミニバッチの用意\nbatch = next(iter(train_dl))\n\n# モデル構築\nnet1 = Embedder(TEXT.vocab.vectors)\nnet2 = PositionalEncoder(d_model=300, max_seq_len=256)\nnet3 = TransformerBlock(d_model=300)\nnet4 = ClassificationHead(output_dim=2, d_model=300)\n\n# 入出力\nx = batch.Text[0]\nx1 = net1(x) # 単語をベクトルに\nx2 = net2(x1) # Positon情報を足し算\nx3, normlized_weights = net3(x2, input_mask) # Self-Attentionで特徴量を変換\nx4 = net4(x3) # 最終出力の0単語目を使用して、分類0-1のスカラーを出力\n\nprint(\"入力のテンソルサイズ:\", x3.shape)\nprint(\"出力のテンソルサイズ:\", x4.shape)\n",
"入力のテンソルサイズ: torch.Size([24, 256, 300])\n出力のテンソルサイズ: torch.Size([24, 2])\n"
],
[
"# 最終的なTransformerモデルのクラス\n\n\nclass TransformerClassification(nn.Module):\n '''Transformerでクラス分類させる'''\n\n def __init__(self, text_embedding_vectors, d_model=300, max_seq_len=256, output_dim=2):\n super().__init__()\n\n # モデル構築\n self.net1 = Embedder(text_embedding_vectors)\n self.net2 = PositionalEncoder(d_model=d_model, max_seq_len=max_seq_len)\n self.net3_1 = TransformerBlock(d_model=d_model)\n self.net3_2 = TransformerBlock(d_model=d_model)\n self.net4 = ClassificationHead(output_dim=output_dim, d_model=d_model)\n\n def forward(self, x, mask):\n x1 = self.net1(x) # 単語をベクトルに\n x2 = self.net2(x1) # Positon情報を足し算\n x3_1, normlized_weights_1 = self.net3_1(\n x2, mask) # Self-Attentionで特徴量を変換\n x3_2, normlized_weights_2 = self.net3_2(\n x3_1, mask) # Self-Attentionで特徴量を変換\n x4 = self.net4(x3_2) # 最終出力の0単語目を使用して、分類0-1のスカラーを出力\n return x4, normlized_weights_1, normlized_weights_2\n",
"_____no_output_____"
],
[
"# 動作確認\n\n# ミニバッチの用意\nbatch = next(iter(train_dl))\n\n# モデル構築\nnet = TransformerClassification(\n text_embedding_vectors=TEXT.vocab.vectors, d_model=300, max_seq_len=256, output_dim=2)\n\n# 入出力\nx = batch.Text[0]\ninput_mask = (x != input_pad)\nout, normlized_weights_1, normlized_weights_2 = net(x, input_mask)\n\nprint(\"出力のテンソルサイズ:\", out.shape)\nprint(\"出力テンソルのsigmoid:\", F.softmax(out, dim=1))\n",
"出力のテンソルサイズ: torch.Size([24, 2])\n出力テンソルのsigmoid: tensor([[0.6980, 0.3020],\n [0.7318, 0.2682],\n [0.7244, 0.2756],\n [0.7135, 0.2865],\n [0.7022, 0.2978],\n [0.6974, 0.3026],\n [0.6831, 0.3169],\n [0.6487, 0.3513],\n [0.7096, 0.2904],\n [0.7221, 0.2779],\n [0.7213, 0.2787],\n [0.7046, 0.2954],\n [0.6738, 0.3262],\n [0.7069, 0.2931],\n [0.7217, 0.2783],\n [0.6837, 0.3163],\n [0.7011, 0.2989],\n [0.6944, 0.3056],\n [0.6860, 0.3140],\n [0.7183, 0.2817],\n [0.7256, 0.2744],\n [0.7288, 0.2712],\n [0.6678, 0.3322],\n [0.7253, 0.2747]], grad_fn=<SoftmaxBackward>)\n"
]
],
[
[
"ここまでの内容をフォルダ「utils」のtransformer.pyに別途保存しておき、次節からはこちらから読み込むようにします",
"_____no_output_____"
],
[
"以上",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
d04a1ba627962e1ce98e30bbc1526a9b87100d48 | 216,795 | ipynb | Jupyter Notebook | machine_learning/gan/cdcgan/tf_cdcgan/tf_cdcgan_run_module_local.ipynb | ryangillard/artificial_intelligence | f7c21af221f366b075d351deeeb00a1b266ac3e3 | [
"Apache-2.0"
] | 4 | 2019-07-04T05:15:59.000Z | 2020-06-29T19:34:33.000Z | machine_learning/gan/cdcgan/tf_cdcgan/tf_cdcgan_run_module_local.ipynb | ryangillard/artificial_intelligence | f7c21af221f366b075d351deeeb00a1b266ac3e3 | [
"Apache-2.0"
] | null | null | null | machine_learning/gan/cdcgan/tf_cdcgan/tf_cdcgan_run_module_local.ipynb | ryangillard/artificial_intelligence | f7c21af221f366b075d351deeeb00a1b266ac3e3 | [
"Apache-2.0"
] | 1 | 2019-05-23T16:06:51.000Z | 2019-05-23T16:06:51.000Z | 575.05305 | 203,624 | 0.944173 | [
[
[
"# Run model module locally",
"_____no_output_____"
]
],
[
[
"import os\n\n# Import os environment variables for file hyperparameters.\nos.environ[\"TRAIN_FILE_PATTERN\"] = \"gs://machine-learning-1234-bucket/gan/data/cifar10/train*.tfrecord\"\nos.environ[\"EVAL_FILE_PATTERN\"] = \"gs://machine-learning-1234-bucket/gan/data/cifar10/test*.tfrecord\"\nos.environ[\"OUTPUT_DIR\"] = \"gs://machine-learning-1234-bucket/gan/cdcgan/trained_model2\"\n\n# Import os environment variables for train hyperparameters.\nos.environ[\"TRAIN_BATCH_SIZE\"] = str(100)\nos.environ[\"TRAIN_STEPS\"] = str(50000)\nos.environ[\"SAVE_SUMMARY_STEPS\"] = str(100)\nos.environ[\"SAVE_CHECKPOINTS_STEPS\"] = str(5000)\nos.environ[\"KEEP_CHECKPOINT_MAX\"] = str(10)\nos.environ[\"INPUT_FN_AUTOTUNE\"] = \"False\"\n\n# Import os environment variables for eval hyperparameters.\nos.environ[\"EVAL_BATCH_SIZE\"] = str(16)\nos.environ[\"EVAL_STEPS\"] = str(10)\nos.environ[\"START_DELAY_SECS\"] = str(6000)\nos.environ[\"THROTTLE_SECS\"] = str(6000)\n\n# Import os environment variables for image hyperparameters.\nos.environ[\"HEIGHT\"] = str(32)\nos.environ[\"WIDTH\"] = str(32)\nos.environ[\"DEPTH\"] = str(3)\n\n# Import os environment variables for label hyperparameters.\nnum_classes = 10\nos.environ[\"NUM_CLASSES\"] = str(num_classes)\nos.environ[\"LABEL_EMBEDDING_DIMENSION\"] = str(10)\n\n# Import os environment variables for generator hyperparameters.\nos.environ[\"LATENT_SIZE\"] = str(512)\nos.environ[\"GENERATOR_PROJECTION_DIMS\"] = \"4,4,256\"\nos.environ[\"GENERATOR_USE_LABELS\"] = \"True\"\nos.environ[\"GENERATOR_EMBED_LABELS\"] = \"True\"\nos.environ[\"GENERATOR_CONCATENATE_LABELS\"] = \"True\"\nos.environ[\"GENERATOR_NUM_FILTERS\"] = \"128,128,128\"\nos.environ[\"GENERATOR_KERNEL_SIZES\"] = \"4,4,4\"\nos.environ[\"GENERATOR_STRIDES\"] = \"2,2,2\"\nos.environ[\"GENERATOR_FINAL_NUM_FILTERS\"] = str(3)\nos.environ[\"GENERATOR_FINAL_KERNEL_SIZE\"] = str(3)\nos.environ[\"GENERATOR_FINAL_STRIDE\"] = str(1)\nos.environ[\"GENERATOR_LEAKY_RELU_ALPHA\"] = str(0.2)\nos.environ[\"GENERATOR_FINAL_ACTIVATION\"] = \"tanh\"\nos.environ[\"GENERATOR_L1_REGULARIZATION_SCALE\"] = str(0.)\nos.environ[\"GENERATOR_L2_REGULARIZATION_SCALE\"] = str(0.)\nos.environ[\"GENERATOR_OPTIMIZER\"] = \"Adam\"\nos.environ[\"GENERATOR_LEARNING_RATE\"] = str(0.0002)\nos.environ[\"GENERATOR_ADAM_BETA1\"] = str(0.5)\nos.environ[\"GENERATOR_ADAM_BETA2\"] = str(0.999)\nos.environ[\"GENERATOR_ADAM_EPSILON\"] = str(1e-8)\nos.environ[\"GENERATOR_CLIP_GRADIENTS\"] = \"None\"\nos.environ[\"GENERATOR_TRAIN_STEPS\"] = str(1)\n\n# Import os environment variables for discriminator hyperparameters.\nos.environ[\"DISCRIMINATOR_USE_LABELS\"] = \"True\"\nos.environ[\"DISCRIMINATOR_EMBED_LABELS\"] = \"True\"\nos.environ[\"DISCRIMINATOR_CONCATENATE_LABELS\"] = \"True\"\nos.environ[\"DISCRIMINATOR_NUM_FILTERS\"] = \"64,128,128,256\"\nos.environ[\"DISCRIMINATOR_KERNEL_SIZES\"] = \"3,3,3,3\"\nos.environ[\"DISCRIMINATOR_STRIDES\"] = \"1,2,2,2\"\nos.environ[\"DISCRIMINATOR_DROPOUT_RATES\"] = \"0.3,0.3,0.3,0.3\"\nos.environ[\"DISCRIMINATOR_LEAKY_RELU_ALPHA\"] = str(0.2)\nos.environ[\"DISCRIMINATOR_L1_REGULARIZATION_SCALE\"] = str(0.)\nos.environ[\"DISCRIMINATOR_L2_REGULARIZATION_SCALE\"] = str(0.)\nos.environ[\"DISCRIMINATOR_OPTIMIZER\"] = \"Adam\"\nos.environ[\"DISCRIMINATOR_LEARNING_RATE\"] = str(0.0002)\nos.environ[\"DISCRIMINATOR_ADAM_BETA1\"] = str(0.5)\nos.environ[\"DISCRIMINATOR_ADAM_BETA2\"] = str(0.999)\nos.environ[\"DISCRIMINATOR_ADAM_EPSILON\"] = str(1e-8)\nos.environ[\"DISCRIMINATOR_CLIP_GRADIENTS\"] = 
\"None\"\nos.environ[\"DISCRIMINATOR_TRAIN_STEPS\"] = str(1)\nos.environ[\"LABEL_SMOOTHING\"] = str(0.9)\n",
"_____no_output_____"
]
],
[
[
"## Train cdcgan model",
"_____no_output_____"
]
],
[
[
"%%bash\ngsutil -m rm -rf ${OUTPUT_DIR}\nexport PYTHONPATH=$PYTHONPATH:$PWD/cdcgan_module\npython3 -m trainer.task \\\n --train_file_pattern=${TRAIN_FILE_PATTERN} \\\n --eval_file_pattern=${EVAL_FILE_PATTERN} \\\n --output_dir=${OUTPUT_DIR} \\\n --job-dir=./tmp \\\n \\\n --train_batch_size=${TRAIN_BATCH_SIZE} \\\n --train_steps=${TRAIN_STEPS} \\\n --save_summary_steps=${SAVE_SUMMARY_STEPS} \\\n --save_checkpoints_steps=${SAVE_CHECKPOINTS_STEPS} \\\n --keep_checkpoint_max=${KEEP_CHECKPOINT_MAX} \\\n --input_fn_autotune=${INPUT_FN_AUTOTUNE} \\\n \\\n --eval_batch_size=${EVAL_BATCH_SIZE} \\\n --eval_steps=${EVAL_STEPS} \\\n --start_delay_secs=${START_DELAY_SECS} \\\n --throttle_secs=${THROTTLE_SECS} \\\n \\\n --height=${HEIGHT} \\\n --width=${WIDTH} \\\n --depth=${DEPTH} \\\n \\\n --num_classes=${NUM_CLASSES} \\\n --label_embedding_dimension=${LABEL_EMBEDDING_DIMENSION} \\\n \\\n --latent_size=${LATENT_SIZE} \\\n --generator_projection_dims=${GENERATOR_PROJECTION_DIMS} \\\n --generator_use_labels=${GENERATOR_USE_LABELS} \\\n --generator_embed_labels=${GENERATOR_EMBED_LABELS} \\\n --generator_concatenate_labels=${GENERATOR_CONCATENATE_LABELS} \\\n --generator_num_filters=${GENERATOR_NUM_FILTERS} \\\n --generator_kernel_sizes=${GENERATOR_KERNEL_SIZES} \\\n --generator_strides=${GENERATOR_STRIDES} \\\n --generator_final_num_filters=${GENERATOR_FINAL_NUM_FILTERS} \\\n --generator_final_kernel_size=${GENERATOR_FINAL_KERNEL_SIZE} \\\n --generator_final_stride=${GENERATOR_FINAL_STRIDE} \\\n --generator_leaky_relu_alpha=${GENERATOR_LEAKY_RELU_ALPHA} \\\n --generator_final_activation=${GENERATOR_FINAL_ACTIVATION} \\\n --generator_l1_regularization_scale=${GENERATOR_L1_REGULARIZATION_SCALE} \\\n --generator_l2_regularization_scale=${GENERATOR_L2_REGULARIZATION_SCALE} \\\n --generator_optimizer=${GENERATOR_OPTIMIZER} \\\n --generator_learning_rate=${GENERATOR_LEARNING_RATE} \\\n --generator_adam_beta1=${GENERATOR_ADAM_BETA1} \\\n --generator_adam_beta2=${GENERATOR_ADAM_BETA2} \\\n --generator_adam_epsilon=${GENERATOR_ADAM_EPSILON} \\\n --generator_clip_gradients=${GENERATOR_CLIP_GRADIENTS} \\\n --generator_train_steps=${GENERATOR_TRAIN_STEPS} \\\n \\\n --discriminator_use_labels=${DISCRIMINATOR_USE_LABELS} \\\n --discriminator_embed_labels=${DISCRIMINATOR_EMBED_LABELS} \\\n --discriminator_concatenate_labels=${DISCRIMINATOR_CONCATENATE_LABELS} \\\n --discriminator_num_filters=${DISCRIMINATOR_NUM_FILTERS} \\\n --discriminator_kernel_sizes=${DISCRIMINATOR_KERNEL_SIZES} \\\n --discriminator_strides=${DISCRIMINATOR_STRIDES} \\\n --discriminator_dropout_rates=${DISCRIMINATOR_DROPOUT_RATES} \\\n --discriminator_leaky_relu_alpha=${DISCRIMINATOR_LEAKY_RELU_ALPHA} \\\n --discriminator_l1_regularization_scale=${DISCRIMINATOR_L1_REGULARIZATION_SCALE} \\\n --discriminator_l2_regularization_scale=${DISCRIMINATOR_L2_REGULARIZATION_SCALE} \\\n --discriminator_optimizer=${DISCRIMINATOR_OPTIMIZER} \\\n --discriminator_learning_rate=${DISCRIMINATOR_LEARNING_RATE} \\\n --discriminator_adam_beta1=${DISCRIMINATOR_ADAM_BETA1} \\\n --discriminator_adam_beta2=${DISCRIMINATOR_ADAM_BETA2} \\\n --discriminator_adam_epsilon=${DISCRIMINATOR_ADAM_EPSILON} \\\n --discriminator_clip_gradients=${DISCRIMINATOR_CLIP_GRADIENTS} \\\n --discriminator_train_steps=${DISCRIMINATOR_TRAIN_STEPS} \\\n --label_smoothing=${LABEL_SMOOTHING}",
"_____no_output_____"
]
],
[
[
"## Prediction",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf",
"_____no_output_____"
],
[
"!gsutil ls gs://machine-learning-1234-bucket/gan/cdcgan/trained_model2/export/exporter",
"gs://machine-learning-1234-bucket/gan/cdcgan/trained_model2/export/exporter/\ngs://machine-learning-1234-bucket/gan/cdcgan/trained_model2/export/exporter/1592853236/\ngs://machine-learning-1234-bucket/gan/cdcgan/trained_model2/export/exporter/1592859903/\n"
],
[
"predict_fn = tf.contrib.predictor.from_saved_model(\n \"gs://machine-learning-1234-bucket/gan/cdcgan/trained_model2/export/exporter/1592859903\"\n)\npredictions = predict_fn(\n {\n \"Z\": np.random.normal(size=(num_classes, 512)),\n \"label\": np.arange(num_classes)\n }\n)",
"INFO:tensorflow:Restoring parameters from gs://machine-learning-1234-bucket/gan/cdcgan/trained_model2/export/exporter/1592859903/variables/variables\n"
],
[
"print(list(predictions.keys()))",
"['generated_images']\n"
]
],
[
[
"Convert image back to the original scale.",
"_____no_output_____"
]
],
[
[
"generated_images = np.clip(\n a=((predictions[\"generated_images\"] + 1.0) * (255. / 2)).astype(np.int32),\n a_min=0,\n a_max=255\n)",
"_____no_output_____"
],
[
"print(generated_images.shape)",
"(10, 32, 32, 3)\n"
],
[
"def plot_images(images):\n \"\"\"Plots images.\n\n Args:\n images: np.array, array of images of\n [num_images, height, width, depth].\n \n \"\"\"\n num_images = len(images)\n\n plt.figure(figsize=(20, 20))\n for i in range(num_images):\n image = images[i]\n plt.subplot(1, num_images, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(\n image,\n cmap=plt.cm.binary\n )\n plt.show()",
"_____no_output_____"
],
[
"plot_images(generated_images)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
d04a2e9e1eb5d96597d59e4c868b33e3c140e79e | 209,767 | ipynb | Jupyter Notebook | Q3.ipynb | ramniwashkumar/Relevel_practice | 5d70e8d663bdd923620e51b1974be5e33f6bcc22 | [
"MIT"
] | null | null | null | Q3.ipynb | ramniwashkumar/Relevel_practice | 5d70e8d663bdd923620e51b1974be5e33f6bcc22 | [
"MIT"
] | null | null | null | Q3.ipynb | ramniwashkumar/Relevel_practice | 5d70e8d663bdd923620e51b1974be5e33f6bcc22 | [
"MIT"
] | null | null | null | 213.612016 | 65,612 | 0.899574 | [
[
[
"dataset = pd.read_csv(r\"D:\\true_car_project_full.csv\")\ndataset",
"_____no_output_____"
],
[
"## year, mileage, model, make, state",
"_____no_output_____"
],
[
"numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']\n\nprint('Number of numerical variables: ', len(numerical_features))\n\n# visualise the numerical variables\ndataset[numerical_features].head()",
"Number of numerical variables: 4\n"
],
[
"year_feature = \"Year\"",
"_____no_output_____"
],
[
"dataset.groupby('Year')['Price'].median().plot()\nplt.xlabel('Year')\nplt.ylabel('Median Car Price')\nplt.title(\"Car Price vs Year\")",
"_____no_output_____"
],
[
"dataset.groupby('Year')['Price'].median().plot.bar()\nplt.xlabel('Year')\nplt.ylabel('Median Car Price')\nplt.title(\"Car Price vs Year\")",
"_____no_output_____"
],
[
"dataset.Price.mean()",
"_____no_output_____"
],
[
"dataset.groupby('Year')['Mileage'].median().plot()\nplt.xlabel('Year')\nplt.ylabel('Median Car Mileage')\nplt.title(\"Car Mileage vs Year\")",
"_____no_output_____"
],
[
"plt.rcParams[\"figure.figsize\"] = (12, 12)\ndataset.groupby('Model')['Price'].median().plot()\nplt.xlabel('Model')\nplt.ylabel('Median Car Price')\nplt.title(\"Car Price vs Model\")",
"_____no_output_____"
],
[
"print(len(dataset['Model'].unique()))",
"2914\n"
],
[
"dataset.groupby('State')['Mileage'].median().plot.bar()\nplt.xlabel('State')\nplt.ylabel('Median Car Mileage')\nplt.title(\"Car Mileage vs State\")",
"_____no_output_____"
],
[
"print(len(dataset['State'].unique()))",
"59\n"
],
[
"dataset.groupby('Make')['Mileage'].median().plot.bar()\nplt.xlabel('Make')\nplt.ylabel('Median Car Mileage')\nplt.title(\"Car Mileage vs Make\")",
"_____no_output_____"
],
[
"print(len(dataset['Make'].unique()))",
"58\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04a41e9b62fca923454093eb86248aedbda7db5 | 398,406 | ipynb | Jupyter Notebook | python_learner.ipynb | liujh168/tensorflow_learner | ee8ff975014ad23f4c0195b29e5af86d02c07c01 | [
"Apache-2.0"
] | null | null | null | python_learner.ipynb | liujh168/tensorflow_learner | ee8ff975014ad23f4c0195b29e5af86d02c07c01 | [
"Apache-2.0"
] | null | null | null | python_learner.ipynb | liujh168/tensorflow_learner | ee8ff975014ad23f4c0195b29e5af86d02c07c01 | [
"Apache-2.0"
] | null | null | null | 696.513986 | 111,384 | 0.948289 | [
[
[
"# !/usr/bin/env python\n# 测试tensorflow是否安装好\n\nimport numpy as np\nimport tensorflow as tf\n\n# Prepare train data\ntrain_X = np.linspace(-1, 1, 100)\ntrain_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10\n\n# Define the model\nX = tf.placeholder(\"float\")\nY = tf.placeholder(\"float\")\nw = tf.Variable(0.0, name=\"weight\")\nb = tf.Variable(0.0, name=\"bias\")\nloss = tf.square(Y - X * w - b)\ntrain_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)\n\n# Create session to run\nwith tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n\n epoch = 1\n for i in range(10):\n for (x, y) in zip(train_X, train_Y):\n _, w_value, b_value = sess.run([train_op, w, b], feed_dict={X: x, Y: y})\n print(\"Epoch: {}, w: {}, b: {}\".format(epoch, w_value, b_value))\n epoch += 1\n",
"_____no_output_____"
],
[
"### 窗口显示\nimport tkinter\nimport tkinter.messagebox\n\ndef main():\n\tflag = True\n\n\t# 修改标签上的文字\n\tdef change_label_text():\n\t\tnonlocal flag\n\t\tflag = not flag\n\t\tcolor, msg = ('red', 'Hello, world!')\\\n\t\t\tif flag else ('blue', 'Goodbye, world!')\n\t\tlabel.config(text=msg, fg=color)\n\n\t# 确认退出\n\tdef confirm_to_quit():\n\t\tif tkinter.messagebox.askokcancel('温馨提示', '确定要退出吗?'):\n\t\t\ttop.quit()\n\n\t# 创建顶层窗口\n\ttop = tkinter.Tk()\n\t# 设置窗口大小\n\ttop.geometry('240x160')\n\t# 设置窗口标题\n\ttop.title('小游戏')\n\t# 创建标签对象并添加到顶层窗口\n\tlabel = tkinter.Label(top, text='Hello, world!', font='Arial -32', fg='red')\n\tlabel.pack(expand=1)\n\t# 创建一个装按钮的容器\n\tpanel = tkinter.Frame(top)\n\t# 创建按钮对象 指定添加到哪个容器中 通过command参数绑定事件回调函数\n\tbutton1 = tkinter.Button(panel, text='修改', command=change_label_text)\n\tbutton1.pack(side='left')\n\tbutton2 = tkinter.Button(panel, text='退出', command=confirm_to_quit)\n\tbutton2.pack(side='right')\n\tpanel.pack(side='bottom')\n\t# 开启主事件循环\n\ttkinter.mainloop()\n\n\nif __name__ == '__main__':\n\tmain()",
"_____no_output_____"
],
[
"### 读取并显示cifar_10图片\nimport numpy as np\nimport os\nfrom matplotlib import pyplot as plt\nimport pickle\n\ndef load_batch_cifar10(filename,dtype=\"float 64\"):\n path = os.path.join(data_dir_cifar10,filename)#链接字符串,合成文件路径\n fi = open(path, 'rb') # 打开文件\n batch = pickle.load(fi, encoding=\"bytes\") # 读入数据\n fi.close()\n #batch = np.load(path)\n data = batch[b'data']/255.0\n labels = batch[b'labels']#每一个数据的标签\n return data,labels#返回标签矩阵\n\ndef load_cifar10():\n x_train = []#存放训练数据,最终是50000*3072的矩阵\n y_train = []\n for i in range(5):#读取五个文件\n x,t = load_batch_cifar10(\"data_batch_%d\"%(i+1))\n x_train.append(x)\n y_train.append(t)\n x_test ,y_test= load_batch_cifar10(\"test_batch\")#读取测试文件\n x_train = np.concatenate(x_train,axis=0)#将五个文件的矩阵合成一个\n y_train = np.concatenate(y_train, axis=0)\n x_train = x_train.reshape(x_train.shape[0],3,32,32)\n x_test = x_test.reshape(x_test.shape[0],3,32,32)\n return x_train,y_train,x_test,y_test\n\ndata_dir = \"C:\\\\dl\"\ndata_dir_cifar10 = os.path.join(data_dir,\"cifarpy\")\nclass_name_cifar10 = np.load(os.path.join(data_dir_cifar10,\"batches.meta\"))\n#print(class_name_cifar10)\nXtrain,Ytrain,Xtest,Ytest = load_cifar10()\nimlist = []\n\nfor i in range(24): #显示24张图片\n red = Xtrain[i][0].reshape(1024,1)\n green = Xtrain[i][1].reshape(1024,1)\n blue = Xtrain[i][2].reshape(1024,1)\n pic = np.hstack((red,green,blue))\n pic_grab = pic.reshape(32,32,3)#合成一个三维矩阵,每一个点包含红绿蓝三种颜色\n imlist.append(pic_grab)\n \nfig = plt.figure()\n\nfor j in range(1,25):\n ax = fig.add_subplot(4,6,j)#这三个参数是,图片行数,列数,编号\n plt.title(class_name_cifar10['label_names'][Ytrain[j-1]])\n plt.axis('off')#不显示坐标值\n plt.imshow(imlist[j-1])#显示图片\n \nplt.subplots_adjust(wspace=0,hspace=0)\nplt.show()\n#end",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nnp.random.seed(42)\n\n# 采样个数500\nn_samples = 500\ndim = 3\n\n# 先生成一组3维正态分布数据,数据方向完全随机\nsamples = np.random.multivariate_normal(\n np.zeros(dim),\n np.eye(dim),\n n_samples\n)\n\n# 通过把每个样本到原点距离和均匀分布吻合得到球体内均匀分布的样本\nfor i in range(samples.shape[0]):\n r = np.power(np.random.random(), 1.0/3.0)\n samples[i] *= r / np.linalg.norm(samples[i])\n\nupper_samples = []\nlower_samples = []\n\nfor x, y, z in samples:\n # 3x+2y-z=1作为判别平面\n if z > 3*x + 2*y - 1:\n upper_samples.append((x, y, z))\n else:\n lower_samples.append((x, y, z))\n\nfig = plt.figure('3D scatter plot')\nax = fig.add_subplot(111, projection='3d')\n\nuppers = np.array(upper_samples)\nlowers = np.array(lower_samples)\n\n# 用不同颜色不同形状的图标表示平面上下的样本\n# 判别平面上半部分为红色圆点,下半部分为绿色三角\nax.scatter(uppers[:, 0], uppers[:, 1], uppers[:, 2], c='r', marker='o')\nax.scatter(lowers[:, 0], lowers[:, 1], lowers[:, 2], c='g', marker='^')\n\nplt.show()",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nmpl.rcParams['axes.titlesize'] = 20\nmpl.rcParams['xtick.labelsize'] = 16\nmpl.rcParams['ytick.labelsize'] = 16\nmpl.rcParams['axes.labelsize'] = 16\nmpl.rcParams['xtick.major.size'] = 0\nmpl.rcParams['ytick.major.size'] = 0\n\n# 包含了狗,猫和猎豹的最高奔跑速度,还有对应的可视化颜色\nspeed_map = {\n 'dog': (48, '#7199cf'),\n 'cat': (45, '#4fc4aa'),\n 'cheetah': (120, '#e1a7a2')\n}\n\n# 整体图的标题\nfig = plt.figure('Bar chart & Pie chart')\n\n# 在整张图上加入一个子图,121的意思是在一个1行2列的子图中的第一张\nax = fig.add_subplot(121)\nax.set_title('Running speed - bar chart')\n\n# 生成x轴每个元素的位置\nxticks = np.arange(3)\n\n# 定义柱状图每个柱的宽度\nbar_width = 0.5\n\n# 动物名称\nanimals = speed_map.keys()\n\n# 奔跑速度\nspeeds = [x[0] for x in speed_map.values()]\n\n# 对应颜色\ncolors = [x[1] for x in speed_map.values()]\n\n# 画柱状图,横轴是动物标签的位置,纵轴是速度,定义柱的宽度,同时设置柱的边缘为透明\nbars = ax.bar(xticks, speeds, width=bar_width, edgecolor='none')\n\n# 设置y轴的标题\nax.set_ylabel('Speed(km/h)')\n\n# x轴每个标签的具体位置,设置为每个柱的中央\nax.set_xticks(xticks+bar_width/2)\n\n# 设置每个标签的名字\nax.set_xticklabels(animals)\n\n# 设置x轴的范围\nax.set_xlim([bar_width/2-0.5, 3-bar_width/2])\n\n# 设置y轴的范围\nax.set_ylim([0, 125])\n\n# 给每个bar分配指定的颜色\nfor bar, color in zip(bars, colors):\n bar.set_color(color)\n\n# 在122位置加入新的图\nax = fig.add_subplot(122)\nax.set_title('Running speed - pie chart')\n\n# 生成同时包含名称和速度的标签\nlabels = ['{}\\n{} km/h'.format(animal, speed) for animal, speed in zip(animals, speeds)]\n\n# 画饼状图,并指定标签和对应颜色\nax.pie(speeds, labels=labels, colors=colors)\n\nplt.show()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\n# 读取一张照片并显示\nplt.figure('A Little White Dog')\nlittle_dog_img = plt.imread('c:\\\\dl\\\\other\\\\t.jpg')\nplt.imshow(little_dog_img)\n\nplt.show()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\n# 3D图标必须的模块,project='3d'的定义\nfrom mpl_toolkits.mplot3d import Axes3D \n\nnp.random.seed(42)\n\nn_grids = 51 \t# x-y平面的格点数 \nc = n_grids // 2 \t# 中心位置\nnf = 2 \t# 低频成分的个数\n\n# 生成格点\nx = np.linspace(0, 1, n_grids)\ny = np.linspace(0, 1, n_grids)\n\n# x和y是长度为n_grids的array\n# meshgrid会把x和y组合成n_grids*n_grids的array,X和Y对应位置就是所有格点的坐标\nX, Y = np.meshgrid(x, y)\n\n# 生成一个0值的傅里叶谱\nspectrum = np.zeros((n_grids, n_grids), dtype=np.complex)\n\n# 生成一段噪音,长度是(2*nf+1)**2/2\nnoise = [np.complex(x, y) for x, y in np.random.uniform(-1,1,((2*nf+1)**2//2, 2))]\n\n# 傅里叶频谱的每一项和其共轭关于中心对称\nnoisy_block = np.concatenate((noise, [0j], np.conjugate(noise[::-1])))\n\n# 将生成的频谱作为低频成分\nspectrum[c-nf:c+nf+1, c-nf:c+nf+1] = noisy_block.reshape((2*nf+1, 2*nf+1))\n\n# 进行反傅里叶变换\nZ = np.real(np.fft.ifft2(np.fft.ifftshift(spectrum)))\n\n# 创建图表\nfig = plt.figure('3D surface & wire')\n\n# 第一个子图,surface图\nax = fig.add_subplot(1, 2, 1, projection='3d')\n\n# alpha定义透明度,cmap是color map\n# rstride和cstride是两个方向上的采样,越小越精细,lw是线宽\nax.plot_surface(X, Y, Z, alpha=0.7, cmap='jet', rstride=1, cstride=1, lw=0)\n\n# 第二个子图,网线图\nax = fig.add_subplot(1, 2, 2, projection='3d')\nax.plot_wireframe(X, Y, Z, rstride=3, cstride=3, lw=0.5)\n\nplt.show()",
"_____no_output_____"
],
[
"### Python+Opencv进行识别相似图片\n### 来自https://blog.csdn.net/feimengjuan/article/details/51279629\n\n### -*- coding: utf-8 -*-\n### 利用python实现多种方法来实现图像识别\n \nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n \n### 最简单的以灰度直方图作为相似比较的实现\ndef classify_gray_hist(image1,image2,size = (256,256)):\n ###### 先计算直方图\n ##### 几个参数必须用方括号括起来\n ##### 这里直接用灰度图计算直方图,所以是使用第一个通道,\n ##### 也可以进行通道分离后,得到多个通道的直方图\n ##### bins 取为16\n image1 = cv2.resize(image1,size)\n image2 = cv2.resize(image2,size)\n hist1 = cv2.calcHist([image1],[0],None,[256],[0.0,255.0])\n hist2 = cv2.calcHist([image2],[0],None,[256],[0.0,255.0])\n ##### 可以比较下直方图\n plt.plot(range(256),hist1,'r')\n plt.plot(range(256),hist2,'b')\n plt.show()\n ##### 计算直方图的重合度\n degree = 0\n for i in range(len(hist1)):\n if hist1[i] != hist2[i]:\n degree = degree + (1 - abs(hist1[i]-hist2[i])/max(hist1[i],hist2[i]))\n else:\n degree = degree + 1\n degree = degree/len(hist1)\n return degree\n \n### 计算单通道的直方图的相似值\ndef calculate(image1,image2):\n hist1 = cv2.calcHist([image1],[0],None,[256],[0.0,255.0])\n hist2 = cv2.calcHist([image2],[0],None,[256],[0.0,255.0])\n ##### 计算直方图的重合度\n degree = 0\n for i in range(len(hist1)):\n if hist1[i] != hist2[i]:\n degree = degree + (1 - abs(hist1[i]-hist2[i])/max(hist1[i],hist2[i]))\n else:\n degree = degree + 1\n degree = degree/len(hist1)\n return degree\n \n### 通过得到每个通道的直方图来计算相似度\ndef classify_hist_with_split(image1,image2,size = (256,256)):\n ##### 将图像resize后,分离为三个通道,再计算每个通道的相似值\n image1 = cv2.resize(image1,size)\n image2 = cv2.resize(image2,size)\n sub_image1 = cv2.split(image1)\n sub_image2 = cv2.split(image2)\n sub_data = 0\n for im1,im2 in zip(sub_image1,sub_image2):\n sub_data += calculate(im1,im2)\n sub_data = sub_data/3\n return sub_data\n \n### 平均哈希算法计算\ndef classify_aHash(image1,image2):\n image1 = cv2.resize(image1,(8,8))\n image2 = cv2.resize(image2,(8,8))\n gray1 = cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)\n hash1 = getHash(gray1)\n hash2 = getHash(gray2)\n return Hamming_distance(hash1,hash2)\n \ndef classify_pHash(image1,image2):\n image1 = cv2.resize(image1,(32,32))\n image2 = cv2.resize(image2,(32,32))\n gray1 = cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)\n ##### 将灰度图转为浮点型,再进行dct变换\n dct1 = cv2.dct(np.float32(gray1))\n dct2 = cv2.dct(np.float32(gray2))\n ##### 取左上角的8*8,这些代表图片的最低频率\n ##### 这个操作等价于c++中利用opencv实现的掩码操作\n ##### 在python中进行掩码操作,可以直接这样取出图像矩阵的某一部分\n dct1_roi = dct1[0:8,0:8]\n dct2_roi = dct2[0:8,0:8]\n hash1 = getHash(dct1_roi)\n hash2 = getHash(dct2_roi)\n return Hamming_distance(hash1,hash2)\n \n### 输入灰度图,返回hash\ndef getHash(image):\n avreage = np.mean(image)\n hash = []\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n if image[i,j] > avreage:\n hash.append(1)\n else:\n hash.append(0)\n return hash\n \n \n### 计算汉明距离\ndef Hamming_distance(hash1,hash2):\n num = 0\n for index in range(len(hash1)):\n if hash1[index] != hash2[index]:\n num += 1\n return num\n \n \nif __name__ == '__main__':\n img1 = cv2.imread('train\\\\黑炮1.jpg')\n cv2.imshow('img1',img1)\n img2 = cv2.imread('train\\\\红炮1.jpg')\n cv2.imshow('img2',img2)\n degree = classify_gray_hist(img1,img2)\n #degree = classify_hist_with_split(img1,img2)\n #degree = classify_aHash(img1,img2)\n #degree = classify_pHash(img1,img2)\n print(degree)\n cv2.waitKey(0)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04a4949d034c3cd4be96e261bdab44d2eec99f1 | 65,354 | ipynb | Jupyter Notebook | s_6_air-sea_and_advective_fluxes_WS.ipynb | limash/ws_notebook | 7feecc3ea8535f4e51ba1dbec24655acf343e0d6 | [
"CC-BY-3.0"
] | null | null | null | s_6_air-sea_and_advective_fluxes_WS.ipynb | limash/ws_notebook | 7feecc3ea8535f4e51ba1dbec24655acf343e0d6 | [
"CC-BY-3.0"
] | null | null | null | s_6_air-sea_and_advective_fluxes_WS.ipynb | limash/ws_notebook | 7feecc3ea8535f4e51ba1dbec24655acf343e0d6 | [
"CC-BY-3.0"
] | null | null | null | 145.879464 | 32,292 | 0.892662 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport xarray as xr\nimport seaborn as sns\nsns.set()",
"_____no_output_____"
]
],
[
[
"#### Check surface fluxes of CO$_2$",
"_____no_output_____"
]
],
[
[
"# check the data folder to swith to another mixing conditions\n#ds = xr.open_dataset('data/results_so4_adv/5_po75-25_di10e-9/water.nc')\nds = xr.open_dataset('data/results_so4_adv/9_po75-25_di30e-9/water.nc')\n#ds = xr.open_dataset('data/no_denitrification/water.nc')\ndicflux_df = ds['B_C_DIC _flux'].to_dataframe()\noxyflux_df = ds['B_BIO_O2 _flux'].to_dataframe()\ndicflux_surface = dicflux_df.groupby('z_faces').get_group(0)\noxyflux_surface = oxyflux_df.groupby('z_faces').get_group(0)\ndicflux_surface_year = dicflux_surface.loc['2011-01-01':'2011-12-31']\noxyflux_surface_year = oxyflux_surface.loc['2011-01-01':'2011-12-31']",
"_____no_output_____"
],
[
"ox = np.arange(1,366,1)",
"_____no_output_____"
],
[
"plt.plot(ox, dicflux_surface_year); plt.gcf().set_size_inches(10, 2);\nplt.title('Air-sea CO$_2$ flux, positive means upwards');\nplt.xlabel('Day'); plt.ylabel('Flux, mmol m$^{-2}$ d$^{-1}$');",
"_____no_output_____"
]
],
[
[
"#### Advective TA exchange",
"_____no_output_____"
],
[
"These are data on how alkalinity in the Wadden Sea changes due to mixing with the North Sea. Positive means alkalinity comes from the North Sea, negative - goes to the North Sea.",
"_____no_output_____"
]
],
[
[
"nh4ta_df = ds['TA_due_to_NH4'].to_dataframe()\nno3ta_df = ds['TA_due_to_NO3'].to_dataframe()\npo4ta_df = ds['TA_due_to_PO4'].to_dataframe()\nso4ta_df = ds['TA_due_to_SO4'].to_dataframe()",
"_____no_output_____"
],
[
"nh4ta_year = nh4ta_df.loc['2011-01-01':'2011-12-31']\nno3ta_year = no3ta_df.loc['2011-01-01':'2011-12-31']\npo4ta_year = po4ta_df.loc['2011-01-01':'2011-12-31']\nso4ta_year = so4ta_df.loc['2011-01-01':'2011-12-31']",
"_____no_output_____"
],
[
"nh4ta = np.array(nh4ta_year.TA_due_to_NH4.values)\nno3ta = np.array(no3ta_year.TA_due_to_NO3.values)\npo4ta = np.array(po4ta_year.TA_due_to_PO4.values)\nso4ta = np.array(so4ta_year.TA_due_to_SO4.values)\ntotal = nh4ta+no3ta+po4ta+so4ta",
"_____no_output_____"
],
[
"plt.plot(ox, total); plt.gcf().set_size_inches(10, 2);\nplt.title('WS - NS alkalinity flux, positive means to the WS');\nplt.xlabel('Day'); plt.ylabel('Flux, mmol m$^{-2}$ d$^{-1}$');",
"_____no_output_____"
],
[
"year = (('2011-01-01','2011-01-31'), ('2011-02-01','2011-02-28'), ('2011-03-01','2011-03-31'), ('2011-04-01','2011-04-30'), \n ('2011-05-01','2011-05-31'), ('2011-06-01','2011-06-30'), ('2011-07-01','2011-07-31'), ('2011-08-01','2011-08-31'),\n ('2011-09-01','2011-09-30'), ('2011-10-01','2011-10-31'), ('2011-11-01','2011-11-30'), ('2011-12-01','2011-12-31'))",
"_____no_output_____"
],
[
"nh4ta_year = []\nno3ta_year = []\npo4ta_year = []\nso4ta_year = []\nfor month in year:\n nh4ta_month = nh4ta_df.loc[month[0]:month[1]]\n no3ta_month = no3ta_df.loc[month[0]:month[1]]\n po4ta_month = po4ta_df.loc[month[0]:month[1]]\n so4ta_month = so4ta_df.loc[month[0]:month[1]]\n nh4ta_year.append(nh4ta_month['TA_due_to_NH4'].sum())\n no3ta_year.append(no3ta_month['TA_due_to_NO3'].sum())\n po4ta_year.append(po4ta_month['TA_due_to_PO4'].sum())\n so4ta_year.append(so4ta_month['TA_due_to_SO4'].sum())",
"_____no_output_____"
],
[
"nh4ta = np.array(nh4ta_year)\nno3ta = np.array(no3ta_year)\npo4ta = np.array(po4ta_year)\nso4ta = np.array(so4ta_year)\ntotal = nh4ta+no3ta+po4ta+so4ta",
"_____no_output_____"
]
],
[
[
"here and further, units: mmol m$^{-2}$",
"_____no_output_____"
]
],
[
[
"nh4ta",
"_____no_output_____"
],
[
"sum(nh4ta)",
"_____no_output_____"
],
[
"no3ta",
"_____no_output_____"
],
[
"sum(no3ta)",
"_____no_output_____"
],
[
"po4ta",
"_____no_output_____"
],
[
"sum(po4ta)",
"_____no_output_____"
],
[
"so4ta",
"_____no_output_____"
],
[
"sum(so4ta)",
"_____no_output_____"
],
[
"total",
"_____no_output_____"
],
[
"sum(total)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04a4c58c31c99b8e2d8d1fe3004223c56716c9a | 1,157 | ipynb | Jupyter Notebook | Aula_04/Untitled1.ipynb | elcbasilio/letscode | ea2ed5ee80485d98fad2c77a7a50927a7d524793 | [
"MIT"
] | null | null | null | Aula_04/Untitled1.ipynb | elcbasilio/letscode | ea2ed5ee80485d98fad2c77a7a50927a7d524793 | [
"MIT"
] | null | null | null | Aula_04/Untitled1.ipynb | elcbasilio/letscode | ea2ed5ee80485d98fad2c77a7a50927a7d524793 | [
"MIT"
] | null | null | null | 19.283333 | 70 | 0.492653 | [
[
[
"alfabeto = 'abcdefghijklmnopqrstuvwxyz'\nrotacao = -13\nmsg = 'bv zhaqb'\nsaida = ''\nfor entrada in msg:\n if entrada in alfabeto:\n letra = alfabeto.index(entrada)\n saida += alfabeto[(letra + rotacao) % len (alfabeto)]\n else:\n saida += entrada\nprint (saida)",
"oi mundo\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
d04a60c1ad37f8f31a587e34c29bd8ea312313ba | 207,434 | ipynb | Jupyter Notebook | doc/gallery/scatter_with_minimap.ipynb | mattijn/altdoc | 413c31bd28b564cd240234b695d4211896c01d5d | [
"MIT"
] | null | null | null | doc/gallery/scatter_with_minimap.ipynb | mattijn/altdoc | 413c31bd28b564cd240234b695d4211896c01d5d | [
"MIT"
] | null | null | null | doc/gallery/scatter_with_minimap.ipynb | mattijn/altdoc | 413c31bd28b564cd240234b695d4211896c01d5d | [
"MIT"
] | null | null | null | 1,420.780822 | 202,467 | 0.51951 | [
[
[
"# Scatter Plot with Minimap\n\nThis example shows how to create a miniature version of a plot such that creating a selection in the miniature version adjusts the axis limits in another, more detailed view.",
"_____no_output_____"
]
],
[
[
"import altair as alt\nfrom vega_datasets import data\n\nsource = data.seattle_weather()\n\nzoom = alt.selection_interval(encodings=[\"x\", \"y\"])\n\nminimap = (\n alt.Chart(source)\n .mark_point()\n .add_selection(zoom)\n .encode(\n x=\"date:T\",\n y=\"temp_max:Q\",\n color=alt.condition(zoom, \"weather\", alt.value(\"lightgray\")),\n )\n .properties(\n width=200,\n height=200,\n title=\"Minimap -- click and drag to zoom in the detail view\",\n )\n)\n\ndetail = (\n alt.Chart(source)\n .mark_point()\n .encode(\n x=alt.X(\n \"date:T\", scale=alt.Scale(domain={\"selection\": zoom.name, \"encoding\": \"x\"})\n ),\n y=alt.Y(\n \"temp_max:Q\",\n scale=alt.Scale(domain={\"selection\": zoom.name, \"encoding\": \"y\"}),\n ),\n color=\"weather\",\n )\n .properties(width=600, height=400, title=\"Seattle weather -- detail view\")\n)\n\ndetail | minimap",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
d04a71219772b4d01f63d7aed228a7f7098fdc0b | 10,212 | ipynb | Jupyter Notebook | doc/source/ray-core/examples/highly_parallel.ipynb | minds-ai/ray | 09dee4d5dd221513fe9d8d196660115b5ee41962 | [
"Apache-2.0"
] | null | null | null | doc/source/ray-core/examples/highly_parallel.ipynb | minds-ai/ray | 09dee4d5dd221513fe9d8d196660115b5ee41962 | [
"Apache-2.0"
] | 12 | 2022-03-05T05:37:28.000Z | 2022-03-19T07:14:43.000Z | doc/source/ray-core/examples/highly_parallel.ipynb | minds-ai/ray | 09dee4d5dd221513fe9d8d196660115b5ee41962 | [
"Apache-2.0"
] | null | null | null | 27.978082 | 716 | 0.578339 | [
[
[
"# Using Ray for Highly Parallelizable Tasks\n\nWhile Ray can be used for very complex parallelization tasks,\noften we just want to do something simple in parallel.\nFor example, we may have 100,000 time series to process with exactly the same algorithm,\nand each one takes a minute of processing.\n\nClearly running it on a single processor is prohibitive: this would take 70 days.\nEven if we managed to use 8 processors on a single machine,\nthat would bring it down to 9 days. But if we can use 8 machines, each with 16 cores,\nit can be done in about 12 hours.\n\nHow can we use Ray for these types of task? \n\nWe take the simple example of computing the digits of pi.\nThe algorithm is simple: generate random x and y, and if ``x^2 + y^2 < 1``, it's\ninside the circle, we count as in. This actually turns out to be pi/4\n(remembering your high school math).\n\nThe following code (and this notebook) assumes you have already set up your Ray cluster and that you are running on the head node. For more details on how to set up a Ray cluster please see the [Ray Cluster Quickstart Guide](https://docs.ray.io/en/master/cluster/quickstart.html). \n",
"_____no_output_____"
]
],
[
[
"import ray\nimport random\nimport time\nimport math\nfrom fractions import Fraction",
"_____no_output_____"
],
[
"# Let's start Ray\nray.init(address='auto')",
"INFO:anyscale.snapshot_util:Synced git objects for /home/ray/workspace-project-waleed_test1 to /efs/workspaces/shared_objects in 0.07651424407958984s.\nINFO:anyscale.snapshot_util:Created snapshot for /home/ray/workspace-project-waleed_test1 at /tmp/snapshot_2022-05-16T16:38:57.388956_otbjcv41.zip of size 1667695 in 0.014925718307495117s.\nINFO:anyscale.snapshot_util:Content hashes b'f4fcea43e90a69d561bf323a07691536' vs b'f4fcea43e90a69d561bf323a07691536'\nINFO:anyscale.snapshot_util:Content hash unchanged, not saving new snapshot.\nINFO:ray.worker:Connecting to existing Ray cluster at address: 172.31.78.11:9031\n2022-05-16 16:38:57,451\tINFO packaging.py:269 -- Pushing file package 'gcs://_ray_pkg_bf4a08129b7b19b96a1701be1151f9a8.zip' (1.59MiB) to Ray cluster...\n2022-05-16 16:38:57,470\tINFO packaging.py:278 -- Successfully pushed file package 'gcs://_ray_pkg_bf4a08129b7b19b96a1701be1151f9a8.zip'.\n"
]
],
[
[
"We use the ``@ray.remote`` decorator to create a Ray task.\nA task is like a function, except the result is returned asynchronously.\n\nIt also may not run on the local machine, it may run elsewhere in the cluster.\nThis way you can run multiple tasks in parallel,\nbeyond the limit of the number of processors you can have in a single machine.",
"_____no_output_____"
]
],
[
[
"@ray.remote\ndef pi4_sample(sample_count):\n \"\"\"pi4_sample runs sample_count experiments, and returns the \n fraction of time it was inside the circle. \n \"\"\"\n in_count = 0\n for i in range(sample_count):\n x = random.random()\n y = random.random()\n if x*x + y*y <= 1:\n in_count += 1\n return Fraction(in_count, sample_count)\n",
"_____no_output_____"
]
],
[
[
"To get the result of a future, we use ray.get() which \nblocks until the result is complete. ",
"_____no_output_____"
]
],
[
[
"SAMPLE_COUNT = 1000 * 1000\nstart = time.time() \nfuture = pi4_sample.remote(sample_count = SAMPLE_COUNT)\npi4 = ray.get(future)\nend = time.time()\ndur = end - start\nprint(f'Running {SAMPLE_COUNT} tests took {dur} seconds')",
"Running 1000000 tests took 1.4935967922210693 seconds\n"
]
],
[
[
"Now let's see how good our approximation is.",
"_____no_output_____"
]
],
[
[
"pi = pi4 * 4",
"_____no_output_____"
],
[
"float(pi)",
"_____no_output_____"
],
[
"abs(pi-math.pi)/pi",
"_____no_output_____"
]
],
[
[
"Meh. A little off -- that's barely 4 decimal places.\nWhy don't we do it a 100,000 times as much? Let's do 100 billion!",
"_____no_output_____"
]
],
[
[
"FULL_SAMPLE_COUNT = 100 * 1000 * 1000 * 1000 # 100 billion samples! \nBATCHES = int(FULL_SAMPLE_COUNT / SAMPLE_COUNT)\nprint(f'Doing {BATCHES} batches')\nresults = []\nfor _ in range(BATCHES):\n results.append(pi4_sample.remote())\noutput = ray.get(results)",
"Doing 100000 batches\n"
]
],
[
[
"Notice that in the above, we generated a list with 100,000 futures.\nNow all we do is have to do is wait for the result.\n\nDepending on your ray cluster's size, this might take a few minutes.\nBut to give you some idea, if we were to do it on a single machine,\nwhen I ran this it took 0.4 seconds.\n\nOn a single core, that means we're looking at 0.4 * 100000 = about 11 hours. \n\nHere's what the Dashboard looks like: \n\n\n\nSo now, rather than just a single core working on this,\nI have 168 working on the task together. And its ~80% efficient.",
"_____no_output_____"
]
],
[
[
"pi = sum(output)*4/len(output)",
"_____no_output_____"
],
[
"float(pi)",
"_____no_output_____"
],
[
"abs(pi-math.pi)/pi",
"_____no_output_____"
]
],
[
[
"Not bad at all -- we're off by a millionth. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
d04a7dcf6800ded76f98112ed77192ef9a9fe7a1 | 55,088 | ipynb | Jupyter Notebook | module3-ridge-regression/LS_DS_213_assignment.ipynb | Collin-Campbell/DS-Unit-2-Linear-Models | 783c7cbab2c41f062c0b3a321d5c41a9adb56aa9 | [
"MIT"
] | null | null | null | module3-ridge-regression/LS_DS_213_assignment.ipynb | Collin-Campbell/DS-Unit-2-Linear-Models | 783c7cbab2c41f062c0b3a321d5c41a9adb56aa9 | [
"MIT"
] | null | null | null | module3-ridge-regression/LS_DS_213_assignment.ipynb | Collin-Campbell/DS-Unit-2-Linear-Models | 783c7cbab2c41f062c0b3a321d5c41a9adb56aa9 | [
"MIT"
] | null | null | null | 36.123279 | 319 | 0.365778 | [
[
[
"<a href=\"https://colab.research.google.com/github/Collin-Campbell/DS-Unit-2-Linear-Models/blob/master/module3-ridge-regression/LS_DS_213_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Lambda School Data Science\n\n*Unit 2, Sprint 1, Module 3*\n\n---",
"_____no_output_____"
],
[
"# Ridge Regression\n\n## Assignment\n\nWe're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.\n\nBut not just for condos in Tribeca...\n\n- [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.\n- [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.\n- [ ] Do one-hot encoding of categorical features.\n- [ ] Do feature selection with `SelectKBest`.\n- [ ] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand — use the scaler's `fit_transform` method with the train set, and the scaler's `transform` method with the test set)\n- [ ] Get mean absolute error for the test set.\n- [ ] As always, commit your notebook to your fork of the GitHub repo.\n\nThe [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.\n\n\n## Stretch Goals\n\nDon't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.\n\n- [ ] Add your own stretch goal(s) !\n- [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥\n- [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).\n- [ ] Learn more about feature selection:\n - [\"Permutation importance\"](https://www.kaggle.com/dansbecker/permutation-importance)\n - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)\n - [mlxtend](http://rasbt.github.io/mlxtend/) library\n - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)\n - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.\n- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.\n- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.\n- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"%%capture\nimport sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'\n !pip install category_encoders==2.*\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'\n \n# Ignore this Numpy warning when using Plotly Express:\n# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.\nimport warnings\nwarnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')",
"_____no_output_____"
],
[
"import pandas as pd\nimport pandas_profiling\n\n# Read New York City property sales data\ndf = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv',\n parse_dates=['SALE DATE'],\n index_col=('SALE DATE'))\n",
"_____no_output_____"
],
[
"# Changing space to underscore in index name\n\ndf.index.name = 'SALE_DATE'",
"_____no_output_____"
],
[
"# Change column names: replace spaces with underscores\ndf.columns = [col.replace(' ', '_') for col in df]\n\n# SALE_PRICE was read as strings.\n# Remove symbols, convert to integer\ndf['SALE_PRICE'] = (\n df['SALE_PRICE']\n .str.replace('$','')\n .str.replace('-','')\n .str.replace(',','')\n .astype(int)\n)",
"_____no_output_____"
],
[
"# BOROUGH is a numeric column, but arguably should be a categorical feature,\n# so convert it from a number to a string\ndf['BOROUGH'] = df['BOROUGH'].astype(str)",
"_____no_output_____"
],
[
"# Reduce cardinality for NEIGHBORHOOD feature\n\n# Get a list of the top 10 neighborhoods\ntop10 = df['NEIGHBORHOOD'].value_counts()[:10].index\n\n# At locations where the neighborhood is NOT in the top 10, \n# replace the neighborhood with 'OTHER'\ndf.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'",
"_____no_output_____"
],
[
"print(df.shape)\ndf.head()",
"(23040, 20)\n"
],
[
"# Getting rid of commas from land square ft and converting all values to floats\n\ndf['LAND_SQUARE_FEET'] = df['LAND_SQUARE_FEET'].str.replace(',','')",
"_____no_output_____"
],
[
"df['LAND_SQUARE_FEET'] = df['LAND_SQUARE_FEET'].replace({'': np.NaN, '########': np.NaN})",
"_____no_output_____"
],
[
"df['LAND_SQUARE_FEET'] = df['LAND_SQUARE_FEET'].astype(float)",
"_____no_output_____"
],
[
"df['LAND_SQUARE_FEET'].value_counts()",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 23040 entries, 2019-01-01 to 2019-04-30\nData columns (total 20 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 BOROUGH 23040 non-null object \n 1 NEIGHBORHOOD 23040 non-null object \n 2 BUILDING_CLASS_CATEGORY 23040 non-null object \n 3 TAX_CLASS_AT_PRESENT 23039 non-null object \n 4 BLOCK 23040 non-null int64 \n 5 LOT 23040 non-null int64 \n 6 EASE-MENT 0 non-null float64\n 7 BUILDING_CLASS_AT_PRESENT 23039 non-null object \n 8 ADDRESS 23040 non-null object \n 9 APARTMENT_NUMBER 5201 non-null object \n 10 ZIP_CODE 23039 non-null float64\n 11 RESIDENTIAL_UNITS 23039 non-null float64\n 12 COMMERCIAL_UNITS 23039 non-null float64\n 13 TOTAL_UNITS 23039 non-null float64\n 14 LAND_SQUARE_FEET 22986 non-null float64\n 15 GROSS_SQUARE_FEET 23039 non-null float64\n 16 YEAR_BUILT 23005 non-null float64\n 17 TAX_CLASS_AT_TIME_OF_SALE 23040 non-null int64 \n 18 BUILDING_CLASS_AT_TIME_OF_SALE 23040 non-null object \n 19 SALE_PRICE 23040 non-null int64 \ndtypes: float64(8), int64(4), object(8)\nmemory usage: 3.7+ MB\n"
],
[
"def wrangle(df):\n # Making a copy of the dataset\n df = df.copy()\n\n # Making a subset of the data where BUILDING_CLASS_CATEGORY == '01 ONE FAMILY \n # DWELLINGS' and the sale price was more than 100 thousand and less than 2 million\n df = df[(df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS') & \n (df['SALE_PRICE'] > 100000) & \n (df['SALE_PRICE'] < 2000000)]\n\n # Dropping high-cardinality categorical columns\n hc_cols = [col for col in df.select_dtypes('object').columns\n if df[col].nunique() > 11]\n df.drop(columns=hc_cols, inplace=True)\n\n return df",
"_____no_output_____"
],
[
"df = wrangle(df)",
"_____no_output_____"
],
[
"df['TAX_CLASS_AT_TIME_OF_SALE'].value_counts()",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 3151 entries, 2019-01-01 to 2019-04-30\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 BOROUGH 3151 non-null object \n 1 NEIGHBORHOOD 3151 non-null object \n 2 BUILDING_CLASS_CATEGORY 3151 non-null object \n 3 TAX_CLASS_AT_PRESENT 3151 non-null object \n 4 BLOCK 3151 non-null int64 \n 5 LOT 3151 non-null int64 \n 6 EASE-MENT 0 non-null float64\n 7 APARTMENT_NUMBER 1 non-null object \n 8 ZIP_CODE 3151 non-null float64\n 9 RESIDENTIAL_UNITS 3151 non-null float64\n 10 COMMERCIAL_UNITS 3151 non-null float64\n 11 TOTAL_UNITS 3151 non-null float64\n 12 LAND_SQUARE_FEET 3151 non-null float64\n 13 GROSS_SQUARE_FEET 3151 non-null float64\n 14 YEAR_BUILT 3151 non-null float64\n 15 TAX_CLASS_AT_TIME_OF_SALE 3151 non-null int64 \n 16 BUILDING_CLASS_AT_TIME_OF_SALE 3151 non-null object \n 17 SALE_PRICE 3151 non-null int64 \ndtypes: float64(8), int64(4), object(6)\nmemory usage: 467.7+ KB\n"
],
[
"# Dropping NaN columns, building class column since now they are all the same,\n# and tax class at time of sale column since they are also all identical\n\ndf = df.drop(['BUILDING_CLASS_CATEGORY', 'EASE-MENT', 'APARTMENT_NUMBER', 'TAX_CLASS_AT_TIME_OF_SALE'], axis=1)",
"_____no_output_____"
],
[
"print(df.shape)\ndf.head()",
"(3151, 14)\n"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 3151 entries, 2019-01-01 to 2019-04-30\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 BOROUGH 3151 non-null object \n 1 NEIGHBORHOOD 3151 non-null object \n 2 TAX_CLASS_AT_PRESENT 3151 non-null object \n 3 BLOCK 3151 non-null int64 \n 4 LOT 3151 non-null int64 \n 5 ZIP_CODE 3151 non-null float64\n 6 RESIDENTIAL_UNITS 3151 non-null float64\n 7 COMMERCIAL_UNITS 3151 non-null float64\n 8 TOTAL_UNITS 3151 non-null float64\n 9 LAND_SQUARE_FEET 3151 non-null float64\n 10 GROSS_SQUARE_FEET 3151 non-null float64\n 11 YEAR_BUILT 3151 non-null float64\n 12 BUILDING_CLASS_AT_TIME_OF_SALE 3151 non-null object \n 13 SALE_PRICE 3151 non-null int64 \ndtypes: float64(7), int64(3), object(4)\nmemory usage: 369.3+ KB\n"
],
[
"# Splitting Data\n\n# splitting into target and feature matrix\ntarget = 'SALE_PRICE'\ny = df[target]\nX = df.drop(columns=target)",
"_____no_output_____"
],
[
"# splitting into training and test sets:\n# Using data from January — March 2019 to train. Using data from April 2019 to test\n\ncutoff = '2019-04-01'\nmask = X.index < cutoff\n\nX_train, y_train = X.loc[mask], y.loc[mask]\nX_test, y_test = X.loc[~mask], y.loc[~mask]",
"_____no_output_____"
],
[
"# Establishing Baseline\n\ny_pred = [y_train.mean()] * len(y_train)",
"_____no_output_____"
],
[
"from sklearn.metrics import mean_absolute_error\n\nprint('Baseline MAE:', mean_absolute_error(y_train, y_pred))",
"Baseline MAE: 214721.52773001452\n"
],
[
"# Applying transformer: OneHotEncoder\n\n# Step 1: Importing the transformer class\nfrom category_encoders import OneHotEncoder, OrdinalEncoder\n\n# Step 2: Instantiating the transformer\nohe = OneHotEncoder(use_cat_names=True)\n\n# Step 3: Fitting my TRAINING data to the transfomer\nohe.fit(X_train)\n\n# Step 4: Transforming\nXT_train = ohe.transform(X_train)",
"_____no_output_____"
],
[
"print(len(XT_train.columns))\nXT_train.columns",
"33\n"
],
[
"print(XT_train.shape)\nXT_train.head()",
"(2507, 33)\n"
],
[
"# Performing feature selection with SelectKBest\n\n# Importing the feature selector utility:\nfrom sklearn.feature_selection import SelectKBest, f_regression\n\n# Creating the selector object with the best k=1 features:\nselector = SelectKBest(score_func=f_regression, k=1)\n\n# Running the selector on the training data:\nXT_train_selected = selector.fit_transform(XT_train, y_train)\n\n# Finding the features that were selected:\nselected_mask = selector.get_support()\nall_features = XT_train.columns\nselected_feature = all_features[selected_mask]\n\nprint('The selected feature: ', selected_feature[0])",
"The selected feature: GROSS_SQUARE_FEET\n"
],
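[
"# Added sketch (hedged): the SelectKBest result above is printed but not fed into the model\n# below, which is fit on all one-hot features. One way to actually use the selection,\n# assuming a larger, hypothetical k (k=15 is an arbitrary choice, not from the assignment):\nselector_k = SelectKBest(score_func=f_regression, k=15)\nXT_train_sel = selector_k.fit_transform(XT_train, y_train)    # fit the selector on train only\nXT_test_sel = selector_k.transform(ohe.transform(X_test))     # reuse the fitted selector on test\nprint('Selected features:', list(XT_train.columns[selector_k.get_support()]))",
"_____no_output_____"
],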
[
"# Scaling the ohe data with StandardScaler:\n\nfrom sklearn.preprocessing import StandardScaler\n\nss = StandardScaler()\nss.fit(XT_train)\nXTT_train = ss.transform(XT_train)",
"_____no_output_____"
],
[
"# Building Ridge Regression Model:\n\nfrom sklearn.linear_model import Ridge\n\nmodel = Ridge(alpha=150)\nmodel.fit(XTT_train, y_train)",
"_____no_output_____"
],
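[
"# Added stretch-goal sketch (hedged): RidgeCV can choose alpha by cross-validation instead of\n# the hand-picked alpha=150 above; the candidate grid below is an assumption.\nfrom sklearn.linear_model import RidgeCV\n\nridge_cv = RidgeCV(alphas=[0.1, 1.0, 10.0, 50.0, 150.0, 500.0])\nridge_cv.fit(XTT_train, y_train)\nprint('Best alpha found by RidgeCV:', ridge_cv.alpha_)",
"_____no_output_____"
],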
[
"# Checking metrics:\n\nXT_test = ohe.transform(X_test)\nXTT_test = ss.transform(XT_test)\n\nprint('RIDGE train MAE', mean_absolute_error(y_train, model.predict(XTT_train)))\nprint('RIDGE test MAE', mean_absolute_error(y_test, model.predict(XTT_test)))",
"RIDGE train MAE 151103.0875222934\nRIDGE test MAE 155194.34287168915\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04a82a74319f2378ec79b7c7fb0b4c356bb72b3 | 1,893 | ipynb | Jupyter Notebook | python_exe84.ipynb | caiosouza25/Python-cursoemvideo | 08f93309e2d17c3e011f158328d491ad01dad219 | [
"MIT"
] | null | null | null | python_exe84.ipynb | caiosouza25/Python-cursoemvideo | 08f93309e2d17c3e011f158328d491ad01dad219 | [
"MIT"
] | null | null | null | python_exe84.ipynb | caiosouza25/Python-cursoemvideo | 08f93309e2d17c3e011f158328d491ad01dad219 | [
"MIT"
] | null | null | null | 31.032787 | 88 | 0.437401 | [
[
[
"#leia o nome e peso de varias pessoas e guarde em uma lista no final mostre\n# [A] - quantas pessoas foram cadastradas\n# [B] - uma listagem com as pessoas mais pesadas\n# [C] - uma listagem com as pessoas mais leves\n\npessoas = []\nprinc = []\nleve = pesado = 0\ncontador = 0\nwhile True:\n pessoas.append(str(input('digite seu nome: ')))\n pessoas.append(float(input('digite seu peso: ')))\n if len(princ) == 0:\n pesado = leve = pessoas[1]\n else:\n if pessoas[1] > pesado:\n pesado = pessoas[1]\n if pessoas[1] < leve:\n leve = pessoas[1]\n princ.append(pessoas[:])\n pessoas.clear()\n contador += 1\n continuar = str(input('deseja continuar [S/N]: ')).strip().upper()\n if continuar == 'N':\n break\nprint(f'foram cadastradas {contador} pessoas')\nprint(f'o maior peso foi {pesado}kg')\nfor p in princ:\n if p[1] == pesado:\n print(f'{p[0]}')\nprint(f'o menor peso foi {leve}kg')\nfor p in princ:\n if p[1] == leve:\n print(f'{p[0]}')\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
d04a89407f2be449c065cb63422c2b5e3c7e791d | 27,101 | ipynb | Jupyter Notebook | training dataset/ResNet_colab_zirc_dims_train_model.ipynb | MCSitar/colab-zirc-dims | 1669bbac762e0ee117e27d189313547b8e9ca404 | [
"Apache-2.0"
] | 2 | 2021-11-04T01:15:21.000Z | 2021-11-07T02:27:35.000Z | training dataset/ResNet_colab_zirc_dims_train_model.ipynb | MCSitar/colab-zirc-dims | 1669bbac762e0ee117e27d189313547b8e9ca404 | [
"Apache-2.0"
] | null | null | null | training dataset/ResNet_colab_zirc_dims_train_model.ipynb | MCSitar/colab-zirc-dims | 1669bbac762e0ee117e27d189313547b8e9ca404 | [
"Apache-2.0"
] | null | null | null | 41.438838 | 480 | 0.530349 | [
[
[
"# Zircon model training notebook; (extensively) modified from Detectron2 training tutorial\n\nThis Colab Notebook will allow users to train new models to detect and segment detrital zircon from RL images using Detectron2 and the training dataset provided in the colab_zirc_dims repo. It is set up to train a Mask RCNN model (ResNet depth=101), but could be modified for other instance segmentation models provided that they are supported by Detectron2.\n\nThe training dataset should be uploaded to the user's Google Drive before running this notebook.",
"_____no_output_____"
],
[
"## Install detectron2",
"_____no_output_____"
]
],
[
[
"!pip install pyyaml==5.1\n\nimport torch\nTORCH_VERSION = \".\".join(torch.__version__.split(\".\")[:2])\nCUDA_VERSION = torch.__version__.split(\"+\")[-1]\nprint(\"torch: \", TORCH_VERSION, \"; cuda: \", CUDA_VERSION)\n# Install detectron2 that matches the above pytorch version\n# See https://detectron2.readthedocs.io/tutorials/install.html for instructions\n!pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/$CUDA_VERSION/torch$TORCH_VERSION/index.html\nexit(0) # Automatically restarts runtime after installation",
"_____no_output_____"
],
[
"# Some basic setup:\n# Setup detectron2 logger\nimport detectron2\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\n\n# import some common libraries\nimport numpy as np\nimport os, json, cv2, random\nfrom google.colab.patches import cv2_imshow\nimport copy\nimport time\nimport datetime\nimport logging\nimport random\nimport shutil\nimport torch\n\n# import some common detectron2 utilities\nfrom detectron2.engine.hooks import HookBase\nfrom detectron2 import model_zoo\nfrom detectron2.evaluation import inference_context, COCOEvaluator\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.utils.logger import log_every_n_seconds\nfrom detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_train_loader, DatasetMapper, build_detection_test_loader\nimport detectron2.utils.comm as comm\nfrom detectron2.data import detection_utils as utils\nfrom detectron2.config import LazyConfig\nimport detectron2.data.transforms as T",
"_____no_output_____"
]
],
[
[
"## Define Augmentations",
"_____no_output_____"
],
[
"The cell below defines augmentations used while training to ensure that models never see the same exact image twice during training. This mitigates overfitting and allows models to achieve substantially higher accuracy in their segmentations/measurements.",
"_____no_output_____"
]
],
[
[
"custom_transform_list = [T.ResizeShortestEdge([800,800]), #resize shortest edge of image to 800 pixels\n T.RandomCrop('relative', (0.95, 0.95)), #randomly crop an area (95% size of original) from image\n T.RandomLighting(100), #minor lighting randomization\n T.RandomContrast(.85, 1.15), #minor contrast randomization\n T.RandomFlip(prob=.5, horizontal=False, vertical=True), #random vertical flipping\n T.RandomFlip(prob=.5, horizontal=True, vertical=False), #and horizontal flipping\n T.RandomApply(T.RandomRotation([-30, 30], False), prob=.8), #random (80% probability) rotation up to 30 degrees; \\\n # more rotation does not seem to improve results\n T.ResizeShortestEdge([800,800])] # resize img again for uniformity",
"_____no_output_____"
]
],
[
[
"## Mount Google Drive, set paths to dataset, model saving directories",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"_____no_output_____"
],
[
"#@markdown ### Add path to training dataset directory\ndataset_dir = '/content/drive/MyDrive/training_dataset' #@param {type:\"string\"}\n\n#@markdown ### Add path to model saving directory (automatically created if it does not yet exist)\nmodel_save_dir = '/content/drive/MyDrive/NAME FOR MODEL SAVING FOLDER HERE' #@param {type:\"string\"}\n\nos.makedirs(model_save_dir, exist_ok=True)",
"_____no_output_____"
]
],
[
[
"## Define dataset mapper, training, loss eval functions",
"_____no_output_____"
]
],
[
[
"from detectron2.engine import DefaultTrainer\nfrom detectron2.data import DatasetMapper\nfrom detectron2.structures import BoxMode\n\n# a function to convert Via image annotation .json dict format to Detectron2 \\\n# training input dict format\ndef get_zircon_dicts(img_dir):\n json_file = os.path.join(img_dir, \"via_region_data.json\")\n with open(json_file) as f:\n imgs_anns = json.load(f)['_via_img_metadata']\n\n dataset_dicts = []\n for idx, v in enumerate(imgs_anns.values()):\n record = {}\n \n filename = os.path.join(img_dir, v[\"filename\"])\n height, width = cv2.imread(filename).shape[:2]\n \n record[\"file_name\"] = filename\n record[\"image_id\"] = idx\n record[\"height\"] = height\n record[\"width\"] = width\n \n #annos = v[\"regions\"]\n annos = {}\n for n, eachitem in enumerate(v['regions']):\n annos[str(n)] = eachitem\n objs = []\n for _, anno in annos.items():\n #assert not anno[\"region_attributes\"]\n anno = anno[\"shape_attributes\"]\n px = anno[\"all_points_x\"]\n py = anno[\"all_points_y\"]\n poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]\n poly = [p for x in poly for p in x]\n\n obj = {\n \"bbox\": [np.min(px), np.min(py), np.max(px), np.max(py)],\n \"bbox_mode\": BoxMode.XYXY_ABS,\n \"segmentation\": [poly],\n \"category_id\": 0,\n }\n objs.append(obj)\n record[\"annotations\"] = objs\n dataset_dicts.append(record)\n return dataset_dicts\n\n# loss eval hook for getting vaidation loss, copying to metrics.json; \\\n# from https://gist.github.com/ortegatron/c0dad15e49c2b74de8bb09a5615d9f6b\nclass LossEvalHook(HookBase):\n def __init__(self, eval_period, model, data_loader):\n self._model = model\n self._period = eval_period\n self._data_loader = data_loader\n \n def _do_loss_eval(self):\n # Copying inference_on_dataset from evaluator.py\n total = len(self._data_loader)\n num_warmup = min(5, total - 1)\n \n start_time = time.perf_counter()\n total_compute_time = 0\n losses = []\n for idx, inputs in enumerate(self._data_loader): \n if idx == num_warmup:\n start_time = time.perf_counter()\n total_compute_time = 0\n start_compute_time = time.perf_counter()\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n total_compute_time += time.perf_counter() - start_compute_time\n iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)\n seconds_per_img = total_compute_time / iters_after_start\n if idx >= num_warmup * 2 or seconds_per_img > 5:\n total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start\n eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))\n log_every_n_seconds(\n logging.INFO,\n \"Loss on Validation done {}/{}. {:.4f} s / img. 
ETA={}\".format(\n idx + 1, total, seconds_per_img, str(eta)\n ),\n n=5,\n )\n loss_batch = self._get_loss(inputs)\n losses.append(loss_batch)\n mean_loss = np.mean(losses)\n self.trainer.storage.put_scalar('validation_loss', mean_loss)\n comm.synchronize()\n\n return losses\n \n def _get_loss(self, data):\n # How loss is calculated on train_loop \n metrics_dict = self._model(data)\n metrics_dict = {\n k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)\n for k, v in metrics_dict.items()\n }\n total_losses_reduced = sum(loss for loss in metrics_dict.values())\n return total_losses_reduced\n \n \n def after_step(self):\n next_iter = self.trainer.iter + 1\n is_final = next_iter == self.trainer.max_iter\n if is_final or (self._period > 0 and next_iter % self._period == 0):\n self._do_loss_eval()\n\n#trainer for zircons which incorporates augmentation, hooks for eval\nclass ZirconTrainer(DefaultTrainer):\n \n @classmethod\n def build_train_loader(cls, cfg):\n #return a custom train loader with augmentations; recompute_boxes \\\n # is important given cropping, rotation augs\n return build_detection_train_loader(cfg, mapper=\n DatasetMapper(cfg, is_train=True, recompute_boxes = True,\n augmentations = custom_transform_list\n ),\n )\n\n @classmethod\n def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n return COCOEvaluator(dataset_name, cfg, True, output_folder)\n \n #set up validation loss eval hook\n def build_hooks(self):\n hooks = super().build_hooks()\n hooks.insert(-1,LossEvalHook(\n cfg.TEST.EVAL_PERIOD,\n self.model,\n build_detection_test_loader(\n self.cfg,\n self.cfg.DATASETS.TEST[0],\n DatasetMapper(self.cfg,True)\n )\n ))\n return hooks\n",
"_____no_output_____"
]
],
[
[
"## Import train, val catalogs",
"_____no_output_____"
]
],
[
[
"#registers training, val datasets (converts annotations using get_zircon_dicts)\nfor d in [\"train\", \"val\"]:\n DatasetCatalog.register(\"zircon_\" + d, lambda d=d: get_zircon_dicts(dataset_dir + \"/\" + d))\n MetadataCatalog.get(\"zircon_\" + d).set(thing_classes=[\"zircon\"])\nzircon_metadata = MetadataCatalog.get(\"zircon_train\")\n\ntrain_cat = DatasetCatalog.get(\"zircon_train\")",
"_____no_output_____"
]
],
[
[
"## Visualize train dataset",
"_____no_output_____"
]
],
[
[
"# visualize random sample from training dataset\ndataset_dicts = get_zircon_dicts(os.path.join(dataset_dir, 'train'))\nfor d in random.sample(dataset_dicts, 4): #change int here to change sample size\n img = cv2.imread(d[\"file_name\"])\n visualizer = Visualizer(img[:, :, ::-1], metadata=zircon_metadata, scale=0.5)\n out = visualizer.draw_dataset_dict(d)\n cv2_imshow(out.get_image()[:, :, ::-1])\n",
"_____no_output_____"
]
],
[
[
"# Define save to Drive function",
"_____no_output_____"
]
],
[
[
"# a function to save models (with iteration number in name), metrics to drive; \\\n# important in case training crashes or is left unattended and disconnects. \\\ndef save_outputs_to_drive(model_name, iters):\n root_output_dir = os.path.join(model_save_dir, model_name) #output_dir = save dir from user input\n\n #creates individual model output directory if it does not already exist\n os.makedirs(root_output_dir, exist_ok=True)\n #creates a name for this version of model; include iteration number\n curr_iters_str = str(round(iters/1000, 1)) + 'k'\n curr_model_name = model_name + '_' + curr_iters_str + '.pth'\n model_save_pth = os.path.join(root_output_dir, curr_model_name)\n\n #get most recent model, current metrics, copy to drive\n model_path = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n metrics_path = os.path.join(cfg.OUTPUT_DIR, 'metrics.json')\n shutil.copy(model_path, model_save_pth)\n shutil.copy(metrics_path, root_output_dir)",
"_____no_output_____"
]
],
[
[
"## Build, train model\n",
"_____no_output_____"
],
[
"### Set some parameters for training",
"_____no_output_____"
]
],
[
[
"#@markdown ### Add a base name for the model\nmodel_save_name = 'your model name here' #@param {type:\"string\"}\n\n#@markdown ### Final iteration before training stops\nfinal_iteration = 8000 #@param {type:\"slider\", min:3000, max:15000, step:1000}",
"_____no_output_____"
]
],
[
[
"### Actually build and train model",
"_____no_output_____"
]
],
[
[
"#train from a pre-trained Mask RCNN model\ncfg = get_cfg()\n\n# train from base model: Default Mask RCNN\ncfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml\"))\n# Load starting weights (COCO trained) from Detectron2 model zoo.\ncfg.MODEL.WEIGHTS = \"https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/model_final_a3ec72.pkl\"\n\n\ncfg.DATASETS.TRAIN = (\"zircon_train\",) #load training dataset\ncfg.DATASETS.TEST = (\"zircon_val\",) # load validation dataset\ncfg.DATALOADER.NUM_WORKERS = 2\ncfg.SOLVER.IMS_PER_BATCH = 2 #2 ims per batch seems to be good for model generalization\ncfg.SOLVER.BASE_LR = 0.00025 # low but reasonable learning rate given pre-training; \\\n # by default initializes with a 1000 iteration warmup\ncfg.SOLVER.MAX_ITER = 2000 #train for 2000 iterations before 1st save\ncfg.SOLVER.GAMMA = 0.5\n\n#decay learning rate by factor of GAMMA every 1000 iterations after 2000 iterations \\\n# and until 10000 iterations This works well for current version of training \\\n# dataset but should be modified (probably a longer interval) if dataset is ever\\\n# extended.\ncfg.SOLVER.STEPS = (1999, 2999, 3999, 4999, 5999, 6999, 7999, 8999, 9999)\n\ncfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 # use default ROI heads batch size\ncfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # only class here is zircon\n\ncfg.MODEL.RPN.NMS_THRESH = 0.1 #sets NMS threshold lower than default; should(?) eliminate overlapping regions\ncfg.TEST.EVAL_PERIOD = 200 # validation eval every 200 iterations\n\nos.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\ntrainer = ZirconTrainer(cfg) #our zircon trainer, w/ built-in augs and val loss eval\ntrainer.resume_or_load(resume=False)\ntrainer.train() #start training\n\n# stop training and save for the 1st time after 2000 iterations\nsave_outputs_to_drive(model_save_name, 2000)\n\n# Saves, cold restarts training from saved model weights every 1000 iterations \\\n# until final iteration. This should probably be done via hooks without stopping \\\n# training but *seems* to produce faster decrease in validation loss.\nfor each_iters in [iter*1000 for iter in list(range(3, \n int(final_iteration/1000) + 1,\n 1))]:\n #reload model with last iteration model weights\n resume_model_path = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n cfg.MODEL.WEIGHTS = resume_model_path\n cfg.SOLVER.MAX_ITER = each_iters #increase max iterations\n trainer = ZirconTrainer(cfg)\n trainer.resume_or_load(resume=True)\n trainer.train() #restart training\n #save again\n save_outputs_to_drive(model_save_name, each_iters)",
"_____no_output_____"
],
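[
"# Added note (hedged alternative, not used in this notebook): instead of the save/cold-restart\n# loop above, Detectron2 can write periodic checkpoints during a single uninterrupted run via\n# cfg.SOLVER.CHECKPOINT_PERIOD; the 1000-iteration period below is an assumption.\n# cfg.SOLVER.CHECKPOINT_PERIOD = 1000   # writes model_00XXXXX.pth files into cfg.OUTPUT_DIR\n# trainer = ZirconTrainer(cfg)\n# trainer.resume_or_load(resume=False)\n# trainer.train()",
"_____no_output_____"
],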
[
"# open tensorboard training metrics curves (metrics.json):\n%load_ext tensorboard\n%tensorboard --logdir output",
"_____no_output_____"
]
],
[
[
"## Inference & evaluation with final trained model\n\n",
"_____no_output_____"
],
[
"Initialize model from saved weights:",
"_____no_output_____"
]
],
[
[
"cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\") # final model; modify path to other non-final model to view their segmentations\ncfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set a custom testing threshold\ncfg.MODEL.RPN.NMS_THRESH = 0.1\npredictor = DefaultPredictor(cfg)",
"_____no_output_____"
]
],
[
[
"View model segmentations for random sample of images from zircon validation dataset:",
"_____no_output_____"
]
],
[
[
"from detectron2.utils.visualizer import ColorMode\ndataset_dicts = get_zircon_dicts(os.path.join(dataset_dir, 'val'))\nfor d in random.sample(dataset_dicts, 5): \n im = cv2.imread(d[\"file_name\"])\n outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format\n v = Visualizer(im[:, :, ::-1],\n metadata=zircon_metadata, \n scale=1.5, \n instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models\n )\n out = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n cv2_imshow(out.get_image()[:, :, ::-1])",
"_____no_output_____"
]
],
[
[
"Validation eval with COCO API metric:",
"_____no_output_____"
]
],
[
[
"from detectron2.evaluation import COCOEvaluator, inference_on_dataset\nfrom detectron2.data import build_detection_test_loader\nevaluator = COCOEvaluator(\"zircon_val\", (\"bbox\", \"segm\"), False, output_dir=\"./output/\")\nval_loader = build_detection_test_loader(cfg, \"zircon_val\")\nprint(inference_on_dataset(trainer.model, val_loader, evaluator))",
"_____no_output_____"
]
],
[
[
"## Final notes:\n\nTo use newly-trained models in colab_zirc_dims:\n\n#### Option A:\nModify the cell that initializes model(s) in colab_zirc_dims processing notebooks:\n```\ncfg.merge_from_file(model_zoo.get_config_file(DETECTRON2 BASE CONFIG FILE LINK FOR YOUR MODEL HERE))\ncfg.MODEL.RESNETS.DEPTH = RESNET DEPTH FOR YOUR MODEL (E.G., 101) HERE\ncfg.MODEL.WEIGHTS = PATH TO YOUR MODEL IN YOUR GOOGLE DRIVE HERE\n```\n\n#### Option B (more complicated but potentially useful for many models):\nThe dynamic model selection tool in colab_zirc_dims is populated from a .json file model library dictionary, which is by default [the current version on the GitHub repo.](https://github.com/MCSitar/colab_zirc_dims/blob/main/czd_model_library.json) The 'url' key in the dict will work with either an AWS download link for the model or the path to model in your Google Drive.\n\nTo use a custom model library dictionary:\nModify a copy of the colab_zirc_dims [.json file model library dictionary](https://github.com/MCSitar/colab_zirc_dims/blob/main/czd_model_library.json) to include download link(s)/Drive path(s) and metadata (e.g., resnet depth and config file) for your model(s). Upload this .json file to your Google Drive and change the 'model_lib_loc' variable in a processing Notebook to the .json's path for dynamic download and loading of this and other models within the Notebook.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d04a8b5609315018c7c59cf1f7f3bf52f1c5cc62 | 864,444 | ipynb | Jupyter Notebook | notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb | mmData/Hack4Good | 61d79a4c56872ed5fabb280b8e9f91c4c4f71ece | [
"RSA-MD"
] | null | null | null | notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb | mmData/Hack4Good | 61d79a4c56872ed5fabb280b8e9f91c4c4f71ece | [
"RSA-MD"
] | null | null | null | notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb | mmData/Hack4Good | 61d79a4c56872ed5fabb280b8e9f91c4c4f71ece | [
"RSA-MD"
] | null | null | null | 849.159136 | 186,228 | 0.950627 | [
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n%matplotlib inline",
"_____no_output_____"
],
[
"from sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics import confusion_matrix\n\ndef plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n \n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n\n fig, ax = plt.subplots(figsize=(14,7))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.grid(False)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"_____no_output_____"
]
],
[
[
"# Analysis",
"_____no_output_____"
]
],
[
[
"# Prepare data\ndemographic = pd.read_csv('../data/processed/demographic.csv')\nseverity = pd.read_csv('../data/processed/severity.csv', index_col=0)\nfeatures = demographic.columns \nX = demographic.astype(np.float64)\ny = (severity >= 4).sum(axis=1)",
"_____no_output_____"
],
[
"needs_to_label = {0:'no needs', 1:'low_needs', 2:'moderate needs', 3:'high needs', 4:'very high needs'}\nlabels = [\"no needs\", \"low needs\", \"moderate needs\", \"high needs\", \"very high needs\"]\nseverity_to_needs = {0:0, 1:1, 2:1, 3:2, 4:2, 5:3, 6:3, 7:4, 8:4}\ny = np.array([severity_to_needs[i] for i in y])\n# Color vector, for illustration purposes\ncolors = {0:'b', 1:'r', 2:'g', 3:'c', 4:'y'}\ny_c = np.array([colors[i] for i in y])",
"_____no_output_____"
]
],
[
[
"## Understanding the features",
"_____no_output_____"
]
],
[
[
"from yellowbrick.features import Rank2D\nfrom yellowbrick.features.manifold import Manifold\nfrom yellowbrick.features.pca import PCADecomposition",
"_____no_output_____"
],
[
"from yellowbrick.style import set_palette\nset_palette('flatui')",
"_____no_output_____"
]
],
[
[
"### Feature covariance plot",
"_____no_output_____"
]
],
[
[
"visualizer = Rank2D(algorithm='covariance')\nvisualizer.fit(X, y)\nvisualizer.transform(X)\nvisualizer.poof()",
"/home/muhadriy/.conda/envs/ml/lib/python3.6/site-packages/yellowbrick/features/rankd.py:262: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n X = X.as_matrix()\n"
]
],
[
[
"### Principal Component Projection",
"_____no_output_____"
]
],
[
[
"visualizer = PCADecomposition(scale=True, color = y_c, proj_dim=3)\nvisualizer.fit_transform(X, y)\nvisualizer.poof()",
"_____no_output_____"
]
],
[
[
"### Manifold projections",
"_____no_output_____"
]
],
[
[
"visualizer = Manifold(manifold='tsne', target='discrete')\nvisualizer.fit_transform(X, y)\nvisualizer.poof()",
"_____no_output_____"
],
[
"visualizer = Manifold(manifold='modified', target='discrete')\nvisualizer.fit_transform(X, y)\nvisualizer.poof()",
"_____no_output_____"
]
],
[
[
"No apparent structure from the PCA and Manifold projections.",
"_____no_output_____"
],
[
"### Class Balance",
"_____no_output_____"
]
],
[
[
"categories, counts = np.unique(y, return_counts=True)\nfig, ax = plt.subplots(figsize=(9, 7))\nsb.set(style=\"whitegrid\")\nsb.barplot(labels, counts, ax=ax, tick_label=labels)\nax.set(xlabel='Need Categories',\n ylabel='Number of HHs');",
"_____no_output_____"
]
],
[
[
"Heavy class imbalances. Use appropriate scoring metrics/measures.",
"_____no_output_____"
],
[
"### Learning and Validation",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import StratifiedKFold\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import RidgeClassifier\nfrom yellowbrick.model_selection import LearningCurve",
"_____no_output_____"
],
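[
"# Added reference point (hedged, not part of the original analysis): with the class imbalance\n# shown above, a constant majority-class predictor reaches the majority-class share in plain\n# accuracy, while chance-level balanced accuracy is 1/n_classes.\nvalues, counts = np.unique(y, return_counts=True)\nprint('Majority-class accuracy:', counts.max() / counts.sum())\nprint('Chance-level balanced accuracy:', 1 / len(values))",
"_____no_output_____"
],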
[
"cv = StratifiedKFold(10)\nsizes = np.linspace(0.1, 1., 20)",
"_____no_output_____"
],
[
"visualizer = LearningCurve(RidgeClassifier(), cv=cv, train_sizes=sizes, \n scoring='balanced_accuracy', n_jobs=-1)\nvisualizer.fit(X,y)\nvisualizer.poof()",
"_____no_output_____"
],
[
"visualizer = LearningCurve(GaussianNB(), cv=cv, train_sizes=sizes, \n scoring='balanced_accuracy', n_jobs=-1)\nvisualizer.fit(X,y)\nvisualizer.poof()",
"_____no_output_____"
]
],
[
[
"### Classification",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import RidgeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils.class_weight import compute_class_weight\nfrom imblearn.metrics import classification_report_imbalanced\nfrom sklearn.metrics import balanced_accuracy_score",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)\ncv_ = StratifiedKFold(5)\nclass_weights = compute_class_weight(class_weight='balanced', classes= np.unique(y), y=y)",
"_____no_output_____"
],
[
"clf = RidgeClassifier()\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nprint('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))\nprint('Classification report: ')\nprint(classification_report_imbalanced(y_test, y_pred, target_names=labels))\nplot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)",
"Balanced accuracy: 0.23\nClassification report: \n pre rec spe f1 geo iba sup\n\n no needs 0.00 0.00 1.00 0.00 0.00 0.00 63\n low needs 0.55 0.11 0.97 0.18 0.33 0.10 594\n moderate needs 0.50 0.88 0.18 0.64 0.40 0.17 1258\n high needs 0.41 0.17 0.92 0.24 0.40 0.15 655\nvery high needs 0.00 0.00 1.00 0.00 0.00 0.00 25\n\n avg / total 0.47 0.50 0.58 0.41 0.37 0.14 2595\n\nNormalized confusion matrix\n"
],
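[
"# Added sketch (hedged): the class_weights computed above are never passed to a model; one\n# simple way to account for the imbalance with this classifier is its built-in class_weight\n# option (an added variation, not part of the original run).\nclf_w = RidgeClassifier(class_weight='balanced')\nclf_w.fit(X_train, y_train)\nprint('Balanced accuracy with class_weight=balanced: {:.2f}'.format(\n    balanced_accuracy_score(y_test, clf_w.predict(X_test))))",
"_____no_output_____"
],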
[
"clf = KNeighborsClassifier()\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nprint('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))\nprint('Classification report: ')\nprint(classification_report_imbalanced(y_test, y_pred, target_names=labels))\nplot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)",
"Balanced accuracy: 0.22\nClassification report: \n pre rec spe f1 geo iba sup\n\n no needs 0.05 0.02 0.99 0.02 0.13 0.01 63\n low needs 0.29 0.31 0.77 0.30 0.49 0.23 594\n moderate needs 0.49 0.61 0.39 0.54 0.49 0.25 1258\n high needs 0.32 0.17 0.88 0.22 0.38 0.14 655\nvery high needs 0.00 0.00 1.00 0.00 0.00 0.00 25\n\n avg / total 0.38 0.41 0.62 0.39 0.45 0.21 2595\n\nNormalized confusion matrix\n"
],
[
"clf = GaussianNB()\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nprint('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))\nprint('Classification report: ')\nprint(classification_report_imbalanced(y_test, y_pred, target_names=labels))\nplot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)",
"Balanced accuracy: 0.34\nClassification report: \n pre rec spe f1 geo iba sup\n\n no needs 0.09 0.49 0.87 0.15 0.65 0.41 63\n low needs 0.29 0.32 0.77 0.30 0.49 0.23 594\n moderate needs 0.64 0.01 1.00 0.01 0.07 0.00 1258\n high needs 0.35 0.01 0.99 0.02 0.11 0.01 655\nvery high needs 0.01 0.88 0.40 0.03 0.59 0.37 25\n\n avg / total 0.47 0.10 0.94 0.08 0.20 0.07 2595\n\nNormalized confusion matrix\n"
],
[
"clf = ExtraTreesClassifier()\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nprint('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))\nprint('Classification report: ')\nprint(classification_report_imbalanced(y_test, y_pred, target_names=labels))\nplot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)",
"Balanced accuracy: 0.27\nClassification report: \n pre rec spe f1 geo iba sup\n\n no needs 0.23 0.16 0.99 0.19 0.40 0.14 63\n low needs 0.31 0.29 0.81 0.30 0.48 0.22 594\n moderate needs 0.50 0.58 0.46 0.54 0.52 0.27 1258\n high needs 0.37 0.31 0.83 0.34 0.50 0.24 655\nvery high needs 0.00 0.00 1.00 0.00 0.00 0.00 25\n\n avg / total 0.41 0.43 0.65 0.42 0.50 0.25 2595\n\nNormalized confusion matrix\n"
],
[
"clf = GradientBoostingClassifier()\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nprint('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))\nprint('Classification report: ')\nprint(classification_report_imbalanced(y_test, y_pred, target_names=labels))\nplot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)",
"Balanced accuracy: 0.25\nClassification report: \n pre rec spe f1 geo iba sup\n\n no needs 0.20 0.02 1.00 0.03 0.13 0.01 63\n low needs 0.52 0.18 0.95 0.27 0.42 0.16 594\n moderate needs 0.51 0.84 0.24 0.64 0.45 0.21 1258\n high needs 0.47 0.22 0.92 0.30 0.45 0.19 655\nvery high needs 0.00 0.00 1.00 0.00 0.00 0.00 25\n\n avg / total 0.49 0.51 0.60 0.45 0.43 0.19 2595\n\nNormalized confusion matrix\n"
]
],
[
[
"## Voting Classifier",
"_____no_output_____"
],
[
"### Hard Voting",
"_____no_output_____"
]
],
[
[
"clf1 = KNeighborsClassifier(weights='distance')\nclf2 = GaussianNB()\nclf3 = ExtraTreesClassifier(class_weight='balanced_subsample')\nclf4 = GradientBoostingClassifier()\nvote = VotingClassifier(estimators=[('knn', clf1), ('gnb', clf2), ('ext', clf3), ('gb', clf4)], voting='hard')",
"_____no_output_____"
],
[
"params = {'knn__n_neighbors': [2,3,4], 'gb__n_estimators':[50,100,200],\n 'gb__max_depth':[3,5,7], 'ext__n_estimators': [50,100,200]}\nscoring_fns = ['f1_weighted', 'balanced_accuracy']",
"_____no_output_____"
],
[
"grid = GridSearchCV(estimator=vote, param_grid=params, cv=cv_, \n verbose=2, n_jobs=-1, scoring=scoring_fns, refit='balanced_accuracy')",
"_____no_output_____"
],
[
"grid.fit(X_train, y_train)\ny_pred = grid.predict(X_test)\nprint('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))\nprint('Classification report: ')\nprint(classification_report_imbalanced(y_test, y_pred, target_names=labels))\nplot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)",
"Fitting 5 folds for each of 81 candidates, totalling 405 fits\n"
],
[
"clf1 = KNeighborsClassifier(weights='distance')\nclf2 = GaussianNB()\nclf3 = ExtraTreesClassifier(class_weight='balanced_subsample')\nclf4 = GradientBoostingClassifier()\nvote = VotingClassifier(estimators=[('knn', clf1), ('gnb', clf2), ('ext', clf3), ('gb', clf4)], voting='soft')",
"_____no_output_____"
],
[
"params = {'knn__n_neighbors': [2,3,4], 'gb__n_estimators':[50,100,200],\n 'gb__max_depth':[3,5,7], 'ext__n_estimators': [50,100,200]}\nscoring_fns = ['f1_weighted', 'balanced_accuracy']",
"_____no_output_____"
],
[
"grid_soft = GridSearchCV(estimator=vote, param_grid=params, cv=cv_, \n verbose=2, n_jobs=-1, scoring=scoring_fns, refit='balanced_accuracy')",
"_____no_output_____"
],
[
"grid_soft.fit(X_train, y_train)\ny_pred = grid_soft.predict(X_test)\nprint('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))\nprint('Classification report: ')\nprint(classification_report_imbalanced(y_test, y_pred, target_names=labels))\nplot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)",
"Fitting 5 folds for each of 81 candidates, totalling 405 fits\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04a93d78698f302caf4e324ed857f84cc4687f1 | 1,025,009 | ipynb | Jupyter Notebook | Model History/Adi_iter6/Adi Iter 6.ipynb | dattasiddhartha/DataX-NeuralDecisionMaking | 68b0d79954d308c28febe07844674d9c034877d4 | [
"MIT"
] | null | null | null | Model History/Adi_iter6/Adi Iter 6.ipynb | dattasiddhartha/DataX-NeuralDecisionMaking | 68b0d79954d308c28febe07844674d9c034877d4 | [
"MIT"
] | null | null | null | Model History/Adi_iter6/Adi Iter 6.ipynb | dattasiddhartha/DataX-NeuralDecisionMaking | 68b0d79954d308c28febe07844674d9c034877d4 | [
"MIT"
] | null | null | null | 78.262885 | 486,992 | 0.663313 | [
[
[
"## Import packages",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"import pandas as pd\n\n# general packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport seaborn as sns\n\n# sklearn models\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\n\n# mne\nimport mne\nimport pickle\nfrom mne.datasets import sample\nfrom mne.decoding import (SlidingEstimator, GeneralizingEstimator,\n cross_val_multiscore, LinearModel, get_coef)",
"_____no_output_____"
]
],
[
[
"## sklearn models",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nfrom sklearn import linear_model\nfrom sklearn.metrics import confusion_matrix\n\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier",
"_____no_output_____"
]
],
[
[
"## Load preprocessed data",
"_____no_output_____"
]
],
[
[
"with open(os.path.join('data','Xdict.pickle'),'rb') as handle1:\n Xdict = pickle.load(handle1)\n \nwith open(os.path.join('data','ydict.pickle'),'rb') as handle2:\n ydict = pickle.load(handle2)",
"_____no_output_____"
],
[
"subjects = list(set(Xdict.keys()))",
"_____no_output_____"
]
],
[
[
"# FEATURE ENGINEERING",
"_____no_output_____"
],
[
"### Need to first make a master dataframe for the 5,6 numbers with corresponding result for all subjects compiled",
"_____no_output_____"
]
],
[
[
"s01 = ydict[1]\ndf1 = pd.DataFrame(s01, columns=['Result'])\ndf1['Subject'] = 1\ndf1['Time Series'] = [series[:-52] for series in Xdict[1].tolist()]\ndf1['Psd'] = [series[950:] for series in Xdict[1].tolist()]\ndf1\n\ns02 = ydict[2]\ndf2 = pd.DataFrame(s02, columns=['Result'])\ndf2['Subject'] = 2\ndf2['Time Series'] = [series[:-52] for series in Xdict[2].tolist()]\ndf2['Psd'] = [series[950:] for series in Xdict[2].tolist()]\ndf2\n\ns03 = ydict[3]\ndf3 = pd.DataFrame(s03, columns=['Result'])\ndf3['Subject'] = 3\ndf3['Time Series'] = [series[:-52] for series in Xdict[3].tolist()]\ndf3['Psd'] = [series[950:] for series in Xdict[3].tolist()]\ndf3\n\ns04 = ydict[4]\ndf4 = pd.DataFrame(s04, columns=['Result'])\ndf4['Subject'] = 4\ndf4['Time Series'] = [series[:-52] for series in Xdict[4].tolist()]\ndf4['Psd'] = [series[950:] for series in Xdict[4].tolist()]\ndf4\n\ns05 = ydict[5]\ndf5 = pd.DataFrame(s05, columns=['Result'])\ndf5['Subject'] = 5\ndf5['Time Series'] = [series[:-52] for series in Xdict[5].tolist()]\ndf5['Psd'] = [series[950:] for series in Xdict[5].tolist()]\ndf5\n\ns06 = ydict[6]\ndf6 = pd.DataFrame(s06, columns=['Result'])\ndf6['Subject'] = 6\ndf6['Time Series'] = [series[:-52] for series in Xdict[6].tolist()]\ndf6['Psd'] = [series[950:] for series in Xdict[6].tolist()]\ndf6\n\ns07 = ydict[7]\ndf7 = pd.DataFrame(s07, columns=['Result'])\ndf7['Subject'] = 7\ndf7['Time Series'] = [series[:-52] for series in Xdict[7].tolist()]\ndf7['Psd'] = [series[950:] for series in Xdict[7].tolist()]\ndf7\n\ns08 = ydict[8]\ndf8 = pd.DataFrame(s08, columns=['Result'])\ndf8['Subject'] = 8\ndf8['Time Series'] = [series[:-52] for series in Xdict[8].tolist()]\ndf8['Psd'] = [series[950:] for series in Xdict[8].tolist()]\ndf8\n\ns09 = ydict[9]\ndf9 = pd.DataFrame(s09, columns=['Result'])\ndf9['Subject'] = 9\ndf9['Time Series'] = [series[:-52] for series in Xdict[9].tolist()]\ndf9['Psd'] = [series[950:] for series in Xdict[9].tolist()]\ndf9\n\ns10 = ydict[10]\ndf10 = pd.DataFrame(s10, columns=['Result'])\ndf10['Subject'] = 10\ndf10['Time Series'] = [series[:-52] for series in Xdict[10].tolist()]\ndf10['Psd'] = [series[950:] for series in Xdict[10].tolist()]\n",
"_____no_output_____"
],
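[
"# Added equivalent sketch: the per-subject frames above can be built in a loop (same logic as\n# the copy-pasted cells, kept under different names so it does not clobber df1..df10).\nframes_alt = []\nfor subj in range(1, 11):\n    d = pd.DataFrame(ydict[subj], columns=['Result'])\n    d['Subject'] = subj\n    d['Time Series'] = [series[:-52] for series in Xdict[subj].tolist()]\n    d['Psd'] = [series[950:] for series in Xdict[subj].tolist()]\n    frames_alt.append(d)\nresultframe_alt = pd.concat(frames_alt).reset_index(drop=True)\nresultframe_alt.head()",
"_____no_output_____"
],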
[
"frames = [df1, df2, df3, df4, df5, df6, df7, df8, df9, df10]\nresultframe = pd.concat(frames)\n\nresultframe = resultframe.reset_index().drop('index', axis=1)\nresultframe",
"_____no_output_____"
]
],
[
[
"Splitting the psd into 52 different columns so each value can be used as a feature:",
"_____no_output_____"
]
],
[
[
"resultframe[['psd'+str(i) for i in range(1,53)]] = pd.DataFrame(resultframe.Psd.values.tolist(), index= resultframe.index)\nresultframe = resultframe.drop('Psd', axis=1)\nresultframe.head()",
"_____no_output_____"
]
],
[
[
"### Assuming the merged table is formed correctly, we now have our outcomes ('Results') and their corresponding first 950 time points series data, and subject information. We no longer have information regarding which electrode collected the data (irrelevant since no biological correspondence), however, if needed, we can still filter by subject as we retain that data. \n\n#### NOTE: This table is only for the 5,6 first number trials as it is in that scenario the patient has the ability to \"Gamble\". \n\n#### NOTE: One of the disadvantages of compiling all patient data and not separating by subject is that we are ignoring behavioral characteristics (risk aversion and risk loving) and rather finding common trends in the time series data regardless of personal characteristics.\n\n#### NEED TO CHECK: Are all electrode data included for each patient? Is the corresponding Result matched with respective time series? Currently, I will proceed relying on the dictionary Kata made and will assume the order and correspondence is proper.",
"_____no_output_____"
],
[
"## Dataset Characteristics/Confirming master dataframe created above:",
"_____no_output_____"
]
],
[
[
"countframe = resultframe.groupby(\"Subject\").count().drop('Time Series', axis=1).drop(['psd'+str(i) for i in range(1,53)], axis=1)\ncountframe",
"_____no_output_____"
],
[
"plt.bar(countframe.index, countframe['Result'])\nplt.xlabel('Subject')\nplt.ylabel('Count')\nplt.title('Number of Entries per subject')\nplt.show();",
"_____no_output_____"
]
],
[
[
"#### Note: Number of Entries = Number of trials with first number as 5,6 * Number of electrodes for the subject\n\nIn preprocessing notebook, we determined the number of electrodes per subject to be as followed:",
"_____no_output_____"
]
],
[
[
"subject = [1,2,3,4,5,6,7,8,9,10]\nelectrodes = [5,6,59,5,61,7,11,10,19,16]\nelecframe = pd.DataFrame(data={'Subject': subject, 'Num Electrode' : electrodes})\nelecframe",
"_____no_output_____"
]
],
[
[
"In preprocessing notebook, we also determined the number of trials with 5 and 6 (in cleaned table, excluding all types of bad trials):",
"_____no_output_____"
]
],
[
[
"subject = [1,2,3,4,5,6,7,8,9,10]\nnum5 = [23, 24, 24, 12, 21, 22, 21, 24, 24, 16]\nnum6 = [20, 23, 24, 18, 21, 24, 22, 24, 24, 18]\n\ntrialframe = pd.DataFrame(data={'Subject': subject, 'Num 5': num5, 'Num 6': num6})\ntrialframe['Num Total Trials'] = trialframe['Num 5'] + trialframe['Num 6']\ntrialframe = trialframe.drop(['Num 5', 'Num 6'], axis=1)\ntrialframe",
"_____no_output_____"
]
],
[
[
"Merging the two tables together:",
"_____no_output_____"
]
],
[
[
"confframe = pd.concat([elecframe, trialframe.drop('Subject', axis=1)], axis=1)\nconfframe['Expected Entries'] = confframe['Num Electrode'] * confframe['Num Total Trials']\nconfframe",
"_____no_output_____"
],
[
"checkframe = pd.merge(confframe, countframe, how='inner', left_on='Subject', right_index=True)\ncheckframe",
"_____no_output_____"
]
],
[
[
"We now confirmed that our expected number of entries per subject matches the actual number of entries we obtained in the master dataframe created above. This indicates that the table above is likely created properly and it is safe to use it for further analysis.\n\nNext, we need to understand the characteristics of our dataset, mainly to understand the probability of obtaining a correct prediction due to chance.",
"_____no_output_____"
]
],
[
[
"outframe = resultframe.groupby('Result').count().drop('Time Series', axis=1).drop(['psd'+str(i) for i in range(1,53)], axis=1).rename(index=str, columns={'Subject':'Count'})\noutframe",
"_____no_output_____"
]
],
[
[
"We can observe that the distribution is not even between the two possible outcomes so we need to be careful when assessing the performance of our model. We will next calculate the prediction power of chance:",
"_____no_output_____"
]
],
[
[
"total = sum(outframe['Count'])\noutframe['Probability'] = outframe['Count']/total\noutframe",
"_____no_output_____"
]
],
[
[
"We can observe that the probability of getting a correct prediction due to purely chance is 56.988% (~57%) so we need to design a prediction model that performs better than this. We will now move on to feature engineering to create new features.",
"_____no_output_____"
],
[
"## Making new features:",
"_____no_output_____"
],
[
"We currently have 52 power spectral density (psd) features obtained from preprocessed file. Need to create new features from our time series data",
"_____no_output_____"
]
],
[
[
"resultframe.head()",
"_____no_output_____"
],
[
"resultframe['Max'] = [max(i) for i in resultframe['Time Series']]\nresultframe['Min'] = [min(i) for i in resultframe['Time Series']]\nresultframe['Std'] = [np.std(i) for i in resultframe['Time Series']]\nresultframe['Mean'] = [np.mean(i) for i in resultframe['Time Series']]\nresultframe['p2.5'] = [np.percentile(i, 2.5) for i in resultframe['Time Series']]\nresultframe['p97.5'] = [np.percentile(i, 97.5) for i in resultframe['Time Series']]\nresultframe.head()",
"_____no_output_____"
]
],
[
[
"Changing entries of \"Result\"\n\nSafebet = 0, Gamble = 1:",
"_____no_output_____"
]
],
[
[
"resultframe['Result'] = resultframe['Result'].map({'Safebet': 0, 'Gamble': 1})\nresultframe.head()",
"_____no_output_____"
]
],
[
[
"We should center all our data to 0.0 since we care about relative wave form and not baseline amplitude. The difference in baseline amplitude can be ascribed to hardware differences (electrode readings) and should not be considered in our predictive model. Thus, we need to adapt our features above by centering the values around 0.0. Hence, mean is dropped as a feature and a new feature \"Interval\" which is max-min is introduced.\n\nInterval = Max - Min\n\nPercentile 2.5 and Percentile 97.5 values were determined as features above. Now, a new feature is going to be introduced \"Percentile Interval\" which is the difference between the two values. \n\nPercentile Interval = p97.5 - p2.5",
"_____no_output_____"
]
],
[
[
"resultframe['Max'] = resultframe['Max'] - resultframe['Mean']\nresultframe['Min'] = resultframe['Min'] - resultframe['Mean']\nresultframe['p2.5'] = resultframe['p2.5'] - resultframe['Mean']\nresultframe['p97.5'] = resultframe['p97.5'] - resultframe['Mean']\nresultframe['Mean'] = resultframe['Mean'] - resultframe['Mean']\nresultframe['Interval'] = resultframe['Max'] - resultframe['Min']\nresultframe['Percentile Interval'] = resultframe['p97.5'] - resultframe['p2.5']\n#resultframe = resultframe[['Subject', 'Time Series', 'Max', 'Min', 'Std', 'Interval', 'p2.5', 'p97.5', 'Percentile Interval', 'Result']]\nresultframe",
"_____no_output_____"
]
],
[
[
"Since all the features currently in place are statistics that do not respect the temporal nature of our data (time-series data), we need to introduce features that also respect the morphology of the waves in the data. An example feature is number of peaks.\n\nNumber of peaks = number of data points i where i > i-1 and i > i+1 and will not include the i=0 and i=949 entries",
"_____no_output_____"
]
],
[
[
"peaks = []\n\nfor series in resultframe['Time Series']:\n no_peaks = 0 \n indices = range(2,949)\n for index in indices:\n if series[index] > series[index-1] and series[index] > series[index+1]:\n no_peaks += 1\n peaks.append(no_peaks)\n \nlen(peaks)",
"_____no_output_____"
],
[
"resultframe['Num Peaks'] = peaks\nresultframe.head()",
"_____no_output_____"
],
[
"#resultframe = resultframe[['Subject', 'Time Series', 'Max', 'Min', 'Interval', 'Std', 'p2.5', 'p97.5', 'Percentile Interval', 'Num Peaks', 'Result']]\n#resultframe.head()",
"_____no_output_____"
]
],
[
[
"#### Categorizing all our data",
"_____no_output_____"
]
],
[
[
"resultframe['Num Peaks Cat'] = pd.cut(resultframe['Num Peaks'], 4,labels=[1,2,3,4])\n#resultframe = resultframe[['Subject', 'Time Series', 'Max', 'Min', 'Interval', 'Std', 'p2.5', 'p97.5', 'Percentile Interval', 'Num Peaks', 'Num Peaks Cat', 'Result']]\nresultframe.head()",
"_____no_output_____"
],
[
"resultframe['p2.5 Cat'] = pd.qcut(resultframe['p2.5'], 3,labels=[1,2,3])\nresultframe['p97.5 Cat'] = pd.qcut(resultframe['p97.5'], 3,labels=[1,2,3])\nresultframe['Std Cat'] = pd.qcut(resultframe['Std'], 3,labels=[1,2,3])\nresultframe['Percentile Interval Cat'] = pd.qcut(resultframe['Percentile Interval'], 3,labels=[1,2,3])\n\n#resultframe = resultframe[['Subject', 'Time Series', 'Max', 'Min', 'Interval', 'Std', 'p2.5', 'p97.5', 'Percentile Interval', 'Num Peaks', 'Num Peaks Cat', 'p2.5 Cat', 'p97.5 Cat', 'Std Cat', 'Percentile Interval Cat', 'Result']]\nresultframe",
"_____no_output_____"
],
[
"resultframe['Num Peaks Cat'] = resultframe['Num Peaks Cat'].astype(int)\nresultframe['p2.5 Cat'] = resultframe['p2.5 Cat'].astype(int)\nresultframe['p97.5 Cat'] = resultframe['p97.5 Cat'].astype(int)\nresultframe['Std Cat'] = resultframe['Std Cat'].astype(int)\nresultframe['Percentile Interval Cat'] = resultframe['Percentile Interval Cat'].astype(int)\n\nresultframe.head()",
"_____no_output_____"
]
],
[
[
"### Checking our X and y matrices (selecting only features we want to pass into the model)",
"_____no_output_____"
]
],
[
[
"resultframe.loc[:,[\"Subject\", \"Result\"]][resultframe['Subject']==1].drop('Subject', axis=1).head()",
"_____no_output_____"
],
[
"#resultframe.iloc[:,[1,3]][resultframe['Subject']==1].drop(\"Subject\", axis=1).head()\n\nresultframe.drop([\"Subject\", \"Time Series\", \"Result\"], axis=1)",
"_____no_output_____"
]
],
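[
[
"The per-subject X/y construction above is repeated inside each modeling loop below. A small helper function (a hypothetical name, not used elsewhere in this notebook) makes the intent of that drop/filter pattern explicit:",
"_____no_output_____"
]
],
[
[
"# Hypothetical helper (sketch): build per-subject X and y from resultframe,\n# mirroring the drop/filter pattern used in the modeling loops below.\ndef get_subject_xy(df, subject, feature_cols=None):\n    subset = df[df['Subject'] == subject]\n    if feature_cols is None:\n        X = subset.drop(['Subject', 'Time Series', 'Result'], axis=1)\n    else:\n        X = subset[feature_cols]\n    y = subset['Result']\n    return X, y\n\nX1, y1 = get_subject_xy(resultframe, 1)\nX1.head()",
"_____no_output_____"
]
],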
[
[
"# Modeling",
"_____no_output_____"
],
[
"## Logistic Regression",
"_____no_output_____"
],
[
"### Initialize dataframe to track model performance per subject",
"_____no_output_____"
]
],
[
[
"performance_logistic = pd.DataFrame(index = Xdict.keys(), # subject\n columns=['naive_train_accuracy',\n 'naive_test_accuracy',\n 'model_train_accuracy',\n 'model_test_accuracy'])\n\nperformance_logistic",
"_____no_output_____"
]
],
[
[
"### Train model",
"_____no_output_____"
]
],
[
[
"coefficients = dict()\n\n# initialize dataframes to log predicted choice and true choice for each trial\npredictions_logistic_train_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n\npredictions_logistic_test_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n\nLogisticRegressionModel = linear_model.LogisticRegression()\n\n# two subclasses to start\nfor subject in subjects:\n print(subject)\n #X = resultframe.iloc[:,[0,5,8,10,11,12]][resultframe['Subject']==subject].drop(\"Subject\", axis=1) \n #y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)\n X = resultframe.drop([\"Time Series\", \"Result\"], axis=1)[resultframe['Subject']==subject].drop(\"Subject\", axis=1)\n y = resultframe.loc[:,[\"Subject\", \"Result\"]][resultframe['Subject']==subject].drop('Subject', axis=1)\n \n \n # train-test split\n Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)\n \n # get naive performance (guessing most frequent category, the max of guessing one vs the other)\n performance_logistic.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n performance_logistic.loc[subject, 'naive_test_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n \n # make df to track predicted vs real choice for each subject\n predictions_logistic_train = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n predictions_logistic_test = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n \n predictions_logistic_train['true_choice'] = ytrain['Result']\n predictions_logistic_test['true_choice'] = ytest['Result']\n \n # logistic regression\n LogisticRegressionModel.fit(Xtrain, ytrain)\n \n # store coefficients\n coefficients[subject] = LogisticRegressionModel.coef_[0]\n \n performance_logistic.loc[subject,'model_train_accuracy'] = LogisticRegressionModel.score(Xtrain,ytrain)\n performance_logistic.loc[subject,'model_test_accuracy'] = LogisticRegressionModel.score(Xtest,ytest)\n\n # complete the guesses for each person\n predictions_logistic_train['predicted_choice'] = LogisticRegressionModel.predict(Xtrain)\n predictions_logistic_test['predicted_choice'] = LogisticRegressionModel.predict(Xtest)\n \n # concatenate dfs\n predictions_logistic_train_master = pd.concat([predictions_logistic_train_master, predictions_logistic_train])\n predictions_logistic_test_master = pd.concat([predictions_logistic_test_master, predictions_logistic_test])",
"1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"performance_logistic",
"_____no_output_____"
],
[
"train_accuracy_total = np.mean(predictions_logistic_train_master['true_choice'] == predictions_logistic_train_master['predicted_choice']) \ntest_accuracy_total = np.mean(predictions_logistic_test_master['true_choice'] == predictions_logistic_test_master['predicted_choice'])\n\ntrain_accuracy_total, test_accuracy_total",
"_____no_output_____"
]
],
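[
[
"The loop above stores each subject's logistic regression coefficients in the `coefficients` dictionary but never displays them. A minimal sketch for inspecting them (column names are taken from `X`, the feature matrix left over from the last subject in the loop):",
"_____no_output_____"
]
],
[
[
"# Sketch: tabulate the stored per-subject logistic regression coefficients.\n# X here is the feature matrix from the last iteration of the loop above,\n# so its columns give the feature order used when fitting.\ncoef_df = pd.DataFrame(coefficients).T\ncoef_df.columns = X.columns\ncoef_df",
"_____no_output_____"
]
],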
[
[
"### FEATURE SELECTION\n\nSince not much improvement has been seen in iter5, I will attempt to selectively include features from our current feature set that demonstrate strong predictive power. I will first check for collinear features.",
"_____no_output_____"
]
],
[
[
"train, test = train_test_split(resultframe, test_size=0.2, random_state=100)\ntrain_df = train.iloc[:, 2:]\n\ntrain_df.head()",
"_____no_output_____"
],
[
"train_df.corr()",
"_____no_output_____"
],
[
"colormap = plt.cm.viridis\nplt.figure(figsize=(12,12))\nplt.title('Pearson Correlation of Features', y=1.05, size=15)\nsns.heatmap(train_df.corr().round(2)\\\n ,linewidths=0.1,vmax=1.0, square=True, cmap=colormap, \\\n linecolor='white', annot=True);",
"_____no_output_____"
]
],
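[
[
"To complement the heatmap, a small sketch that lists the most strongly correlated feature pairs programmatically (the 0.8 threshold is arbitrary):",
"_____no_output_____"
]
],
[
[
"# Sketch: list feature pairs with high absolute Pearson correlation.\ncorr_abs = train_df.corr().abs()\nupper = np.triu(np.ones(corr_abs.shape, dtype=bool), k=1)\npairs = corr_abs.where(upper).stack().sort_values(ascending=False)\npairs[pairs > 0.8]",
"_____no_output_____"
]
],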
[
[
"As seen in the correlation heatmap above, the correlations between the different features are generally quite high. Thus, we need to be more selective in choosing features for this model, as uncorrelated features are generally more powerful predictors.\n\nWe will try these features: Num Peaks Cat, Percentile Interval, Std, p97.5 Cat, p2.5 Cat",
"_____no_output_____"
],
[
"## Random Forest",
"_____no_output_____"
],
[
"### Initialize dataframe to track model performance per subject",
"_____no_output_____"
]
],
[
[
"performance_forest = pd.DataFrame(index = Xdict.keys(), # subject\n columns=['naive_train_accuracy',\n 'naive_test_accuracy',\n 'model_train_accuracy',\n 'model_test_accuracy'])",
"_____no_output_____"
]
],
[
[
"### Initialize dataframes to log predicted choice and true choice for each trial",
"_____no_output_____"
]
],
[
[
"feature_importances = dict()\n\npredictions_forest_train_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\npredictions_forest_test_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n\nrandom_forest = RandomForestClassifier()\n\n# two subclasses to start\nfor subject in subjects:\n print(subject)\n X = resultframe.iloc[:,[0,4,6,7,8]][resultframe['Subject']==subject].drop(\"Subject\", axis=1) \n y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)\n \n # train-test split\n Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)\n \n # get naive performance (guessing most frequent category, the max of guessing one vs the other)\n performance_forest.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n performance_forest.loc[subject,'naive_test_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n \n # make df to track predicted vs real choice for each subject\n predictions_forest_train = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n predictions_forest_test = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n \n predictions_forest_train['true_choice'] = ytrain['Result']\n predictions_forest_test['true_choice'] = ytest['Result']\n \n # model\n random_forest.fit(Xtrain, ytrain)\n performance_forest.loc[subject,'model_train_accuracy'] = random_forest.score(Xtrain,ytrain)\n performance_forest.loc[subject,'model_test_accuracy'] = random_forest.score(Xtest,ytest)\n \n # store feature importances\n feature_importances[subject] = random_forest.feature_importances_\n \n # complete the guesses for each person\n predictions_forest_train['predicted_choice'] = random_forest.predict(Xtrain)\n predictions_forest_test['predicted_choice'] = random_forest.predict(Xtest)\n \n # concatenate dfs\n predictions_forest_train_master = pd.concat([predictions_forest_train_master, predictions_forest_train])\n predictions_forest_test_master = pd.concat([predictions_forest_test_master, predictions_forest_test])",
"1\n2\n3\n4\n"
],
[
"performance_forest",
"_____no_output_____"
],
[
"train_accuracy_total = np.mean(predictions_forest_train_master['true_choice'] == predictions_forest_train_master['predicted_choice']) \ntest_accuracy_total = np.mean(predictions_forest_test_master['true_choice'] == predictions_forest_test_master['predicted_choice'])\n\ntrain_accuracy_total, test_accuracy_total",
"_____no_output_____"
]
],
[
[
"The random forest overfits heavily: training accuracy is far above test accuracy.",
"_____no_output_____"
],
[
"## logistic regression modified with StandardScaler(), i.e., z-scoring the data before fitting model",
"_____no_output_____"
],
[
"### initialize dataframe to track model performance per subject",
"_____no_output_____"
]
],
[
[
"performance_logistic = pd.DataFrame(index = Xdict.keys(), # subject\n columns=['naive_train_accuracy',\n 'naive_test_accuracy',\n 'model_train_accuracy',\n 'model_test_accuracy'])",
"_____no_output_____"
]
],
[
[
"### initialize dataframes to log predicted choice and true choice for each trial",
"_____no_output_____"
]
],
[
[
"predictions_logistic_train_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n\npredictions_logistic_test_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])",
"_____no_output_____"
],
[
"LogisticRegressionModel = linear_model.LogisticRegression()",
"_____no_output_____"
],
[
"from sklearn.feature_selection import SelectKBest, f_classif # use f_regression for afresh feature selection\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\n\n# pipe = make_pipeline(SelectKBest(k=50), StandardScaler(), linear_model.LogisticRegressionCV())\npipe = make_pipeline(StandardScaler(), linear_model.LogisticRegressionCV())\nLogisticRegressionModel = pipe",
"_____no_output_____"
],
[
"# two subclasses to start\nfor subject in subjects:\n print(subject)\n X = resultframe.iloc[:,[0,4,6,7,8]][resultframe['Subject']==subject].drop(\"Subject\", axis=1) \n y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)\n \n # train-test split\n Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)\n \n # get naive performance (guessing most frequent category, the max of guessing one vs the other)\n performance_logistic.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n performance_logistic.loc[subject,'naive_test_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n \n # make df to track predicted vs real choice for each subject\n predictions_logistic_train = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n predictions_logistic_test = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n \n predictions_logistic_train['true_choice'] = ytrain['Result']\n predictions_logistic_test['true_choice'] = ytest['Result']\n \n # logistic regression\n LogisticRegressionModel.fit(Xtrain, ytrain)\n performance_logistic.loc[subject,'model_train_accuracy'] = LogisticRegressionModel.score(Xtrain,ytrain)\n performance_logistic.loc[subject,'model_test_accuracy'] = LogisticRegressionModel.score(Xtest,ytest)\n\n # complete the guesses for each person\n predictions_logistic_train['predicted_choice'] = LogisticRegressionModel.predict(Xtrain)\n predictions_logistic_test['predicted_choice'] = LogisticRegressionModel.predict(Xtest)\n \n # concatenate dfs\n predictions_logistic_train_master = pd.concat([predictions_logistic_train_master, predictions_logistic_train])\n predictions_logistic_test_master = pd.concat([predictions_logistic_test_master, predictions_logistic_test])",
"1\n"
],
[
"performance_logistic",
"_____no_output_____"
],
[
"train_accuracy_total = np.mean(predictions_logistic_train_master['true_choice'] == predictions_logistic_train_master['predicted_choice']) \ntest_accuracy_total = np.mean(predictions_logistic_test_master['true_choice'] == predictions_logistic_test_master['predicted_choice'])\n\ntrain_accuracy_total, test_accuracy_total",
"_____no_output_____"
]
],
[
[
"## random forest with StandardScaler()",
"_____no_output_____"
],
[
"### initialize dataframe to track model performance per subject",
"_____no_output_____"
]
],
[
[
"performance_forest = pd.DataFrame(index = Xdict.keys(), # subject\n columns=['naive_train_accuracy',\n 'naive_test_accuracy',\n 'model_train_accuracy',\n 'model_test_accuracy'])",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()",
"_____no_output_____"
]
],
[
[
"### initialize dataframes to log predicted choice and true choice for each trial",
"_____no_output_____"
]
],
[
[
"feature_importances = dict()\n\npredictions_forest_train_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\npredictions_forest_test_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n\nrandom_forest = RandomForestClassifier()\n\n# two subclasses to start\nfor subject in subjects:\n print(subject)\n X = resultframe.iloc[:,[0,4,6,7,8]][resultframe['Subject']==subject].drop(\"Subject\", axis=1) \n y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)\n \n # standardize data here\n scaler.fit(X)\n X = scaler.transform(X)\n \n # train-test split\n Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)\n \n # get naive performance (guessing most frequent category, the max of guessing one vs the other)\n performance_forest.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n performance_forest.loc[subject,'naive_test_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n \n # make df to track predicted vs real choice for each subject\n predictions_forest_train = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n predictions_forest_test = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n \n predictions_forest_train['true_choice'] = ytrain['Result']\n predictions_forest_test['true_choice'] = ytest['Result']\n \n # model\n random_forest.fit(Xtrain, ytrain)\n performance_forest.loc[subject,'model_train_accuracy'] = random_forest.score(Xtrain,ytrain)\n performance_forest.loc[subject,'model_test_accuracy'] = random_forest.score(Xtest,ytest)\n \n # store feature importances\n feature_importances[subject] = random_forest.feature_importances_\n \n # complete the guesses for each person\n predictions_forest_train['predicted_choice'] = random_forest.predict(Xtrain)\n predictions_forest_test['predicted_choice'] = random_forest.predict(Xtest)\n \n # concatenate dfs\n predictions_forest_train_master = pd.concat([predictions_forest_train_master, predictions_forest_train])\n predictions_forest_test_master = pd.concat([predictions_forest_test_master, predictions_forest_test])",
"1\n"
],
[
"performance_forest",
"_____no_output_____"
],
[
"train_accuracy_total = np.mean(predictions_forest_train_master['true_choice'] == predictions_forest_train_master['predicted_choice']) \ntest_accuracy_total = np.mean(predictions_forest_test_master['true_choice'] == predictions_forest_test_master['predicted_choice'])\n\ntrain_accuracy_total, test_accuracy_total",
"_____no_output_____"
]
],
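[
[
"The loop above collects the per-subject random forest feature importances in `feature_importances` without displaying them. A minimal sketch to tabulate them; positions correspond to the iloc-selected feature columns, since `X` was converted to a numpy array by the scaler and its column names are not available here.",
"_____no_output_____"
]
],
[
[
"# Sketch: tabulate the stored per-subject feature importances.\nimportance_df = pd.DataFrame(feature_importances).T\nimportance_df.columns = ['feature_%d' % i for i in range(importance_df.shape[1])]\nimportance_df",
"_____no_output_____"
]
],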
[
[
"## logistic regression with StandardScaler() *and* selecting K best features (reducing the number of features, should reduce overfitting)",
"_____no_output_____"
],
[
"### initialize dataframe to track model performance per subject",
"_____no_output_____"
]
],
[
[
"performance_logistic = pd.DataFrame(index = Xdict.keys(), # subject\n columns=['naive_train_accuracy',\n 'naive_test_accuracy',\n 'model_train_accuracy',\n 'model_test_accuracy'])",
"_____no_output_____"
]
],
[
[
"### initialize dataframes to log predicted choice and true choice for each trial",
"_____no_output_____"
]
],
[
[
"predictions_logistic_train_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n\npredictions_logistic_test_master = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])",
"_____no_output_____"
],
[
"LogisticRegressionModel = linear_model.LogisticRegression()",
"_____no_output_____"
],
[
"from sklearn.feature_selection import SelectKBest, f_classif # use f_regression for afresh feature selection\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline",
"_____no_output_____"
]
],
[
[
"#### try different numbers of num_k",
"_____no_output_____"
]
],
[
[
"num_k = [1,2,3,4] # max number of features is 4\n\nfor k in num_k:\n pipe = make_pipeline(SelectKBest(k=k), StandardScaler(), linear_model.LogisticRegressionCV())\n LogisticRegressionModel = pipe\n\n # two subclasses to start\n for subject in subjects:\n print(subject)\n X = resultframe.iloc[:,[0,4,6,7,8]][resultframe['Subject']==subject].drop(\"Subject\", axis=1) \n y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)\n # train-test split\n Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)\n\n # get naive performance (guessing most frequent category, the max of guessing one vs the other)\n performance_logistic.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n performance_logistic.loc[subject,'naive_test_accuracy'] = max(float(np.mean(ytrain=='Gamble')),float(np.mean(ytrain=='Safebet')))\n\n # make df to track predicted vs real choice for each subject\n predictions_logistic_train = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n predictions_logistic_test = pd.DataFrame(columns=['predicted_choice',\n 'true_choice'])\n\n predictions_logistic_train['true_choice'] = ytrain['Result']\n predictions_logistic_test['true_choice'] = ytest['Result']\n\n # logistic regression\n LogisticRegressionModel.fit(Xtrain, ytrain)\n performance_logistic.loc[subject,'model_train_accuracy'] = LogisticRegressionModel.score(Xtrain,ytrain)\n performance_logistic.loc[subject,'model_test_accuracy'] = LogisticRegressionModel.score(Xtest,ytest)\n\n # complete the guesses for each person\n predictions_logistic_train['predicted_choice'] = LogisticRegressionModel.predict(Xtrain)\n predictions_logistic_test['predicted_choice'] = LogisticRegressionModel.predict(Xtest)\n\n # concatenate dfs\n predictions_logistic_train_master = pd.concat([predictions_logistic_train_master, predictions_logistic_train])\n predictions_logistic_test_master = pd.concat([predictions_logistic_test_master, predictions_logistic_test])\n \n train_accuracy_total = np.mean(predictions_logistic_train_master['true_choice'] == predictions_logistic_train_master['predicted_choice']) \n test_accuracy_total = np.mean(predictions_logistic_test_master['true_choice'] == predictions_logistic_test_master['predicted_choice'])\n print(k, train_accuracy_total, test_accuracy_total)",
"1\n2\n3\n"
]
],
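[
[
"Note that the two master prediction frames are not reset between values of `k` in the loop above, so each printed total mixes predictions from all the `k` values run so far. A hedged sketch of a per-`k` comparison that uses the same features, split and pipeline, but keeps the scores separate:",
"_____no_output_____"
]
],
[
[
"# Sketch: per-k test accuracy without accumulating predictions across k values.\nfor k in num_k:\n    pipe_k = make_pipeline(SelectKBest(k=k), StandardScaler(), linear_model.LogisticRegressionCV())\n    test_scores = []\n    for subject in subjects:\n        X = resultframe.iloc[:, [0, 4, 6, 7, 8]][resultframe['Subject'] == subject].drop('Subject', axis=1)\n        y = resultframe.iloc[:, [0, -1]][resultframe['Subject'] == subject].drop('Subject', axis=1)\n        Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.2, random_state=100)\n        pipe_k.fit(Xtr, ytr.values.ravel())\n        test_scores.append(pipe_k.score(Xte, yte.values.ravel()))\n    print(k, np.mean(test_scores))",
"_____no_output_____"
]
],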
[
[
"### Trying other models",
"_____no_output_____"
]
],
[
[
"X = resultframe.iloc[:,[4,6,7,8]] \ny = resultframe.iloc[:,-1]",
"_____no_output_____"
],
[
"x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)\nprint ('Number of samples in training data:',len(x_train))\nprint ('Number of samples in test data:',len(x_test))",
"Number of samples in training data: 7017\nNumber of samples in test data: 1755\n"
],
[
"perceptron = Perceptron(max_iter=100)\nperceptron.fit(x_train, y_train)\nperceptron_train_acc = perceptron.score(x_train, y_train)\nperceptron_test_acc = perceptron.score(x_test, y_test)\nprint('perceptron training accuracy= ', perceptron_train_acc)\nprint('perceptron test accuracy= ', perceptron_test_acc)",
"perceptron training accuracy= 0.5699016673792219\nperceptron test accuracy= 0.5698005698005698\n"
],
[
"adaboost = AdaBoostClassifier()\nadaboost.fit(x_train, y_train)\nadaboost_train_acc = adaboost.score(x_train, y_train)\nadaboost_test_acc = adaboost.score(x_test, y_test)\nprint('adaboost training accuracy= ', adaboost_train_acc)\nprint('adaboost test accuracy= ', adaboost_test_acc)",
"adaboost training accuracy= 0.5768847085649138\nadaboost test accuracy= 0.5726495726495726\n"
],
[
"random_forest = RandomForestClassifier()\nrandom_forest.fit(x_train, y_train)\nrandom_forest_train_acc = random_forest.score(x_train, y_train)\nrandom_forest_test_acc = random_forest.score(x_test, y_test)\nprint('random_forest training accuracy= ', random_forest_train_acc)\nprint('random_forest test accuracy= ', random_forest_test_acc)",
"random_forest training accuracy= 0.7377796779250392\nrandom_forest test accuracy= 0.5162393162393163\n"
]
],
[
[
"#### ALL THREE MODELS WORSE THAN CHANCE! ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d04a95abe67544f7d8edd17aa7c9cd48ab5005f8 | 24,722 | ipynb | Jupyter Notebook | examples/GaussianBosonSampling.ipynb | cclauss/strawberryfields | 512461c34cb86a5494d442d39bac0f6310deaa6e | [
"Apache-2.0"
] | null | null | null | examples/GaussianBosonSampling.ipynb | cclauss/strawberryfields | 512461c34cb86a5494d442d39bac0f6310deaa6e | [
"Apache-2.0"
] | null | null | null | examples/GaussianBosonSampling.ipynb | cclauss/strawberryfields | 512461c34cb86a5494d442d39bac0f6310deaa6e | [
"Apache-2.0"
] | null | null | null | 36.090511 | 788 | 0.581506 | [
[
[
"<table width=60% >\n  <tr style=\"background-color: white;\">\n    <td><img src='https://www.creativedestructionlab.com/wp-content/uploads/2018/05/xanadu.jpg'></td>\n  </tr>\n</table>\n\n---\n\n<img src='https://raw.githubusercontent.com/XanaduAI/strawberryfields/master/doc/_static/strawberry-fields-text.png'>\n\n---\n\n<br>\n\n<center> <h1> Gaussian boson sampling tutorial </h1></center>\n\nTo get a feel for how Strawberry Fields works, let's try coding a quantum program, Gaussian boson sampling.",
"_____no_output_____"
],
[
"## Background information: Gaussian states\n---\n\nA Gaussian state is one that can be described by a [Gaussian function](https://en.wikipedia.org/wiki/Gaussian_function) in the phase space. For example, a single mode Gaussian state squeezed in the $x$ quadrature by the squeezing operator $S(r)$ can be described by the following [Wigner quasiprobability distribution](https://en.wikipedia.org/wiki/Wigner_quasiprobability_distribution):\n\n$$W(x,p) = \\frac{2}{\\pi}e^{-2\\sigma^2(x-\\bar{x})^2 - 2(p-\\bar{p})^2/\\sigma^2}$$\n\nwhere $\\sigma$ represents the **squeezing**, and $\\bar{x}$ and $\\bar{p}$ are the mean **displacement**, respectively. For multimode states containing $N$ modes, this can be generalised; Gaussian states are uniquely defined by a [multivariate Gaussian function](https://en.wikipedia.org/wiki/Multivariate_normal_distribution), defined in terms of the **vector of means** ${\\mu}$ and a **covariance matrix** $\\sigma$.\n\n### The position and momentum basis\n\nFor example, consider a single mode in the position and momentum quadrature basis (the default for Strawberry Fields). Assuming a Gaussian state with displacement $\\alpha = \\bar{x}+i\\bar{p}$ and squeezing $\\xi = r e^{i\\phi}$ in the phase space, it has a vector of means and a covariance matrix given by:\n\n$$ \\mu = (\\bar{x},\\bar{p}),~~~~~~\\sigma = SS^\\dagger=R(\\phi/2)\\begin{bmatrix}e^{-2r} & 0 \\\\0 & e^{2r} \\\\\\end{bmatrix}R(\\phi/2)^T$$\n\nwhere $S$ is the squeezing operator, and $R(\\phi)$ is the standard two-dimensional rotation matrix. For multiple modes, in Strawberry Fields we use the convention \n\n$$ \\mu = (\\bar{x}_1,\\bar{x}_2,\\dots,\\bar{x}_N,\\bar{p}_1,\\bar{p}_2,\\dots,\\bar{p}_N)$$\n\nand therefore, considering $\\phi=0$ for convenience, the multimode covariance matrix is simply\n\n$$\\sigma = \\text{diag}(e^{-2r_1},\\dots,e^{-2r_N},e^{2r_1},\\dots,e^{2r_N})\\in\\mathbb{C}^{2N\\times 2N}$$\n\nIf a continuous-variable state *cannot* be represented in the above form (for example, a single photon Fock state or a cat state), then it is non-Gaussian.\n\n### The annihilation and creation operator basis\n\nIf we are instead working in the creation and annihilation operator basis, we can use the transformation of the single mode squeezing operator\n\n$$ S(\\xi) \\left[\\begin{matrix}\\hat{a}\\\\\\hat{a}^\\dagger\\end{matrix}\\right] = \\left[\\begin{matrix}\\cosh(r)&-e^{i\\phi}\\sinh(r)\\\\-e^{-i\\phi}\\sinh(r)&\\cosh(r)\\end{matrix}\\right] \\left[\\begin{matrix}\\hat{a}\\\\\\hat{a}^\\dagger\\end{matrix}\\right]$$\n\nresulting in\n\n$$\\sigma = SS^\\dagger = \\left[\\begin{matrix}\\cosh(2r)&-e^{i\\phi}\\sinh(2r)\\\\-e^{-i\\phi}\\sinh(2r)&\\cosh(2r)\\end{matrix}\\right]$$\n\nFor multiple Gaussian states with non-zero squeezing, the covariance matrix in this basis simply generalises to\n\n$$\\sigma = \\text{diag}(S_1S_1^\\dagger,\\dots,S_NS_N^\\dagger)\\in\\mathbb{C}^{2N\\times 2N}$$",
"_____no_output_____"
],
[
"## Introduction to Gaussian boson sampling\n---\n\n<div class=\"alert alert-info\">\n“If you need to wait exponential time for \\[your single photon sources to emit simultaneously\\], then there would seem to be no advantage over classical computation. This is the reason why so far, boson sampling has only been demonstrated with 3-4 photons. When faced with these problems, until recently, all we could do was shrug our shoulders.” - [Scott Aaronson](https://www.scottaaronson.com/blog/?p=1579)\n</div>\n\nWhile [boson sampling](https://en.wikipedia.org/wiki/Boson_sampling) allows the experimental implementation of a quantum sampling problem that it countably hard classically, one of the main issues it has in experimental setups is one of **scalability**, due to its dependence on an array of simultaneously emitting single photon sources.\n\nCurrently, most physical implementations of boson sampling make use of a process known as [Spontaneous Parametric Down-Conversion](http://en.wikipedia.org/wiki/Spontaneous_parametric_down-conversion) to generate the single photon source inputs. Unfortunately, this method is non-deterministic - as the number of modes in the apparatus increases, the average time required until every photon source emits a simultaneous photon increases *exponentially*.\n\nIn order to simulate a *deterministic* single photon source array, several variations on boson sampling have been proposed; the most well known being scattershot boson sampling ([Lund, 2014](https://link.aps.org/doi/10.1103/PhysRevLett.113.100502)). However, a recent boson sampling variation by [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) negates the need for single photon Fock states altogether, by showing that **incident Gaussian states** - in this case, single mode squeezed states - can produce problems in the same computational complexity class as boson sampling. Even more significantly, this negates the scalability problem with single photon sources, as single mode squeezed states can be easily simultaneously generated experimentally.\n\nAside from changing the input states from single photon Fock states to Gaussian states, the Gaussian boson sampling scheme appears quite similar to that of boson sampling:\n\n1. $N$ single mode squeezed states $\\left|{\\xi_i}\\right\\rangle$, with squeezing parameters $\\xi_i=r_ie^{i\\phi_i}$, enter an $N$ mode linear interferometer with unitary $U$.\n <br>\n \n2. The output of the interferometer is denoted $\\left|{\\psi'}\\right\\rangle$. Each output mode is then measured in the Fock basis, $\\bigotimes_i n_i\\left|{n_i}\\middle\\rangle\\middle\\langle{n_i}\\right|$.\n\nWithout loss of generality, we can absorb the squeezing parameter $\\phi$ into the interferometer, and set $\\phi=0$ for convenience. The covariance matrix **in the creation and annihilation operator basis** at the output of the interferometer is then given by:\n\n$$\\sigma_{out} = \\frac{1}{2} \\left[ \\begin{matrix}U&0\\\\0&U^*\\end{matrix} \\right]\\sigma_{in} \\left[ \\begin{matrix}U^\\dagger&0\\\\0&U^T\\end{matrix} \\right]$$\n\nUsing phase space methods, [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) showed that the probability of measuring a Fock state is given by\n\n$$\\left|\\left\\langle{n_1,n_2,\\dots,n_N}\\middle|{\\psi'}\\right\\rangle\\right|^2 = \\frac{\\left|\\text{Haf}[(U\\bigoplus_i\\tanh(r_i)U^T)]_{st}\\right|^2}{n_1!n_2!\\cdots n_N!\\sqrt{|\\sigma_{out}+I/2|}},$$\n\ni.e. 
the sampled single photon probability distribution is proportional to the **Hafnian** of a submatrix of $U\\bigoplus_i\\tanh(r_i)U^T$, dependent upon the output covariance matrix.\n\n<div class=\"alert alert-success\" style=\"border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9\">\n\n<p style=\"color: #119a68;\">**The Hafnian**</p>\n\nThe Hafnian of a matrix is defined by\n<br><br>\n$$\\text{Haf}(A) = \\frac{1}{n!2^n}\\sum_{\\sigma=S_{2N}}\\prod_{i=1}^N A_{\\sigma(2i-1)\\sigma(2i)}$$\n<br>\n\n$S_{2N}$ is the set of all permutations of $2N$ elements. In graph theory, the Hafnian calculates the number of perfect <a href=\"https://en.wikipedia.org/wiki/Matching_(graph_theory)\">matchings</a> in an **arbitrary graph** with adjacency matrix $A$.\n<br>\n\nCompare this to the permanent, which calculates the number of perfect matchings on a *bipartite* graph - the Hafnian turns out to be a generalisation of the permanent, with the relationship\n\n$$\\begin{align}\n\\text{Per(A)} = \\text{Haf}\\left(\\left[\\begin{matrix}\n0&A\\\\\nA^T&0\n\\end{matrix}\\right]\\right)\n\\end{align}$$\n\nAs any algorithm that could calculate (or even approximate) the Hafnian could also calculate the permanent - a #P problem - it follows that calculating or approximating the Hafnian must also be a classically hard problem.\n</div>\n\n### Equally squeezed input states\n\nIn the case where all the input states are squeezed equally with squeezing factor $\\xi=r$ (i.e. so $\\phi=0$), we can simplify the denominator into a much nicer form. It can be easily seen that, due to the unitarity of $U$,\n\n$$\\left[ \\begin{matrix}U&0\\\\0&U^*\\end{matrix} \\right] \\left[ \\begin{matrix}U^\\dagger&0\\\\0&U^T\\end{matrix} \\right] = \\left[ \\begin{matrix}UU^\\dagger&0\\\\0&U^*U^T\\end{matrix} \\right] =I$$\n\nThus, we have \n\n$$\\begin{align}\n\\sigma_{out} +\\frac{1}{2}I &= \\sigma_{out} + \\frac{1}{2} \\left[ \\begin{matrix}U&0\\\\0&U^*\\end{matrix} \\right] \\left[ \\begin{matrix}U^\\dagger&0\\\\0&U^T\\end{matrix} \\right] = \\left[ \\begin{matrix}U&0\\\\0&U^*\\end{matrix} \\right] \\frac{1}{2} \\left(\\sigma_{in}+I\\right) \\left[ \\begin{matrix}U^\\dagger&0\\\\0&U^T\\end{matrix} \\right]\n\\end{align}$$\n\nwhere we have subtituted in the expression for $\\sigma_{out}$. Taking the determinants of both sides, the two block diagonal matrices containing $U$ are unitary, and thus have determinant 1, resulting in\n\n$$\\left|\\sigma_{out} +\\frac{1}{2}I\\right| =\\left|\\frac{1}{2}\\left(\\sigma_{in}+I\\right)\\right|=\\left|\\frac{1}{2}\\left(SS^\\dagger+I\\right)\\right| $$\n\nBy expanding out the right hand side, and using various trig identities, it is easy to see that this simply reduces to $\\cosh^{2N}(r)$ where $N$ is the number of modes; thus the Gaussian boson sampling problem in the case of equally squeezed input modes reduces to\n\n$$\\left|\\left\\langle{n_1,n_2,\\dots,n_N}\\middle|{\\psi'}\\right\\rangle\\right|^2 = \\frac{\\left|\\text{Haf}[(UU^T\\tanh(r))]_{st}\\right|^2}{n_1!n_2!\\cdots n_N!\\cosh^N(r)},$$",
"_____no_output_____"
],
[
"## The Gaussian boson sampling circuit\n---\nThe multimode linear interferometer can be decomposed into two-mode beamsplitters (`BSgate`) and single-mode phase shifters (`Rgate`) (<a href=\"https://doi.org/10.1103/physrevlett.73.58\">Reck, 1994</a>), allowing for an almost trivial translation into a continuous-variable quantum circuit.\n\nFor example, in the case of a 4 mode interferometer, with arbitrary $4\\times 4$ unitary $U$, the continuous-variable quantum circuit for Gaussian boson sampling is given by\n\n<img src=\"https://s3.amazonaws.com/xanadu-img/gaussian_boson_sampling.svg\" width=70%/>\n\nIn the above,\n\n* the single mode squeeze states all apply identical squeezing $\\xi=r$,\n* the detectors perform Fock state measurements (i.e. measuring the photon number of each mode),\n* the parameters of the beamsplitters and the rotation gates determines the unitary $U$.\n\nFor $N$ input modes, we must have a minimum of $N$ columns in the beamsplitter array ([Clements, 2016](https://arxiv.org/abs/1603.08788)).",
"_____no_output_____"
],
[
"## Simulating boson sampling in Strawberry Fields\n---\n\n",
"_____no_output_____"
]
],
[
[
"import strawberryfields as sf\nfrom strawberryfields.ops import *\nfrom strawberryfields.utils import random_interferometer",
"_____no_output_____"
]
],
[
[
"Strawberry Fields makes this easy; there is an `Interferometer` quantum operation, and a utility function that allows us to generate the matrix representing a random interferometer.",
"_____no_output_____"
]
],
[
[
"U = random_interferometer(4)",
"_____no_output_____"
]
],
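[
[
"As a quick sanity check (an addition, not in the original tutorial), we can verify numerically that the randomly generated interferometer matrix is unitary:",
"_____no_output_____"
]
],
[
[
"# Check that U is unitary: U U^dagger should be (numerically) the identity.\nimport numpy as np\nnp.allclose(U @ U.conj().T, np.eye(4))",
"_____no_output_____"
]
],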
[
[
"The lack of Fock states and non-linear operations means we can use the Gaussian backend to simulate Gaussian boson sampling. In this example program, we are using input states with squeezing parameter $\\xi=1$, and the randomly chosen interferometer generated above.",
"_____no_output_____"
]
],
[
[
"eng, q = sf.Engine(4)\n\nwith eng:\n # prepare the input squeezed states\n S = Sgate(1)\n All(S) | q\n\n # interferometer\n Interferometer(U) | q\n \nstate = eng.run('gaussian')",
"_____no_output_____"
]
],
[
[
"We can see the decomposed beamsplitters and rotation gates, by calling `eng.print_applied()`:",
"_____no_output_____"
]
],
[
[
"eng.print_applied()",
"Run 0:\nSgate(1, 0) | (q[0])\nSgate(1, 0) | (q[1])\nSgate(1, 0) | (q[2])\nSgate(1, 0) | (q[3])\nRgate(-1.77) | (q[0])\nBSgate(0.3621, 0) | (q[0], q[1])\nRgate(0.4065) | (q[2])\nBSgate(0.7524, 0) | (q[2], q[3])\nRgate(-0.5894) | (q[1])\nBSgate(0.9441, 0) | (q[1], q[2])\nRgate(0.2868) | (q[0])\nBSgate(0.8913, 0) | (q[0], q[1])\nRgate(-1.631) | (q[0])\nRgate(-1.74) | (q[1])\nRgate(3.074) | (q[2])\nRgate(-0.9618) | (q[3])\nBSgate(-1.482, 0) | (q[2], q[3])\nRgate(2.383) | (q[2])\nBSgate(-0.9124, 0) | (q[1], q[2])\nRgate(2.188) | (q[1])\n"
]
],
[
[
"<div class=\"alert alert-success\" style=\"border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9\">\n<p style=\"color: #119a68;\">**Available decompositions**</p>\n\nCheck out our <a href=\"https://strawberryfields.readthedocs.io/en/stable/conventions/decompositions.html\">documentation</a> to see the available CV decompositions available in Strawberry Fields.\n</div>\n",
"_____no_output_____"
],
[
"## Analysis\n---\n\nLet's now verify the Gaussian boson sampling result, by comparing the output Fock state probabilities to the Hafnian, using the relationship\n\n$$\\left|\\left\\langle{n_1,n_2,\\dots,n_N}\\middle|{\\psi'}\\right\\rangle\\right|^2 = \\frac{\\left|\\text{Haf}[(UU^T\\tanh(r))]_{st}\\right|^2}{n_1!n_2!\\cdots n_N!\\cosh^N(r)}$$",
"_____no_output_____"
],
[
"### Calculating the Hafnian\n\nFor the right hand side numerator, we first calculate the submatrix $[(UU^T\\tanh(r))]_{st}$:",
"_____no_output_____"
]
],
[
[
"B = (np.dot(U, U.T) * np.tanh(1))",
"_____no_output_____"
]
],
[
[
"In Gaussian boson sampling, we determine the submatrix by taking the rows and columns corresponding to the measured Fock state. For example, to calculate the submatrix in the case of the output measurement $\\left|{1,1,0,0}\\right\\rangle$,",
"_____no_output_____"
]
],
[
[
"B[:,[0,1]][[0,1]]",
"_____no_output_____"
]
],
[
[
"To calculate the Hafnian in Python, we can use the direct definition\n\n$$\\text{Haf}(A) = \\frac{1}{n!2^n} \\sum_{\\sigma \\in S_{2n}} \\prod_{j=1}^n A_{\\sigma(2j - 1), \\sigma(2j)}$$\n\nNotice that this function counts each term in the definition multiple times, and renormalizes to remove the multiple counts by dividing by a factor $\\frac{1}{n!2^n}$. **This function is extremely slow!**",
"_____no_output_____"
]
],
[
[
"from itertools import permutations\nfrom scipy.special import factorial\n\ndef Haf(M):\n n=len(M)\n m=int(n/2)\n haf=0.0\n for i in permutations(range(n)):\n prod=1.0\n for j in range(m):\n prod*=M[i[2*j],i[2*j+1]]\n haf+=prod\n return haf/(factorial(m)*(2**m))",
"_____no_output_____"
]
],
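[
[
"Before using it, we can sanity-check the direct implementation on a small case: for a $4\\times 4$ symmetric matrix, the Hafnian is the sum over the three perfect matchings, $A_{01}A_{23}+A_{02}A_{13}+A_{03}A_{12}$.",
"_____no_output_____"
]
],
[
[
"# Sanity check of Haf on a 4x4 symmetric matrix, where the Hafnian equals\n# A[0,1]*A[2,3] + A[0,2]*A[1,3] + A[0,3]*A[1,2].\nimport numpy as np\nA = np.random.rand(4, 4)\nA = A + A.T  # symmetrize\nexpected = A[0,1]*A[2,3] + A[0,2]*A[1,3] + A[0,3]*A[1,2]\nnp.allclose(Haf(A), expected)",
"_____no_output_____"
]
],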
[
[
"## Comparing to the SF result",
"_____no_output_____"
],
[
"In Strawberry Fields, both Fock and Gaussian states have the method `fock_prob()`, which returns the probability of measuring that particular Fock state.\n\n#### Let's compare the case of measuring at the output state $\\left|0,1,0,1\\right\\rangle$:",
"_____no_output_____"
]
],
[
[
"B = (np.dot(U,U.T) * np.tanh(1))[:, [1,3]][[1,3]]\nnp.abs(Haf(B))**2 / np.cosh(1)**4",
"_____no_output_____"
],
[
"state.fock_prob([0,1,0,1])",
"_____no_output_____"
]
],
[
[
"#### For the measurement result $\\left|2,0,0,0\\right\\rangle$:",
"_____no_output_____"
]
],
[
[
"B = (np.dot(U,U.T) * np.tanh(1))[:, [0,0]][[0,0]]\nnp.abs(Haf(B))**2 / (2*np.cosh(1)**4)",
"_____no_output_____"
],
[
"state.fock_prob([2,0,0,0])",
"_____no_output_____"
]
],
[
[
"#### For the measurement result $\\left|1,1,0,0\\right\\rangle$:",
"_____no_output_____"
]
],
[
[
"B = (np.dot(U,U.T) * np.tanh(1))[:, [0,1]][[0,1]]\nnp.abs(Haf(B))**2 / np.cosh(1)**4",
"_____no_output_____"
],
[
"state.fock_prob([1,1,0,0])",
"_____no_output_____"
]
],
[
[
"#### For the measurement result $\\left|1,1,1,1\\right\\rangle$, this corresponds to the full matrix $B$:",
"_____no_output_____"
]
],
[
[
"B = (np.dot(U,U.T) * np.tanh(1))\nnp.abs(Haf(B))**2 / np.cosh(1)**4",
"_____no_output_____"
],
[
"state.fock_prob([1,1,1,1])",
"_____no_output_____"
]
],
[
[
"#### For the measurement result $\\left|0,0,0,0\\right\\rangle$, this corresponds to a **null** submatrix, which has a Hafnian of 1:",
"_____no_output_____"
]
],
[
[
"1/np.cosh(1)**4",
"_____no_output_____"
],
[
"state.fock_prob([0,0,0,0])",
"_____no_output_____"
]
],
[
[
"As you can see, like in the boson sampling tutorial, they agree with almost negligible difference.\n\n<div class=\"alert alert-success\" style=\"border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9\">\n<p style=\"color: #119a68;\">**Exercises**</p>\n\nRepeat this notebook with \n<ol>\n <li> A Fock backend such as NumPy, instead of the Gaussian backend</li>\n <li> Different beamsplitter and rotation parameters</li>\n <li> Input states with *differing* squeezed values $r_i$. You will need to modify the code to take into account the fact that the output covariance matrix determinant must now be calculated!</li>\n</ol>\n</div>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
d04a9f73ea2f0dbc36b8961276125a1743cf84c1 | 55,006 | ipynb | Jupyter Notebook | [Preliminary] 00 Linear regression with pytorch.ipynb | Junyoungpark/2021-lg-AI-camp | 3c0e5dd689e8e3dd61cc80243ad90cab951c06de | [
"MIT"
] | 4 | 2021-11-14T14:25:02.000Z | 2021-11-23T06:23:51.000Z | [Preliminary] 00 Linear regression with pytorch.ipynb | Junyoungpark/2021-lg-AI-camp | 3c0e5dd689e8e3dd61cc80243ad90cab951c06de | [
"MIT"
] | null | null | null | [Preliminary] 00 Linear regression with pytorch.ipynb | Junyoungpark/2021-lg-AI-camp | 3c0e5dd689e8e3dd61cc80243ad90cab951c06de | [
"MIT"
] | 2 | 2021-11-15T02:11:21.000Z | 2021-11-15T23:57:47.000Z | 66.59322 | 34,012 | 0.789587 | [
[
[
"import torch\nimport torch.nn as nn\n\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"# Pytorch: An automatic differentiation tool\n\nWith `Pytorch`, you can compute derivatives of complicated functions easily and efficiently!\nWhen training complex deep neural networks with `Pytorch`, you can easily compute the partial derivatives of the loss function with respect to the parameters!",
"_____no_output_____"
],
[
"## A first look at Pytorch\n\nSuppose we are given a simple linear expression like the one below:\n$$ y = wx $$ \n\nHow can we then compute $\\frac{\\partial y}{\\partial w}$?\nDifferentiating by hand gives $\\frac{\\partial y}{\\partial w} = x$, so let's see how to compute this value with `pytorch` in a simple example!",
"_____no_output_____"
]
],
[
[
"# Create a rank-1 / size-1 pytorch tensor whose value is 1*2 = 2.\nx = torch.ones(1) * 2\n\n# Create a rank-1 / size-1 pytorch tensor whose value is 1, and track its gradient.\nw = torch.ones(1, requires_grad=True)\n\ny = w * x",
"_____no_output_____"
],
[
"y",
"_____no_output_____"
]
],
[
[
"## Computing partial derivatives!\n\nIn pytorch, calling `.backward()` on the tensor whose derivatives you want computes the partial derivatives with respect to every connected tensor that requires a `gradient`. You can specify which tensors should receive gradients by setting `requires_grad=True`.",
"_____no_output_____"
]
],
[
[
"y.backward()",
"_____no_output_____"
]
],
[
[
"## Checking the gradient values!\n\nYou can inspect a tensor's gradient through `tensor.grad`. Shall we use `w.grad` to check the partial derivative of `y` with respect to `w`?",
"_____no_output_____"
]
],
[
[
"w.grad",
"_____no_output_____"
]
],
[
[
"## What about the case requires_grad = False?",
"_____no_output_____"
]
],
[
[
"x.grad",
"_____no_output_____"
]
],
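[
[
"`x.grad` is `None` here because `x` was created with the default `requires_grad=False`, so no gradient is tracked for it. A small sketch (with new tensors that are not used later) of what happens when the input also requires a gradient:",
"_____no_output_____"
]
],
[
[
"# Sketch: make the input a leaf tensor with requires_grad=True as well.\nx2 = torch.tensor([2.0], requires_grad=True)\nw2 = torch.ones(1, requires_grad=True)\ny2 = w2 * x2\ny2.backward()\nw2.grad, x2.grad  # dy/dw = x = 2, dy/dx = w = 1",
"_____no_output_____"
]
],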
[
[
"## `torch.nn`, the Neural Network package\n\n`pytorch` already implements modules for a wide variety of neural networks. Let's look at `nn.Linear`, one of the simplest but most frequently used modules, and use it to get to know `pytorch`'s `nn.Module`.",
"_____no_output_____"
],
[
"## A look at `nn.Linear`\n\n`nn.Linear` holds the parameters $w$ and $b$ corresponding to a single layer of the linear regression and multilayer perceptron models we studied earlier. As an example, let's create an `nn.Linear` module whose input dimension is 10 and output dimension is 1!",
"_____no_output_____"
]
],
[
[
"lin = nn.Linear(in_features=10, out_features=1)",
"_____no_output_____"
],
[
"for p in lin.parameters():\n print(p)\n print(p.shape)\n print('\\n')",
"Parameter containing:\ntensor([[ 0.0561, 0.1509, 0.0586, -0.0598, -0.1934, 0.2985, -0.0112, 0.0390,\n 0.2597, -0.1488]], requires_grad=True)\ntorch.Size([1, 10])\n\n\nParameter containing:\ntensor([-0.2357], requires_grad=True)\ntorch.Size([1])\n\n\n"
]
],
[
[
"## Computing $y = Wx+b$ with the `Linear` module\n\nJust as in the linear regression model, remember that each layer of a multilayer perceptron computes the expression below:\n$$y = Wx+b$$\n\nShall we compute this expression with `nn.Linear`?\n\nTo make the result easy to check, we set all entries of W to 1.0 and b to 5.0.",
"_____no_output_____"
]
],
[
[
"lin.weight.data = torch.ones_like(lin.weight.data)\nlin.bias.data = torch.ones_like(lin.bias.data) * 5.0\n\nfor p in lin.parameters():\n print(p)\n print(p.shape)\n print('\\n')",
"Parameter containing:\ntensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]], requires_grad=True)\ntorch.Size([1, 10])\n\n\nParameter containing:\ntensor([5.], requires_grad=True)\ntorch.Size([1])\n\n\n"
],
[
"x = torch.ones(3, 10) # rank2 tensor를 만듭니다. : mini batch size = 3\ny_hat = lin(x)",
"_____no_output_____"
],
[
"print(y_hat.shape)\nprint(y_hat)",
"torch.Size([3, 1])\ntensor([[15.],\n [15.],\n [15.]], grad_fn=<AddmmBackward>)\n"
]
],
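[
[
"As a quick check (an addition to the original notebook), we can reproduce `lin(x)` by hand with the formula $y = xW^T + b$ and compare it with the module output:",
"_____no_output_____"
]
],
[
[
"# Manual check: nn.Linear computes y = x @ W.T + b.\nmanual = x @ lin.weight.data.T + lin.bias.data\ntorch.allclose(manual, lin(x))",
"_____no_output_____"
]
],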
[
[
"## What just happened here?\n\n>Q1. Why do we use a rank-2 tensor as the input? <br>\n>A1. The classes defined in pytorch's `nn` interpret the very first dimension of the input as the `batch size`. \n\n>Q2. What exactly is lin(x)? <br>\n>A2. If you are familiar with Python, you will know that `object()` runs the function defined in `object.__call__()`. Pytorch's `nn.Module` __recommends__ implementing `forward()`, which is invoked through `__call__()`. In general, the actual layer computation with the parameters and the input is implemented inside `forward()`.\n\nThere are several reasons for this, but pytorch internally performs additional work before and after running forward() to provide a user-friendly environment. We will explain this part in a bit more detail in the next session while building a multilayer perceptron model!",
"_____no_output_____"
],
[
"## Implementing linear regression simply with Pytorch\n\nShall we implement the Linear regression model that we built with numpy in the previous session, this time with pytorch? <br>\nIt is simple enough to finish in just a few lines :)",
"_____no_output_____"
]
],
[
[
"def generate_samples(n_samples: int, \n                     w: float = 1.0, \n                     b: float = 0.5,\n                     x_range=[-1.0,1.0]):\n    \n    xs = np.random.uniform(low=x_range[0], high=x_range[1], size=n_samples)\n    ys = w * xs + b\n    \n    xs = torch.tensor(xs).view(-1,1).float() # pytorch nn.Module expects the batch as the first dimension!\n    ys = torch.tensor(ys).view(-1,1).float()\n    return xs, ys",
"_____no_output_____"
],
[
"w = 1.0\nb = 0.5\nxs, ys = generate_samples(30, w=w, b=b)",
"_____no_output_____"
],
[
"lin_model = nn.Linear(in_features=1, out_features=1) # create lin_model\n\nfor p in lin_model.parameters():\n    print(p)\n    print(p.grad)",
"Parameter containing:\ntensor([[-0.5781]], requires_grad=True)\nNone\nParameter containing:\ntensor([0.6029], requires_grad=True)\nNone\n"
],
[
"ys_hat = lin_model(xs) # predict with lin_model",
"_____no_output_____"
]
],
[
[
"## Which loss function? MSE!\n\n`pytorch` also ships with implementations of the commonly used loss functions.\nIn this session, shall we use the MSE that appeared in __building a linear regression model with numpy__ as our loss function?",
"_____no_output_____"
]
],
[
[
"criteria = nn.MSELoss()\nloss = criteria(ys_hat, ys)",
"_____no_output_____"
]
],
[
[
"## Updating the parameters with gradient descent!\n\n`pytorch` implements a variety of optimizers for you. Let's start with the simplest one, stochastic gradient descent (SGD). Each optimizer accepts different arguments, but basically, once you specify `params` and `lr`, the optimizer can easily be created with the remaining arguments set to defaults that are known to work well for that optimizer.",
"_____no_output_____"
]
],
[
[
"opt = torch.optim.SGD(params=lin_model.parameters(), lr=0.01)",
"_____no_output_____"
]
],
[
[
"## Don't forget opt.zero_grad()!\n\nBefore computing partial derivatives with `pytorch`, it is strongly recommended to call `opt.zero_grad()` to reset the gradients of the tensors whose partial derivatives need to be computed.",
"_____no_output_____"
]
],
[
[
"opt.zero_grad()\nfor p in lin_model.parameters():\n print(p)\n print(p.grad)",
"Parameter containing:\ntensor([[-0.5781]], requires_grad=True)\nNone\nParameter containing:\ntensor([0.6029], requires_grad=True)\nNone\n"
],
[
"loss.backward()",
"_____no_output_____"
],
[
"opt.step()\nfor p in lin_model.parameters():\n print(p)\n print(p.grad)",
"Parameter containing:\ntensor([[-0.5666]], requires_grad=True)\ntensor([[-1.1548]])\nParameter containing:\ntensor([0.6042], requires_grad=True)\ntensor([-0.1280])\n"
]
],
[
[
"## Let's find the optimal parameters with gradient descent!",
"_____no_output_____"
]
],
[
[
"def run_sgd(n_steps: int = 1000,\n report_every: int = 100,\n verbose=True):\n \n lin_model = nn.Linear(in_features=1, out_features=1)\n opt = torch.optim.SGD(params=lin_model.parameters(), lr=0.01)\n sgd_losses = []\n\n for i in range(n_steps):\n ys_hat = lin_model(xs)\n loss = criteria(ys_hat, ys)\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n if i % report_every == 0:\n if verbose:\n print('\\n')\n print(\"{}th update: {}\".format(i,loss))\n for p in lin_model.parameters():\n print(p)\n sgd_losses.append(loss.log10().detach().numpy())\n return sgd_losses",
"_____no_output_____"
],
[
"_ = run_sgd()",
"\n\n0th update: 0.8393566012382507\nParameter containing:\ntensor([[0.1211]], requires_grad=True)\nParameter containing:\ntensor([-0.1363], requires_grad=True)\n\n\n100th update: 0.060856711119413376\nParameter containing:\ntensor([[0.6145]], requires_grad=True)\nParameter containing:\ntensor([0.4634], requires_grad=True)\n\n\n200th update: 0.012306183576583862\nParameter containing:\ntensor([[0.8169]], requires_grad=True)\nParameter containing:\ntensor([0.5173], requires_grad=True)\n\n\n300th update: 0.002916797064244747\nParameter containing:\ntensor([[0.9110]], requires_grad=True)\nParameter containing:\ntensor([0.5130], requires_grad=True)\n\n\n400th update: 0.0006996632437221706\nParameter containing:\ntensor([[0.9565]], requires_grad=True)\nParameter containing:\ntensor([0.5069], requires_grad=True)\n\n\n500th update: 0.00016797447460703552\nParameter containing:\ntensor([[0.9787]], requires_grad=True)\nParameter containing:\ntensor([0.5035], requires_grad=True)\n\n\n600th update: 4.032816650578752e-05\nParameter containing:\ntensor([[0.9896]], requires_grad=True)\nParameter containing:\ntensor([0.5017], requires_grad=True)\n\n\n700th update: 9.681772098701913e-06\nParameter containing:\ntensor([[0.9949]], requires_grad=True)\nParameter containing:\ntensor([0.5008], requires_grad=True)\n\n\n800th update: 2.3242985207616584e-06\nParameter containing:\ntensor([[0.9975]], requires_grad=True)\nParameter containing:\ntensor([0.5004], requires_grad=True)\n\n\n900th update: 5.579695425694808e-07\nParameter containing:\ntensor([[0.9988]], requires_grad=True)\nParameter containing:\ntensor([0.5002], requires_grad=True)\n"
]
],
[
[
"## Shall we try another optimizer?\n\nWhat results do we get if we optimize with Adam, which we learned about in class?",
"_____no_output_____"
]
],
[
[
"def run_adam(n_steps: int = 1000,\n report_every: int = 100,\n verbose=True):\n \n lin_model = nn.Linear(in_features=1, out_features=1)\n opt = torch.optim.Adam(params=lin_model.parameters(), lr=0.01)\n adam_losses = []\n\n for i in range(n_steps):\n ys_hat = lin_model(xs)\n loss = criteria(ys_hat, ys)\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n if i % report_every == 0:\n if verbose:\n print('\\n')\n print(\"{}th update: {}\".format(i,loss))\n for p in lin_model.parameters():\n print(p)\n adam_losses.append(loss.log10().detach().numpy())\n \n \n \n return adam_losses",
"_____no_output_____"
],
[
"_ = run_adam()",
"\n\n0th update: 1.2440284490585327\nParameter containing:\ntensor([[0.4118]], requires_grad=True)\nParameter containing:\ntensor([-0.4825], requires_grad=True)\n\n\n100th update: 0.05024972930550575\nParameter containing:\ntensor([[1.0383]], requires_grad=True)\nParameter containing:\ntensor([0.2774], requires_grad=True)\n\n\n200th update: 0.0004788984660990536\nParameter containing:\ntensor([[1.0159]], requires_grad=True)\nParameter containing:\ntensor([0.4793], requires_grad=True)\n\n\n300th update: 4.6914931317587616e-07\nParameter containing:\ntensor([[1.0005]], requires_grad=True)\nParameter containing:\ntensor([0.4994], requires_grad=True)\n\n\n400th update: 3.263671667988466e-12\nParameter containing:\ntensor([[1.0000]], requires_grad=True)\nParameter containing:\ntensor([0.5000], requires_grad=True)\n\n\n500th update: 4.133082697160666e-14\nParameter containing:\ntensor([[1.0000]], requires_grad=True)\nParameter containing:\ntensor([0.5000], requires_grad=True)\n\n\n600th update: 4.133082697160666e-14\nParameter containing:\ntensor([[1.0000]], requires_grad=True)\nParameter containing:\ntensor([0.5000], requires_grad=True)\n\n\n700th update: 4.133082697160666e-14\nParameter containing:\ntensor([[1.0000]], requires_grad=True)\nParameter containing:\ntensor([0.5000], requires_grad=True)\n\n\n800th update: 4.133082697160666e-14\nParameter containing:\ntensor([[1.0000]], requires_grad=True)\nParameter containing:\ntensor([0.5000], requires_grad=True)\n\n\n900th update: 4.133082697160666e-14\nParameter containing:\ntensor([[1.0000]], requires_grad=True)\nParameter containing:\ntensor([0.5000], requires_grad=True)\n"
]
],
[
[
"## Shall we compare in more detail?\n\nIn `pytorch`, unless you do something special, the parameters inside `nn.Linear` and most other modules are initialized to random values, and initialized __well!__ \n\n> We did not cover what \"well!\" means in class, but it is certainly one of the important factors that make modern deep learning work. These techniques are called parameter initialization, and most `pytorch` modules are coded so that their parameters are initialized in a way that is known to generally work well for that particular module.\n\nAs a result, the initial parameter values differ every time you create a module. This time, for a fairer comparison, let's repeat the experiment above several times and check whether Adam is also better on average.",
"_____no_output_____"
]
],
[
[
"sgd_losses = [run_sgd(verbose=False) for _ in range(50)]\nsgd_losses = np.stack(sgd_losses)\nsgd_loss_mean = np.mean(sgd_losses, axis=0)\nsgd_loss_std = np.std(sgd_losses, axis=-0)",
"_____no_output_____"
],
[
"adam_losses = [run_adam(verbose=False) for _ in range(50)]\nadam_losses = np.stack(adam_losses)\nadam_loss_mean = np.mean(adam_losses, axis=0)\nadam_loss_std = np.std(adam_losses, axis=-0)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1,1, figsize=(10,5))\nax.grid()\nax.fill_between(x=range(sgd_loss_mean.shape[0]),\n y1=sgd_loss_mean + sgd_loss_std,\n y2=sgd_loss_mean - sgd_loss_std,\n alpha=0.3)\nax.plot(sgd_loss_mean, label='SGD')\nax.fill_between(x=range(adam_loss_mean.shape[0]),\n y1=adam_loss_mean + adam_loss_std,\n y2=adam_loss_mean - adam_loss_std,\n alpha=0.3)\nax.plot(adam_loss_mean, label='Adam')\nax.legend()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d04aa135251aac0f548ff561ef66ff3bb077a627 | 19,749 | ipynb | Jupyter Notebook | 4. Deep Learning/IMDB_In_Keras.ipynb | Arwa-Ibrahim/ML_Nano_Projects | 7f335b352cc4b335ae97aea2bf962188bc454204 | [
"MIT"
] | null | null | null | 4. Deep Learning/IMDB_In_Keras.ipynb | Arwa-Ibrahim/ML_Nano_Projects | 7f335b352cc4b335ae97aea2bf962188bc454204 | [
"MIT"
] | null | null | null | 4. Deep Learning/IMDB_In_Keras.ipynb | Arwa-Ibrahim/ML_Nano_Projects | 7f335b352cc4b335ae97aea2bf962188bc454204 | [
"MIT"
] | null | null | null | 46.57783 | 1,805 | 0.342498 | [
[
[
"# Analyzing IMDB Data in Keras",
"_____no_output_____"
]
],
[
[
"# Imports\nimport numpy as np\nimport keras\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.preprocessing.text import Tokenizer\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nnp.random.seed(42)",
"Using TensorFlow backend.\n"
]
],
[
[
"## 1. Loading the data\nThis dataset comes preloaded with Keras, so one simple command will get us training and testing data. There is a parameter for how many words we want to look at. We've set it at 1000, but feel free to experiment.",
"_____no_output_____"
]
],
[
[
"# Loading the data (it's preloaded in Keras)\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=1000)\n\nprint(x_train.shape)\nprint(x_test.shape)",
"(25000,)\n(25000,)\n"
]
],
[
[
"## 2. Examining the data\nNotice that the data has already been pre-processed: each word has been mapped to a number, and each review comes in as a vector of the numbers of the words it contains. For example, if the word 'the' is the first one in our dictionary, and a review contains the word 'the', then there is a 1 in the corresponding vector.\n\nThe output comes as a vector of 1's and 0's, where 1 is a positive sentiment for the review, and 0 is negative.",
"_____no_output_____"
]
],
[
[
"print(x_train[0])\nprint(y_train[0])",
"[1, 11, 2, 11, 4, 2, 745, 2, 299, 2, 590, 2, 2, 37, 47, 27, 2, 2, 2, 19, 6, 2, 15, 2, 2, 17, 2, 723, 2, 2, 757, 46, 4, 232, 2, 39, 107, 2, 11, 4, 2, 198, 24, 4, 2, 133, 4, 107, 7, 98, 413, 2, 2, 11, 35, 781, 8, 169, 4, 2, 5, 259, 334, 2, 8, 4, 2, 10, 10, 17, 16, 2, 46, 34, 101, 612, 7, 84, 18, 49, 282, 167, 2, 2, 122, 24, 2, 8, 177, 4, 392, 531, 19, 259, 15, 934, 40, 507, 39, 2, 260, 77, 8, 162, 2, 121, 4, 65, 304, 273, 13, 70, 2, 2, 8, 15, 745, 2, 5, 27, 322, 2, 2, 2, 70, 30, 2, 88, 17, 6, 2, 2, 29, 100, 30, 2, 50, 21, 18, 148, 15, 26, 2, 12, 152, 157, 10, 10, 21, 19, 2, 46, 50, 5, 4, 2, 112, 828, 6, 2, 4, 162, 2, 2, 517, 6, 2, 7, 4, 2, 2, 4, 351, 232, 385, 125, 6, 2, 39, 2, 5, 29, 69, 2, 2, 6, 162, 2, 2, 232, 256, 34, 718, 2, 2, 8, 6, 226, 762, 7, 2, 2, 5, 517, 2, 6, 2, 7, 4, 351, 232, 37, 9, 2, 8, 123, 2, 2, 2, 188, 2, 857, 11, 4, 86, 22, 121, 29, 2, 2, 10, 10, 2, 61, 514, 11, 14, 22, 9, 2, 2, 14, 575, 208, 159, 2, 16, 2, 5, 187, 15, 58, 29, 93, 6, 2, 7, 395, 62, 30, 2, 493, 37, 26, 66, 2, 29, 299, 4, 172, 243, 7, 217, 11, 4, 2, 2, 22, 4, 2, 2, 13, 70, 243, 7, 2, 19, 2, 11, 15, 236, 2, 136, 121, 29, 5, 2, 26, 112, 2, 180, 34, 2, 2, 5, 320, 4, 162, 2, 568, 319, 4, 2, 2, 2, 269, 8, 401, 56, 19, 2, 16, 142, 334, 88, 146, 243, 7, 11, 2, 2, 150, 11, 4, 2, 2, 10, 10, 2, 828, 4, 206, 170, 33, 6, 52, 2, 225, 55, 117, 180, 58, 11, 14, 22, 48, 50, 16, 101, 329, 12, 62, 30, 35, 2, 2, 22, 2, 11, 4, 2, 2, 35, 735, 18, 118, 204, 881, 15, 291, 10, 10, 2, 82, 93, 52, 361, 7, 4, 162, 2, 2, 5, 4, 785, 2, 49, 7, 4, 172, 2, 7, 665, 26, 303, 343, 11, 23, 4, 2, 11, 192, 2, 11, 4, 2, 9, 44, 84, 24, 2, 54, 36, 66, 144, 11, 68, 205, 118, 602, 55, 729, 174, 8, 23, 4, 2, 10, 10, 2, 11, 4, 2, 127, 316, 2, 37, 16, 2, 19, 12, 150, 138, 426, 2, 2, 79, 49, 542, 162, 2, 2, 84, 11, 4, 392, 555]\n1\n"
]
],
[
[
"## 3. One-hot encoding the output\nHere, we'll turn the input vectors into (0,1)-vectors. For example, if the pre-processed vector contains the number 14, then in the processed vector, the 14th entry will be 1.",
"_____no_output_____"
]
],
[
[
"# One-hot encoding the output into vector mode, each of length 1000\ntokenizer = Tokenizer(num_words=1000)\nx_train = tokenizer.sequences_to_matrix(x_train, mode='binary')\nx_test = tokenizer.sequences_to_matrix(x_test, mode='binary')\nprint(x_train[0])",
"[ 0. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n 1. 1. 0. 1. 1. 1. 1. 0. 1. 1. 0. 1. 1. 0. 0. 1. 1. 1.\n 1. 1. 0. 1. 1. 0. 0. 0. 1. 0. 1. 1. 1. 1. 1. 0. 1. 0.\n 1. 1. 1. 0. 1. 0. 0. 1. 1. 0. 0. 1. 1. 0. 1. 1. 1. 0.\n 0. 0. 0. 0. 0. 1. 0. 1. 0. 0. 1. 0. 1. 0. 1. 0. 1. 0.\n 0. 0. 0. 1. 0. 0. 0. 0. 1. 0. 1. 1. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 1. 0. 0. 1. 1. 1. 0. 1.\n 0. 1. 0. 0. 0. 0. 0. 1. 0. 0. 1. 0. 1. 0. 0. 0. 1. 0.\n 1. 0. 1. 0. 1. 0. 1. 0. 1. 0. 0. 0. 0. 1. 0. 1. 0. 0.\n 1. 0. 0. 0. 0. 1. 0. 1. 1. 0. 1. 0. 1. 0. 0. 1. 0. 0.\n 1. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0.\n 1. 0. 0. 0. 0. 0. 1. 1. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0.\n 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 1. 0.\n 0. 0. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 1. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.\n 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 1. 1. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 1. 1. 0. 1. 0.\n 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.\n 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 1.\n 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 1. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.\n 0. 0. 0. 1. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 1. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
],
[
"print(x_train.shape)\nx_train[1]",
"(25000, 1000)\n"
]
],
[
[
"And we'll also one-hot encode the output.",
"_____no_output_____"
]
],
[
[
"# One-hot encoding the output\nnum_classes = 2\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\nprint(y_train.shape)\nprint(y_test.shape)",
"(25000, 2)\n(25000, 2)\n"
]
],
[
[
"## 4. Building the model architecture\nBuild a model here using sequential. Feel free to experiment with different layers and sizes! Also, experiment adding dropout to reduce overfitting.",
"_____no_output_____"
]
],
[
[
"# TODO: Build the model architecture\nmodel = Sequential()\nmodel.add(Dense(128, input_dim = x_train.shape[1]))\nmodel.add(Activation('relu'))\nmodel.add(Dense(2))\nmodel.add(Activation('softmax'))\n\n# TODO: Compile the model using a loss function and an optimizer.\nmodel.compile(loss = 'categorical_crossentropy', optimizer = 'Adam', metrics = ['accuracy'])",
"_____no_output_____"
]
],
[
[
"## 5. Training the model\nRun the model here. Experiment with different batch_size, and number of epochs!",
"_____no_output_____"
]
],
[
[
"# TODO: Run the model. Feel free to experiment with different batch sizes and number of epochs.\nmodel.fit(x_train, y_train, 10000 , verbose = 0)",
"_____no_output_____"
]
],
[
[
"## 6. Evaluating the model\nThis will give you the accuracy of the model, as evaluated on the testing set. Can you get something over 85%?",
"_____no_output_____"
]
],
[
[
"score = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Accuracy: \", score[1])",
"Accuracy: 0.85832\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
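The markdown in the notebook above explains that each review becomes a fixed-length 0/1 vector ("if the pre-processed vector contains the number 14, then in the processed vector, the 14th entry will be 1"). A minimal NumPy sketch of what `Tokenizer.sequences_to_matrix(..., mode='binary')` computes, for readers who want the encoding spelled out; the function and variable names here are illustrative, not taken from the notebook:

```python
import numpy as np

def multi_hot(sequences, num_words=1000):
    # One row per review; column j is 1 if word index j occurs in that review.
    out = np.zeros((len(sequences), num_words), dtype=np.float32)
    for row, seq in enumerate(sequences):
        for idx in seq:
            if idx < num_words:   # indices beyond the vocabulary cap are ignored
                out[row, idx] = 1.0
    return out

# A toy "review" containing word index 14 sets entry 14 to 1.
print(multi_hot([[1, 14, 22]], num_words=16)[0])
```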
d04ab7d0f4272a88ca7d9d07204a815e2027d608 | 3,421 | ipynb | Jupyter Notebook | gail_experts/Untitled.ipynb | Bonennult/ppo-sample | 816d631dd84917b2b6a13b43bd79734caf9e4063 | [
"MIT"
] | null | null | null | gail_experts/Untitled.ipynb | Bonennult/ppo-sample | 816d631dd84917b2b6a13b43bd79734caf9e4063 | [
"MIT"
] | null | null | null | gail_experts/Untitled.ipynb | Bonennult/ppo-sample | 816d631dd84917b2b6a13b43bd79734caf9e4063 | [
"MIT"
] | null | null | null | 28.272727 | 91 | 0.474715 | [
[
[
"# 查看 h5 文件内容\n\nimport argparse\nimport os\nimport sys\n\nimport h5py\nimport numpy as np\nimport torch\n\n\nh5_file = 'trajs_walker.h5'\n\nwith h5py.File(h5_file, 'r') as f:\n dataset_size = f['obs_B_T_Do'].shape[0] # full dataset size\n\n print(f['obs_B_T_Do'].shape)\n print(f['a_B_T_Da'].shape)\n print(f['r_B_T'].shape)\n print(f['len_B'].shape)\n print('-'*20)\n \n states = f['obs_B_T_Do'][:dataset_size, ...][...]\n actions = f['a_B_T_Da'][:dataset_size, ...][...]\n rewards = f['r_B_T'][:dataset_size, ...][...]\n lens = f['len_B'][:dataset_size, ...][...]\n\n print(type(states))\n print(states.shape)\n print(actions.shape)\n print(rewards.shape)\n print(lens.shape)\n print(lens)\n \n states = torch.from_numpy(states).float()\n actions = torch.from_numpy(actions).float()\n rewards = torch.from_numpy(rewards).float()\n lens = torch.from_numpy(lens).long()\n \n i = 0\n j = 80\n print('-'*10+' value '+'-'*10)\n print('states ',states[i][j])\n print('actions ',actions[i][j])\n print('rewards ',rewards[i][j])\n\ndata = {\n 'states': states,\n 'actions': actions,\n 'rewards': rewards,\n 'lengths': lens\n}\n",
"(53, 1000, 17)\n(53, 1000, 6)\n(53, 1000)\n(53,)\n--------------------\n<class 'numpy.ndarray'>\n(53, 1000, 17)\n(53, 1000, 6)\n(53, 1000)\n(53,)\n[1000 1000 1000 1000 1000 1000 1000 1000 894 1000 1000 1000 870 1000\n 1000 1000 1000 1000 1000 1000 1000 1000 1000 1000 548 1000 1000 1000\n 1000 1000 1000 995 1000 1000 1000 1000 1000 765 148 1000 1000 1000\n 718 1000 1000 687 1000 1000 1000 1000 1000 1000 1000]\n---------- value ----------\nstates tensor([ 9.7591e-01, 4.9660e-01, -1.7497e-02, -7.4942e-01, 8.7715e-03,\n -2.6524e-01, 1.3913e-01, 6.6671e-01, 1.5276e+00, 6.6244e-01,\n -8.0999e+00, -2.8552e+00, -1.0000e+01, -1.0000e+01, -1.0000e+01,\n 3.0397e-01, -1.0000e+01])\nactions tensor([ 0.4441, -1.2433, -2.8770, -0.7248, 1.4835, -1.8718])\nrewards tensor(2.3810)\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
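The notebook above assembles the expert trajectories into a `data` dict of tensors but never writes it out. A hedged sketch of the missing save/load step; the file name and the dict below are stand-ins, not taken from the repository:

```python
import torch

# Stand-in for the dict built in the notebook (shapes here are illustrative only).
data = {
    'states':  torch.zeros(2, 5, 17),
    'actions': torch.zeros(2, 5, 6),
    'rewards': torch.zeros(2, 5),
    'lengths': torch.tensor([5, 5]),
}

torch.save(data, 'trajs_walker.pt')        # hypothetical output file
loaded = torch.load('trajs_walker.pt')     # e.g. later, inside a GAIL training script
print(loaded['states'].shape, loaded['lengths'])
```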
d04ac1dd736dec6f45db22eb906e647ddcc6542d | 22,606 | ipynb | Jupyter Notebook | Module_5_LeNet_on_MNIST (1).ipynb | vigneshb-it19/AWS-Computer-Vision-GluonCV | 73f861c707f888c88fc7ed26fc3e5b42e63731de | [
"MIT"
] | null | null | null | Module_5_LeNet_on_MNIST (1).ipynb | vigneshb-it19/AWS-Computer-Vision-GluonCV | 73f861c707f888c88fc7ed26fc3e5b42e63731de | [
"MIT"
] | null | null | null | Module_5_LeNet_on_MNIST (1).ipynb | vigneshb-it19/AWS-Computer-Vision-GluonCV | 73f861c707f888c88fc7ed26fc3e5b42e63731de | [
"MIT"
] | null | null | null | 37.242175 | 492 | 0.526409 | [
[
[
"<a href=\"https://colab.research.google.com/github/phreakyphoenix/MXNet-GluonCV-AWS-Coursera/blob/master/Module_5_LeNet_on_MNIST.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Graded Assessment\n\nIn this assessment you will write a full end-to-end training process using gluon and MXNet. We will train the LeNet-5 classifier network on the MNIST dataset. The network will be defined for you but you have to fill in code to prepare the dataset, train the network, and evaluate it's performance on a held out dataset.",
"_____no_output_____"
]
],
[
[
"#Check CUDA version\n!nvcc --version",
"nvcc: NVIDIA (R) Cuda compiler driver\nCopyright (c) 2005-2019 NVIDIA Corporation\nBuilt on Sun_Jul_28_19:07:16_PDT_2019\nCuda compilation tools, release 10.1, V10.1.243\n"
],
[
"#Install appropriate MXNet version\n'''\nFor eg if CUDA version is 10.0 choose mxnet cu100mkl \nwhere cu adds CUDA GPU support\nand mkl adds Intel CPU Math Kernel Library support\n'''\n!pip install mxnet-cu101mkl gluoncv",
"Collecting mxnet-cu101mkl\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/3d/4b/e51dc49ca5fe6564028e7c91b10a3f79c00d710dd691b408c77597df5883/mxnet_cu101mkl-1.6.0-py2.py3-none-manylinux1_x86_64.whl (711.0MB)\n\u001b[K |████████████████████████████████| 711.0MB 26kB/s \n\u001b[?25hCollecting gluoncv\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/69/4d/d9d6b9261af8f7251977bb97be669a3908f72bdec9d3597e527712d384c2/gluoncv-0.6.0-py2.py3-none-any.whl (693kB)\n\u001b[K |████████████████████████████████| 696kB 43.7MB/s \n\u001b[?25hRequirement already satisfied: requests<3,>=2.20.0 in /usr/local/lib/python3.6/dist-packages (from mxnet-cu101mkl) (2.21.0)\nRequirement already satisfied: numpy<2.0.0,>1.16.0 in /usr/local/lib/python3.6/dist-packages (from mxnet-cu101mkl) (1.18.2)\nCollecting graphviz<0.9.0,>=0.8.1\n Downloading https://files.pythonhosted.org/packages/53/39/4ab213673844e0c004bed8a0781a0721a3f6bb23eb8854ee75c236428892/graphviz-0.8.4-py2.py3-none-any.whl\nCollecting portalocker\n Downloading https://files.pythonhosted.org/packages/53/84/7b3146ec6378d28abc73ab484f09f47dfa008ad6f03f33d90a369f880e25/portalocker-1.7.0-py2.py3-none-any.whl\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from gluoncv) (3.2.1)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from gluoncv) (1.4.1)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from gluoncv) (4.38.0)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.6/dist-packages (from gluoncv) (7.0.0)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.20.0->mxnet-cu101mkl) (3.0.4)\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.20.0->mxnet-cu101mkl) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.20.0->mxnet-cu101mkl) (2020.4.5.1)\nRequirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.20.0->mxnet-cu101mkl) (2.8)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->gluoncv) (1.2.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->gluoncv) (2.8.1)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->gluoncv) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->gluoncv) (2.4.7)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.1->matplotlib->gluoncv) (1.12.0)\nInstalling collected packages: graphviz, mxnet-cu101mkl, portalocker, gluoncv\n Found existing installation: graphviz 0.10.1\n Uninstalling graphviz-0.10.1:\n Successfully uninstalled graphviz-0.10.1\nSuccessfully installed gluoncv-0.6.0 graphviz-0.8.4 mxnet-cu101mkl-1.6.0 portalocker-1.7.0\n"
],
[
"from pathlib import Path\nfrom mxnet import gluon, metric, autograd, init, nd\nimport os\nimport mxnet as mx",
"_____no_output_____"
],
[
"#I downloaded the files from Coursera and hosted on my gdrive:\nfrom google.colab import drive\ndrive.mount('/content/drive')",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n"
],
[
"# M5_DATA = Path(os.getenv('DATA_DIR', '../../data'), 'module_5')\nM5_DATA = Path('/content/drive/My Drive/CourseraWork/MXNetAWS/data/module_5')\nM5_IMAGES = Path(M5_DATA, 'images')",
"_____no_output_____"
]
],
[
[
"---\n## Question 1\n\n### Prepare and the data and construct the dataloader\n\n* First, get the MNIST dataset from `gluon.data.vision.datasets`. Use\n* Don't forget the ToTensor and normalize Transformations. Use `0.13` and `0.31` as the mean and standard deviation respectively\n* Construct the dataloader with the batch size provide. Ensure that the train_dataloader is shuffled.\n\n<font color='red'>**CAUTION!**</font>: Although the notebook interface has internet connectivity, the **autograders are not permitted to access the internet**. We have already downloaded the correct models and data for you to use so you don't need access to the internet. Set the `root` parameter to `M5_IMAGES` when using a preset dataset. Usually, in the real world, you have internet access, so setting the `root` parameter isn't required (and it's set to `~/.mxnet` by default).",
"_____no_output_____"
]
],
[
[
"import os\nfrom pathlib import Path\nfrom mxnet.gluon.data.vision import transforms\nimport numpy as np\ndef get_mnist_data(batch=128):\n \"\"\"\n Should construct a dataloader with the MNIST Dataset with the necessary transforms applied.\n \n :param batch: batch size for the DataLoader.\n :type batch: int\n \n :return: a tuple of the training and validation DataLoaders\n :rtype: (gluon.data.DataLoader, gluon.data.DataLoader)\n \"\"\"\n \n def transformer(data, label):\n data = data.flatten().expand_dims(0).astype(np.float32)/255\n data = data-0.13/0.31\n label = label.astype(np.float32)\n return data, label\n\n train_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=True, transform=transformer)\n validation_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=False, transform=transformer)\n train_dataloader = gluon.data.DataLoader(train_dataset, batch_size=batch, last_batch='keep',shuffle=True)\n validation_dataloader = gluon.data.DataLoader(validation_dataset, batch_size=batch, last_batch='keep')\n \n return train_dataloader, validation_dataloader",
"_____no_output_____"
],
[
"t, v = get_mnist_data()\nassert isinstance(t, gluon.data.DataLoader)\nassert isinstance(v, gluon.data.DataLoader)\n\nd, l = next(iter(t))\nassert d.shape == (128, 1, 28, 28) #check Channel First and Batch Size\nassert l.shape == (128,)\n\nassert nd.max(d).asscalar() <= 2.9 # check for normalization\nassert nd.min(d).asscalar() >= -0.5 # check for normalization",
"_____no_output_____"
]
],
[
[
"---\n\n## Question 2\n\n### Write the training loop\n\n* Create the loss function. This should be a loss function suitable for multi-class classification.\n* Create the metric accumulator. This should the compute and store the accuracy of the model during training\n* Create the trainer with the `adam` optimizer and learning rate of `0.002`\n* Write the training loop",
"_____no_output_____"
]
],
[
[
"def train(network, training_dataloader, batch_size, epochs):\n \"\"\"\n Should take an initialized network and train that network using data from the data loader.\n \n :param network: initialized gluon network to be trained\n :type network: gluon.Block\n \n :param training_dataloader: the training DataLoader provides batches for data for every iteration\n :type training_dataloader: gluon.data.DataLoader\n \n :param batch_size: batch size for the DataLoader.\n :type batch_size: int\n \n :param epochs: number of epochs to train the DataLoader\n :type epochs: int\n \n :return: tuple of trained network and the final training accuracy\n :rtype: (gluon.Block, float)\n \"\"\"\n trainer = gluon.Trainer(network.collect_params(), 'adam',\n {'learning_rate': 0.002})\n metric = mx.metric.Accuracy()\n \n for epoch in range(epochs):\n train_loss =0.\n for data,label in training_dataloader:\n \n# print (data.shape)\n# print (label.shape)\n with autograd.record():\n output = network(data)\n loss=mx.ndarray.softmax_cross_entropy(output,label)\n loss.backward()\n\n trainer.step(batch_size)\n train_loss += loss.mean().asscalar()\n metric.update(label, output)\n \n print (epoch , metric.get()[1]) \n training_accuracy = metric.get()[1]\n return network, training_accuracy",
"_____no_output_____"
]
],
[
[
"Let's define and initialize a network to test the train function.",
"_____no_output_____"
]
],
[
[
"net = gluon.nn.Sequential()\nnet.add(gluon.nn.Conv2D(channels=6, kernel_size=5, activation='relu'),\n gluon.nn.MaxPool2D(pool_size=2, strides=2),\n gluon.nn.Conv2D(channels=16, kernel_size=3, activation='relu'),\n gluon.nn.MaxPool2D(pool_size=2, strides=2),\n gluon.nn.Flatten(),\n gluon.nn.Dense(120, activation=\"relu\"),\n gluon.nn.Dense(84, activation=\"relu\"),\n gluon.nn.Dense(10))\nnet.initialize(init=init.Xavier())",
"_____no_output_____"
],
[
"n, ta = train(net, t, 128, 5)\nassert ta >= .95\n\nd, l = next(iter(v))\np = (n(d).argmax(axis=1))\nassert (p.asnumpy() == l.asnumpy()).sum()/128.0 > .95",
"0 0.93415\n1 0.9572583333333333\n2 0.9668111111111111\n3 0.972375\n4 0.97606\n"
]
],
[
[
"---\n## Question 3\n\n### Write the validation loop\n\n* Create the metric accumulator. This should the compute and store the accuracy of the model on the validation set\n* Write the validation loop",
"_____no_output_____"
]
],
[
[
"def validate(network, validation_dataloader):\n \"\"\"\n Should compute the accuracy of the network on the validation set.\n \n :param network: initialized gluon network to be trained\n :type network: gluon.Block\n \n :param validation_dataloader: the training DataLoader provides batches for data for every iteration\n :type validation_dataloader: gluon.data.DataLoader\n \n :return: validation accuracy\n :rtype: float\n \"\"\"\n val_acc = mx.metric.Accuracy()\n for data,label in validation_dataloader:\n output = network(data)\n val_acc.update(label,output)\n print (val_acc.get()[1])\n return val_acc.get()[1]",
"_____no_output_____"
],
[
"assert validate(n, v) > .95",
"0.9896\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
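Question 1 in the notebook above asks for the `ToTensor` and `Normalize` transforms with mean 0.13 and std 0.31, while the submitted solution hand-rolls the transform inside `get_mnist_data`. A hedged sketch of the transforms-based variant (untested here; the `root` handling is simplified relative to the autograder setup):

```python
from mxnet import gluon
from mxnet.gluon.data.vision import datasets, transforms

transform = transforms.Compose([
    transforms.ToTensor(),             # HWC uint8 -> CHW float32 scaled to [0, 1]
    transforms.Normalize(0.13, 0.31),  # (x - mean) / std
])

def get_mnist_data(batch=128, root='~/.mxnet'):
    train_ds = datasets.MNIST(root=root, train=True).transform_first(transform)
    valid_ds = datasets.MNIST(root=root, train=False).transform_first(transform)
    train_dl = gluon.data.DataLoader(train_ds, batch_size=batch, shuffle=True)
    valid_dl = gluon.data.DataLoader(valid_ds, batch_size=batch, shuffle=False)
    return train_dl, valid_dl
```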
d04ad898987882a338f1c3056ae84bf07d57bd83 | 103,850 | ipynb | Jupyter Notebook | train_classifier.ipynb | rainyrainyguo/sentiment-analysis | 88c1e330b525ad81f0b4f05d892d7f7c16e15023 | [
"MIT"
] | null | null | null | train_classifier.ipynb | rainyrainyguo/sentiment-analysis | 88c1e330b525ad81f0b4f05d892d7f7c16e15023 | [
"MIT"
] | null | null | null | train_classifier.ipynb | rainyrainyguo/sentiment-analysis | 88c1e330b525ad81f0b4f05d892d7f7c16e15023 | [
"MIT"
] | null | null | null | 36.618477 | 192 | 0.442195 | [
[
[
"import torch\nfrom torchtext import data\nimport numpy as np\nimport pandas as pd\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nSEED = 1\n\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)",
"_____no_output_____"
],
[
"with open('../stanford-corenlp-full-2018-10-05/stanfordSentimentTreebank/dictionary.txt','r') as f:\n dic = f.readlines()",
"_____no_output_____"
],
[
"dic[:20]",
"_____no_output_____"
],
[
"BeautyTEXT = data.Field(tokenize='spacy')\nBeautyLABEL = data.LabelField(tensor_type=torch.FloatTensor)\n\nprint(\"loading dataset clean_Beauty300.tsv...\")\nBeautytrain = data.TabularDataset.splits(\n path='../counter-sent-generation3/VAE/data/official_Amazon/', \n train='clean_Beauty300.tsv',\n format='tsv',\n fields=[('Text', BeautyTEXT),('Label', BeautyLABEL)])[0]\n\nBeautyTEXT.build_vocab(Beautytrain, max_size=60000, vectors=\"fasttext.en.300d\",min_freq=1)\nBeautyLABEL.build_vocab(Beautytrain)\n\nBeautyLABEL.vocab.stoi['1']=1\nBeautyLABEL.vocab.stoi['2']=2\nBeautyLABEL.vocab.stoi['3']=3\nBeautyLABEL.vocab.stoi['4']=4\nBeautyLABEL.vocab.stoi['5']=5",
"loading dataset clean_Beauty300.tsv...\n"
],
[
"ApparelTEXT = data.Field(tokenize='spacy')\nApparelLABEL = data.LabelField(tensor_type=torch.FloatTensor)\n\nprint(\"loading dataset clean_Apparel300.tsv...\")\nAppareltrain = data.TabularDataset.splits(\n path='../counter-sent-generation3/VAE/data/official_Amazon/', \n train='clean_Apparel300.tsv',\n format='tsv',\n fields=[('Text', ApparelTEXT),('Label', ApparelLABEL)])[0]\n\nApparelTEXT.build_vocab(Appareltrain, max_size=60000, vectors=\"fasttext.en.300d\",min_freq=1)\nApparelLABEL.build_vocab(Appareltrain)\n\nApparelLABEL.vocab.stoi['1']=1\nApparelLABEL.vocab.stoi['2']=2\nApparelLABEL.vocab.stoi['3']=3\nApparelLABEL.vocab.stoi['4']=4\nApparelLABEL.vocab.stoi['5']=5",
"loading dataset clean_Apparel300.tsv...\n"
],
[
"JewelryTEXT = data.Field(tokenize='spacy')\nJewelryLABEL = data.LabelField(tensor_type=torch.FloatTensor)\n\nprint(\"loading dataset clean_Jewelry300.tsv...\")\nJewelrytrain = data.TabularDataset.splits(\n path='../counter-sent-generation3/VAE/data/official_Amazon/', \n train='clean_Jewelry300.tsv',\n format='tsv',\n fields=[('Text', JewelryTEXT),('Label', JewelryLABEL)])[0]\n\nJewelryTEXT.build_vocab(Jewelrytrain, max_size=60000, vectors=\"fasttext.en.300d\",min_freq=1)\nJewelryLABEL.build_vocab(Jewelrytrain)\n\nJewelryLABEL.vocab.stoi['1']=1\nJewelryLABEL.vocab.stoi['2']=2\nJewelryLABEL.vocab.stoi['3']=3\nJewelryLABEL.vocab.stoi['4']=4\nJewelryLABEL.vocab.stoi['5']=5",
"loading dataset clean_Jewelry300.tsv...\n"
],
[
"ShoesTEXT = data.Field(tokenize='spacy')\nShoesLABEL = data.LabelField(tensor_type=torch.FloatTensor)\n\nprint(\"loading dataset clean_Shoes300.tsv...\")\nShoestrain = data.TabularDataset.splits(\n path='../counter-sent-generation3/VAE/data/official_Amazon/', \n train='clean_Shoes300.tsv',\n format='tsv',\n fields=[('Text', ShoesTEXT),('Label', ShoesLABEL)])[0]\n\nShoesTEXT.build_vocab(Shoestrain, max_size=60000, vectors=\"fasttext.en.300d\",min_freq=1)\nShoesLABEL.build_vocab(Shoestrain)\n\nShoesLABEL.vocab.stoi['1']=1\nShoesLABEL.vocab.stoi['2']=2\nShoesLABEL.vocab.stoi['3']=3\nShoesLABEL.vocab.stoi['4']=4\nShoesLABEL.vocab.stoi['5']=5",
"loading dataset clean_Shoes300.tsv...\n"
],
[
"import operator\nsorted_Beautyvocab = sorted(BeautyTEXT.vocab.freqs.items(), key=operator.itemgetter(1),reverse=False)",
"_____no_output_____"
],
[
"common1 = set.intersection(set(BeautyTEXT.vocab.itos),set(ShoesTEXT.vocab.itos))",
"_____no_output_____"
],
[
"common2 = set.intersection(set(ApparelTEXT.vocab.itos),set(ShoesTEXT.vocab.itos))",
"_____no_output_____"
],
[
"common3 = set.intersection(set(JewelryTEXT.vocab.itos),set(ShoesTEXT.vocab.itos))",
"_____no_output_____"
],
[
"cdict={}\ncdict['<unk>']=0\ncdict['<pad>']=1\ni=2\nfor x in common:\n if x!='<unk>' and x!='<pad>':\n cdict[x]=i\n i=i+1",
"_____no_output_____"
],
[
"len(ShoesTEXT.vocab.stoi)",
"_____no_output_____"
],
[
"len(JewelryTEXT.vocab.stoi)",
"_____no_output_____"
],
[
"len(common2)",
"_____no_output_____"
],
[
"len(ApparelTEXT.vocab.stoi)",
"_____no_output_____"
],
[
"len(BeautyTEXT.vocab.itos)",
"_____no_output_____"
],
[
"len(common3)",
"_____no_output_____"
],
[
"ApparelTEXT.vocab.itos[0]",
"_____no_output_____"
],
[
"BeautyTEXT.vocab.stoi",
"_____no_output_____"
],
[
"import json\nwith open('Apparel300_vocab','w') as f:\n json.dump(ApparelTEXT.vocab.stoi,f)\n \nwith open('Beauty300_vocab','w') as f:\n json.dump(BeautyTEXT.vocab.stoi,f)\n \nwith open('Jewelry300_vocab','w') as f:\n json.dump(JewelryTEXT.vocab.stoi,f)",
"_____no_output_____"
],
[
"BATCH_SIZE = 32\n\nBeautytrain, Beautyvalid = Beautytrain.split(split_ratio=0.8)\nBeautytrain_iterator, Beautyvalid_iterator = data.BucketIterator.splits(\n (Beautytrain, Beautyvalid), \n batch_size=BATCH_SIZE, \n sort_key=lambda x: len(x.Text), \n repeat=False)\n\nAppareltrain, Apparelvalid = Appareltrain.split(split_ratio=0.999)\nAppareltrain_iterator, Apparelvalid_iterator = data.BucketIterator.splits(\n (Appareltrain, Apparelvalid), \n batch_size=BATCH_SIZE, \n sort_key=lambda x: len(x.Text), \n repeat=False)\n\nJewelrytrain, Jewelryvalid = Jewelrytrain.split(split_ratio=0.8)\nJewelrytrain_iterator, Jewelryvalid_iterator = data.BucketIterator.splits(\n (Jewelrytrain, Jewelryvalid), \n batch_size=BATCH_SIZE, \n sort_key=lambda x: len(x.Text), \n repeat=False)\n\nShoestrain, Shoesvalid = Shoestrain.split(split_ratio=0.8)\nShoestrain_iterator, Shoesvalid_iterator = data.BucketIterator.splits(\n (Shoestrain, Shoesvalid), \n batch_size=BATCH_SIZE, \n sort_key=lambda x: len(x.Text), \n repeat=False)\n'''\ntrain_iterator = data.BucketIterator.splits(\n train, \n batch_size=BATCH_SIZE, \n sort_key=lambda x: len(x.Text), \n repeat=False)\n'''",
"_____no_output_____"
],
[
"class RNN(nn.Module):\n def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, bidirectional, dropout):\n super().__init__()\n \n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers, bidirectional=bidirectional, dropout=dropout)\n self.fc = nn.Linear(hidden_dim*2, output_dim)\n self.dropout = nn.Dropout(dropout)\n \n \n def forward(self, x):\n \n #x = [sent len, batch size]\n \n embedded = self.dropout(self.embedding(x))\n #print(\"embedded shape: \", embedded.shape)\n \n #embedded = [sent len, batch size, emb dim]\n \n output, (hidden, cell) = self.rnn(embedded)\n #print(\"output.shape: \",output.shape)\n #print(\"output[-1].shape: \",output[-1].shape)\n #print(\"hidden.shape: \",hidden.shape)\n #print(\"cell.shape: \",cell.shape)\n \n #output = [sent len, batch size, hid dim * num directions]\n #hidden = [num layers * num directions, batch size, hid. dim]\n #cell = [num layers * num directions, batch size, hid. dim]\n \n hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1))\n #print(\"hidden.shape: \",hidden.shape)\n \n y = self.fc(hidden.squeeze(0))\n \n #hidden [batch size, hid. dim * num directions]\n \n #return self.fc(hidden.squeeze(0))\n return y",
"_____no_output_____"
],
[
"# Beauty classifier",
"_____no_output_____"
],
[
"len(BeautyTEXT.vocab)",
"_____no_output_____"
],
[
"BeautyINPUT_DIM = len(BeautyTEXT.vocab)\nEMBEDDING_DIM = 300\nHIDDEN_DIM = 500\nOUTPUT_DIM = 1\nN_LAYERS = 2\nBIDIRECTIONAL = True\nDROPOUT = 0.5\n\nBeautymodel = RNN(BeautyINPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT)\nprint(\"Beautymodel parameters: \")\nprint(Beautymodel.parameters)\n\npretrained_embeddings = BeautyTEXT.vocab.vectors\n\nBeautymodel.embedding.weight.data.copy_(pretrained_embeddings)\n\nimport torch.optim as optim\nBeautyoptimizer = optim.Adam(Beautymodel.parameters(),lr=0.0003)\ncriterion = nn.MSELoss()\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n#device=torch.device('cpu')\nBeautymodel = Beautymodel.to(device)\ncriterion = criterion.to(device)",
"Beautymodel parameters: \n<bound method Module.parameters of RNN(\n (embedding): Embedding(20219, 300)\n (rnn): LSTM(300, 500, num_layers=2, dropout=0.5, bidirectional=True)\n (fc): Linear(in_features=1000, out_features=1, bias=True)\n (dropout): Dropout(p=0.5)\n)>\n"
],
[
"ApparelINPUT_DIM = len(ApparelTEXT.vocab)\nEMBEDDING_DIM = 300\nHIDDEN_DIM = 500\nOUTPUT_DIM = 1\nN_LAYERS = 2\nBIDIRECTIONAL = True\nDROPOUT = 0.5\n\nApparelmodel = RNN(ApparelINPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT)\nprint(\"Apparelmodel parameters: \")\nprint(Apparelmodel.parameters)\n\npretrained_embeddings = ApparelTEXT.vocab.vectors\n\nApparelmodel.embedding.weight.data.copy_(pretrained_embeddings)\n\nimport torch.optim as optim\nAppareloptimizer = optim.Adam(Apparelmodel.parameters(),lr=0.0003)\ncriterion = nn.MSELoss()\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n#device=torch.device('cpu')\nApparelmodel = Apparelmodel.to(device)\ncriterion = criterion.to(device)",
"Apparelmodel parameters: \n<bound method Module.parameters of RNN(\n (embedding): Embedding(18035, 300)\n (rnn): LSTM(300, 500, num_layers=2, dropout=0.5, bidirectional=True)\n (fc): Linear(in_features=1000, out_features=1, bias=True)\n (dropout): Dropout(p=0.5)\n)>\n"
],
[
"JewelryINPUT_DIM = len(JewelryTEXT.vocab)\nEMBEDDING_DIM = 300\nHIDDEN_DIM = 500\nOUTPUT_DIM = 1\nN_LAYERS = 2\nBIDIRECTIONAL = True\nDROPOUT = 0.5\n\nJewelrymodel = RNN(JewelryINPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT)\nprint(\"Jewelrymodel parameters: \")\nprint(Jewelrymodel.parameters)\n\npretrained_embeddings = JewelryTEXT.vocab.vectors\n\nJewelrymodel.embedding.weight.data.copy_(pretrained_embeddings)\n\nimport torch.optim as optim\nJewelryoptimizer = optim.Adam(Jewelrymodel.parameters(),lr=0.0003)\ncriterion = nn.MSELoss()\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n#device=torch.device('cpu')\nJewelrymodel = Jewelrymodel.to(device)\ncriterion = criterion.to(device)",
"Jewelrymodel parameters: \n<bound method Module.parameters of RNN(\n (embedding): Embedding(16904, 300)\n (rnn): LSTM(300, 500, num_layers=2, dropout=0.5, bidirectional=True)\n (fc): Linear(in_features=1000, out_features=1, bias=True)\n (dropout): Dropout(p=0.5)\n)>\n"
],
[
"ShoesINPUT_DIM = len(ShoesTEXT.vocab)\nEMBEDDING_DIM = 300\nHIDDEN_DIM = 500\nOUTPUT_DIM = 1\nN_LAYERS = 2\nBIDIRECTIONAL = True\nDROPOUT = 0.5\n\nShoesmodel = RNN(ShoesINPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT)\nprint(\"Shoesmodel parameters: \")\nprint(Shoesmodel.parameters)\n\npretrained_embeddings = ShoesTEXT.vocab.vectors\n\nShoesmodel.embedding.weight.data.copy_(pretrained_embeddings)\n\nimport torch.optim as optim\nShoesoptimizer = optim.Adam(Shoesmodel.parameters(),lr=0.0003)\ncriterion = nn.MSELoss()\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n#device=torch.device('cpu')\nShoesmodel = Shoesmodel.to(device)\ncriterion = criterion.to(device)",
"Shoesmodel parameters: \n<bound method Module.parameters of RNN(\n (embedding): Embedding(17486, 300)\n (rnn): LSTM(300, 500, num_layers=2, dropout=0.5, bidirectional=True)\n (fc): Linear(in_features=1000, out_features=1, bias=True)\n (dropout): Dropout(p=0.5)\n)>\n"
],
[
"import torch.nn.functional as F\n\ndef accuracy(preds,y):\n rounded_preds = torch.round(preds)\n correct = (rounded_preds==y).float()\n acc = correct.sum()/len(correct)\n return acc\n\ndef train(model, iterator, optimizer, criterion):\n \n epoch_loss = 0\n epoch_acc = 0\n \n model.train() # turns on dropout and batch normalization and allow gradient update\n \n i=0\n for batch in iterator:\n i=i+1\n \n optimizer.zero_grad() # set accumulated gradient to 0 for every start of a batch\n \n predictions = model(batch.Text).squeeze(1)\n \n loss = criterion(predictions, batch.Label)\n \n acc = accuracy(predictions, batch.Label)\n \n loss.backward() # calculate gradient\n \n optimizer.step() # update parameters\n \n if i%100==0:\n print(\"train batch loss: \", loss.item())\n print(\"train accuracy: \", acc.item())\n \n epoch_loss += loss.item()\n epoch_acc += acc.item()\n \n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef evaluate(model, iterator, criterion):\n \n epoch_loss = 0\n epoch_acc = 0\n \n model.eval() #turns off dropout and batch normalization\n \n with torch.no_grad():\n i=0\n for batch in iterator:\n i=i+1\n predictions = model(batch.Text).squeeze(1)\n \n loss = criterion(predictions, batch.Label)\n \n acc = accuracy(predictions, batch.Label)\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n \n if i%200 ==0:\n print(\"eval batch loss: \", loss.item())\n print(\"eval accuracy: \", acc.item())\n \n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n#model = torch.load('fmodel')\n\nimport timeit\n#start = timeit.default_timer()",
"_____no_output_____"
],
[
"N_EPOCHS = 20\n#print(\"loading previous frnn3 model...\")\n#model = torch.load('frnn3')\ntry:\n for epoch in range(N_EPOCHS):\n start = timeit.default_timer()\n\n train_loss, train_acc = train(Shoesmodel, Shoestrain_iterator, Shoesoptimizer, criterion)\n valid_loss, valid_acc = evaluate(Shoesmodel, Shoesvalid_iterator, criterion)\n #print(\"saving model: frnn8\")\n #torch.save(model,'frnn8')\n\n print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc*100:.2f}%, Val. Loss: {valid_loss:.3f}, Val. Acc: {valid_acc*100:.2f}%')\n #print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc*100:.2f}%')\n\n stop = timeit.default_timer()\n print(\"time duration: \", stop - start)\n\nexcept KeyboardInterrupt:\n print(\"interrupt\")\n print('Exiting from training early')\n\n#print(\"save frnn8 again:\")\n#torch.save(model,'frnn8')",
"train batch loss: 0.6944787502288818\ntrain accuracy: 0.25\ntrain batch loss: 1.1130222082138062\ntrain accuracy: 0.1875\ntrain batch loss: 0.42840540409088135\ntrain accuracy: 0.625\ntrain batch loss: 0.49681568145751953\ntrain accuracy: 0.53125\ntrain batch loss: 0.7590948343276978\ntrain accuracy: 0.5\ntrain batch loss: 1.0518995523452759\ntrain accuracy: 0.59375\ntrain batch loss: 0.5709025263786316\ntrain accuracy: 0.5625\ntrain batch loss: 1.2898609638214111\ntrain accuracy: 0.625\ntrain batch loss: 0.5770605206489563\ntrain accuracy: 0.59375\ntrain batch loss: 0.5210078954696655\ntrain accuracy: 0.46875\ntrain batch loss: 0.6203491687774658\ntrain accuracy: 0.5\ntrain batch loss: 0.4728987216949463\ntrain accuracy: 0.5625\ntrain batch loss: 0.4706098437309265\ntrain accuracy: 0.59375\ntrain batch loss: 0.7257290482521057\ntrain accuracy: 0.5625\ntrain batch loss: 0.46271830797195435\ntrain accuracy: 0.65625\ntrain batch loss: 1.1187175512313843\ntrain accuracy: 0.5\ntrain batch loss: 0.7729892730712891\ntrain accuracy: 0.625\n"
],
[
"####################\n# prediction\n####################\n\n'''\nprint('loading frnn4:')\nmodel = torch.load('frnn4',map_location=lambda storage,loc:storage)\n'''\n\n\n\nvalid_loss, valid_acc = evaluate(model, valid_iterator, criterion)\nprint(\"valid loss: \",valid_loss)\nprint(\"valid acc: \",valid_acc)\n\n \nprint(\"prediction of frnn8.....\")\n \nimport spacy\nnlp = spacy.load('en')\n\ndef predict_sentiment(sentence,model):\n tokenized = [tok.text for tok in nlp.tokenizer(sentence)]\n indexed = [TEXT.vocab.stoi[t] for t in tokenized]\n tensor = torch.LongTensor(indexed).to(device)\n tensor = tensor.unsqueeze(1)\n model.eval()\n prediction = model(tensor)\n return prediction.item()\n\n\nwith open('../sent/ori_gender_data/male_sent_test_less700.tsv','r') as f:\n mtest = f.readlines()\n\nwith open('../sent/ori_gender_data/female_sent_test_less700.tsv','r') as f:\n ftest = f.readlines()\n\nfs = [line.split('\\t')[0] for line in ftest]\nms = [line.split('\\t')[0] for line in mtest]\n\nmlabel = [int(line.split('\\t')[1].strip('\\n')) for line in mtest]\nflabel = [int(line.split('\\t')[1].strip('\\n')) for line in ftest]\n\nfprem = [predict_sentiment(x,model) for x in ms]\nfpref = [predict_sentiment(x,model) for x in fs]\n\nprint(\"10 fprem:\")\nprint(fprem[:10])\nprint(\"10 fpref:\")\nprint(fpref[:10])\n \n \nprint(\"writing fpref to file fpref_frnn8.txt...\")\nwith open('fpref_frnn8.txt','w') as f:\n f.write(str(fpref))\nprint(\"writing fprem to file fprem_frnn8.txt...\")\nwith open('fprem_frnn8.txt','w') as f:\n f.write(str(fprem))\n\nprint(\"fpref accuracy: \",(np.array([round(x) for x in fpref])==np.array(flabel)).mean())\nprint(\"fprem accuracy: \",(np.array([round(x) for x in fprem])==np.array(mlabel)).mean())\n\n\n'''\nwith open('../sent/ori_gender_data/male_sent_tmp_train.tsv','r') as f:\n mtrain = f.readlines()\n\nwith open('../sent/ori_gender_data/female_sent_tmp_train.tsv','r') as f:\n ftrain = f.readlines()\n\nfs = [line.split('\\t')[0] for line in ftrain]\nms = [line.split('\\t')[0] for line in mtrain]\n\nmlabel = [int(line.split('\\t')[1].strip('\\n')) for line in mtrain]\nflabel = [int(line.split('\\t')[1].strip('\\n')) for line in ftrain]\n\nfprem = [predict_sentiment(x,model) for x in ms]\nfpref = [predict_sentiment(x,model) for x in fs]\n\nprint(\"10 fpref on female_sent_tmp_train.tsv:\")\nprint(fpref[:10])\nprint(\"10 fprem on male_sent_tmp_train.tsv:\")\nprint(fprem[:10])\n \n \nprint(\"writing fpref to file :fpre_female_sent_tmp_train_frnn4.txt...\")\nwith open('fpre_female_sent_tmp_train_frnn4.txt','w') as f:\n f.write(str(fpref))\nprint(\"writing fprem to file :fpre_male_sent_tmp_train_frnn4.txt...\")\nwith open('fpre_male_sent_tmp_train_frnn4.txt','w') as f:\n f.write(str(fprem))\n\n\nprint(\"fpref accuracy: \",(np.array([round(x) for x in fpref])==np.array(flabel)).mean())\nprint(\"fprem accuracy: \",(np.array([round(x) for x in fprem])==np.array(mlabel)).mean())\n'''",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
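The notebook above intersects the vocabularies of the per-category `TEXT` fields and starts building a shared index map (`cdict`). A self-contained sketch of that idea on plain Python lists, for clarity; the toy vocabularies below are made up and only stand in for `BeautyTEXT.vocab.itos`, `ShoesTEXT.vocab.itos`, and so on:

```python
# Toy vocabularies standing in for the torchtext field vocabularies.
vocabs = [
    ['<unk>', '<pad>', 'great', 'fit', 'scent'],
    ['<unk>', '<pad>', 'great', 'fit', 'laces'],
]

common = set.intersection(*map(set, vocabs))

cdict = {'<unk>': 0, '<pad>': 1}
for tok in sorted(common):
    if tok not in cdict:               # the two special tokens are already placed
        cdict[tok] = len(cdict)

print(cdict)   # {'<unk>': 0, '<pad>': 1, 'fit': 2, 'great': 3}
```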
d04aeb12ef5e60aec872f16f44543a1e3a3b4396 | 5,828 | ipynb | Jupyter Notebook | solutions_do_not_open/Lab_05_DL Callbacks and Multiple Inputs_solution.ipynb | Dataweekends/global_AI_conference_Jan_2018 | f29766c7f9f8e2f83d8bdb6eb622ad8003938044 | [
"MIT"
] | 8 | 2018-01-17T18:17:20.000Z | 2020-07-21T18:55:47.000Z | solutions_do_not_open/Lab_05_DL Callbacks and Multiple Inputs_solution.ipynb | Dataweekends/global_AI_conference_Jan_2018 | f29766c7f9f8e2f83d8bdb6eb622ad8003938044 | [
"MIT"
] | null | null | null | solutions_do_not_open/Lab_05_DL Callbacks and Multiple Inputs_solution.ipynb | Dataweekends/global_AI_conference_Jan_2018 | f29766c7f9f8e2f83d8bdb6eb622ad8003938044 | [
"MIT"
] | 7 | 2018-01-17T03:28:57.000Z | 2020-01-02T03:53:41.000Z | 24.487395 | 103 | 0.530371 | [
[
[
"# Callbacks and Multiple inputs",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nfrom sklearn.preprocessing import scale\n\nfrom keras.optimizers import SGD\nfrom keras.layers import Dense, Input, concatenate, BatchNormalization\nfrom keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint\nfrom keras.models import Model\nimport keras.backend as K",
"_____no_output_____"
],
[
"df = pd.read_csv(\"../data/titanic-train.csv\")\nY = df['Survived']",
"_____no_output_____"
],
[
"df.info()",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"num_features = df[['Age', 'Fare', 'SibSp', 'Parch']].fillna(0)\nnum_features.head()",
"_____no_output_____"
],
[
"cat_features = pd.get_dummies(df[['Pclass', 'Sex', 'Embarked']].astype('str'))\ncat_features.head()",
"_____no_output_____"
],
[
"X1 = scale(num_features.values)\nX2 = cat_features.values",
"_____no_output_____"
],
[
"K.clear_session()\n\n# Numerical features branch\ninputs1 = Input(shape = (X1.shape[1],))\nb1 = BatchNormalization()(inputs1)\nb1 = Dense(3, kernel_initializer='normal', activation = 'tanh')(b1)\nb1 = BatchNormalization()(b1)\n\n# Categorical features branch\ninputs2 = Input(shape = (X2.shape[1],))\nb2 = Dense(8, kernel_initializer='normal', activation = 'relu')(inputs2)\nb2 = BatchNormalization()(b2)\nb2 = Dense(4, kernel_initializer='normal', activation = 'relu')(b2)\nb2 = BatchNormalization()(b2)\nb2 = Dense(2, kernel_initializer='normal', activation = 'relu')(b2)\nb2 = BatchNormalization()(b2)\n\nmerged = concatenate([b1, b2])\npreds = Dense(1, activation = 'sigmoid')(merged)\n\n# final model\nmodel = Model([inputs1, inputs2], preds)\n\nmodel.compile(loss = 'binary_crossentropy',\n optimizer = 'rmsprop',\n metrics = ['accuracy'])\n\nmodel.summary()",
"_____no_output_____"
],
[
"outpath='/tmp/tensorflow_logs/titanic/'\n\nearly_stopper = EarlyStopping(monitor='val_acc', patience=10)\ntensorboard = TensorBoard(outpath+'tensorboard/', histogram_freq=1)\ncheckpointer = ModelCheckpoint(outpath+'weights_epoch_{epoch:02d}_val_acc_{val_acc:.2f}.hdf5',\n monitor='val_acc')",
"_____no_output_____"
],
[
"# You may have to run this a couple of times if stuck on local minimum\nnp.random.seed(2017)\nh = model.fit([X1, X2],\n Y.values,\n batch_size = 32,\n epochs = 40,\n verbose = 1,\n validation_split=0.2,\n callbacks=[early_stopper,\n tensorboard,\n checkpointer])",
"_____no_output_____"
],
[
"import os\nsorted(os.listdir(outpath))",
"_____no_output_____"
]
],
[
[
"Now check the tensorboard.\n\n- If using provided aws instance, just browse to: `http://<your-ip>:6006`\n\n- If using local, open a terminal, activate the environment and run:\n```\ntensorboard --logdir=/tmp/tensorflow_logs/titanic/tensorboard/\n```\nthen open a browser at `localhost:6006`\n\nYou should see something like this:\n\n",
"_____no_output_____"
],
[
"## Exercise 1\n\n- try modifying the parameters of the 3 callbacks provided. What are they for? What do they do?",
"_____no_output_____"
],
[
"*Copyright © 2017 CATALIT LLC. All rights reserved.*",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
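Exercise 1 in the notebook above asks what the three callbacks do and how their parameters change behavior. One hedged way to experiment; the values and paths below are arbitrary examples, not recommendations from the course material:

```python
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard

# Watch validation loss instead of accuracy and wait only 5 epochs before stopping.
early_stopper = EarlyStopping(monitor='val_loss', patience=5)

# Keep a single best checkpoint instead of writing one file per epoch.
checkpointer = ModelCheckpoint('/tmp/titanic_best.hdf5', monitor='val_acc',
                               save_best_only=True)

# Skip per-epoch histograms to keep the TensorBoard logs small.
tensorboard = TensorBoard('/tmp/tensorflow_logs/titanic/tensorboard/', histogram_freq=0)
```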
d04b0032273aadc71c6c59cb421f03b8dd0c38b1 | 92,157 | ipynb | Jupyter Notebook | Credit Risk Evaluator.ipynb | J-Schea29/Supervised-Machine-Learning-Challenge | 1bb10d05680407b39df526ebaa9e6ac009204f81 | [
"MIT"
] | null | null | null | Credit Risk Evaluator.ipynb | J-Schea29/Supervised-Machine-Learning-Challenge | 1bb10d05680407b39df526ebaa9e6ac009204f81 | [
"MIT"
] | null | null | null | Credit Risk Evaluator.ipynb | J-Schea29/Supervised-Machine-Learning-Challenge | 1bb10d05680407b39df526ebaa9e6ac009204f81 | [
"MIT"
] | null | null | null | 37.955931 | 259 | 0.305262 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pathlib import Path",
"_____no_output_____"
],
[
"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"train_df = pd.read_csv(Path('./Resources/2019loans.csv'))\ntest_df = pd.read_csv(Path('./Resources/2020Q1loans.csv'))",
"_____no_output_____"
],
[
"train_df",
"_____no_output_____"
],
[
"test_df",
"_____no_output_____"
],
[
"# Convert categorical data to numeric and separate target feature for training data\nX_1 = train_df.drop('target', axis=1)\nX_dummies_train = pd.get_dummies(X_1)\nprint(X_dummies_train.columns)\nX_dummies_train",
"Index(['loan_amnt', 'int_rate', 'installment', 'annual_inc', 'dti',\n 'delinq_2yrs', 'inq_last_6mths', 'open_acc', 'pub_rec', 'revol_bal',\n 'total_acc', 'out_prncp', 'out_prncp_inv', 'total_pymnt',\n 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int',\n 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee',\n 'last_pymnt_amnt', 'collections_12_mths_ex_med', 'policy_code',\n 'acc_now_delinq', 'tot_coll_amt', 'tot_cur_bal', 'open_acc_6m',\n 'open_act_il', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il',\n 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc',\n 'all_util', 'total_rev_hi_lim', 'inq_fi', 'total_cu_tl', 'inq_last_12m',\n 'acc_open_past_24mths', 'avg_cur_bal', 'bc_open_to_buy', 'bc_util',\n 'chargeoff_within_12_mths', 'delinq_amnt', 'mo_sin_old_il_acct',\n 'mo_sin_old_rev_tl_op', 'mo_sin_rcnt_rev_tl_op', 'mo_sin_rcnt_tl',\n 'mort_acc', 'mths_since_recent_bc', 'mths_since_recent_inq',\n 'num_accts_ever_120_pd', 'num_actv_bc_tl', 'num_actv_rev_tl',\n 'num_bc_sats', 'num_bc_tl', 'num_il_tl', 'num_op_rev_tl',\n 'num_rev_accts', 'num_rev_tl_bal_gt_0', 'num_sats', 'num_tl_120dpd_2m',\n 'num_tl_30dpd', 'num_tl_90g_dpd_24m', 'num_tl_op_past_12m',\n 'pct_tl_nvr_dlq', 'percent_bc_gt_75', 'pub_rec_bankruptcies',\n 'tax_liens', 'tot_hi_cred_lim', 'total_bal_ex_mort', 'total_bc_limit',\n 'total_il_high_credit_limit', 'home_ownership_ANY',\n 'home_ownership_MORTGAGE', 'home_ownership_OWN', 'home_ownership_RENT',\n 'verification_status_Not Verified',\n 'verification_status_Source Verified', 'verification_status_Verified',\n 'pymnt_plan_n', 'initial_list_status_f', 'initial_list_status_w',\n 'application_type_Individual', 'application_type_Joint App',\n 'hardship_flag_N', 'hardship_flag_Y', 'debt_settlement_flag_N',\n 'debt_settlement_flag_Y'],\n dtype='object')\n"
],
[
"from sklearn.preprocessing import LabelEncoder\ny_label_1 = LabelEncoder().fit_transform(train_df['target'])\ny_label_1",
"_____no_output_____"
],
[
"# Convert categorical data to numeric and separate target feature for testing data\nX_2 = test_df.drop('target', axis=1)\nX_dummies_test = pd.get_dummies(X_2)\nprint(X_dummies_test.columns)\nX_dummies_test",
"Index(['loan_amnt', 'int_rate', 'installment', 'annual_inc', 'dti',\n 'delinq_2yrs', 'inq_last_6mths', 'open_acc', 'pub_rec', 'revol_bal',\n 'total_acc', 'out_prncp', 'out_prncp_inv', 'total_pymnt',\n 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int',\n 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee',\n 'last_pymnt_amnt', 'collections_12_mths_ex_med', 'policy_code',\n 'acc_now_delinq', 'tot_coll_amt', 'tot_cur_bal', 'open_acc_6m',\n 'open_act_il', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il',\n 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc',\n 'all_util', 'total_rev_hi_lim', 'inq_fi', 'total_cu_tl', 'inq_last_12m',\n 'acc_open_past_24mths', 'avg_cur_bal', 'bc_open_to_buy', 'bc_util',\n 'chargeoff_within_12_mths', 'delinq_amnt', 'mo_sin_old_il_acct',\n 'mo_sin_old_rev_tl_op', 'mo_sin_rcnt_rev_tl_op', 'mo_sin_rcnt_tl',\n 'mort_acc', 'mths_since_recent_bc', 'mths_since_recent_inq',\n 'num_accts_ever_120_pd', 'num_actv_bc_tl', 'num_actv_rev_tl',\n 'num_bc_sats', 'num_bc_tl', 'num_il_tl', 'num_op_rev_tl',\n 'num_rev_accts', 'num_rev_tl_bal_gt_0', 'num_sats', 'num_tl_120dpd_2m',\n 'num_tl_30dpd', 'num_tl_90g_dpd_24m', 'num_tl_op_past_12m',\n 'pct_tl_nvr_dlq', 'percent_bc_gt_75', 'pub_rec_bankruptcies',\n 'tax_liens', 'tot_hi_cred_lim', 'total_bal_ex_mort', 'total_bc_limit',\n 'total_il_high_credit_limit', 'home_ownership_ANY',\n 'home_ownership_MORTGAGE', 'home_ownership_OWN', 'home_ownership_RENT',\n 'verification_status_Not Verified',\n 'verification_status_Source Verified', 'verification_status_Verified',\n 'pymnt_plan_n', 'initial_list_status_f', 'initial_list_status_w',\n 'application_type_Individual', 'application_type_Joint App',\n 'hardship_flag_N', 'hardship_flag_Y', 'debt_settlement_flag_N'],\n dtype='object')\n"
],
[
"from sklearn.preprocessing import LabelEncoder\ny_label_2 = LabelEncoder().fit_transform(test_df['target'])\ny_label_2",
"_____no_output_____"
],
[
"# add missing dummy variables to testing set\nX_dummies_test[\"debt_settlement_flag_Y\"] = X_dummies_test[\"debt_settlement_flag_N\"].apply(lambda x: 1 if x == 0 else 0)\nX_dummies_test",
"_____no_output_____"
]
],
[
[
"## Hypothesis\nI believe that random forest will have a better score since the data frame has a lot of catergorical data and a lot of columns in general. ",
"_____no_output_____"
]
],
[
[
"# Train the Logistic Regression model on the unscaled data and print the model score\nclassifier = LogisticRegression()\nclassifier.fit(X_dummies_train, y_label_1)",
"C:\\Users\\15303\\anaconda3\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\n"
],
[
"print(f\"Training Data Score: {classifier.score(X_dummies_train, y_label_1)}\")\nprint(f\"Testing Data Score: {classifier.score(X_dummies_test, y_label_2)}\")",
"Training Data Score: 0.6541050903119868\nTesting Data Score: 0.5082943428328371\n"
],
[
"# Train a Random Forest Classifier model and print the model score\nclf = RandomForestClassifier(random_state=1, n_estimators=500).fit(X_dummies_train, y_label_1)\nprint(f'Training Score: {clf.score(X_dummies_train, y_label_1)}')\nprint(f'Testing Score: {clf.score(X_dummies_test, y_label_2)}')",
"Training Score: 1.0\nTesting Score: 0.646958740961293\n"
]
],
[
[
"## Hypothesis 2\nI think that by scaling my scores are going to get better and that the testing and training will be less spread out.",
"_____no_output_____"
]
],
[
[
"# Scale the data\nscaler = StandardScaler().fit(X_dummies_train)\nX_train_scaled = scaler.transform(X_dummies_train)\nX_test_scaled = scaler.transform(X_dummies_test)\nX_test_scaled",
"_____no_output_____"
],
[
"# Train the Logistic Regression model on the scaled data and print the model score\nclassifier = LogisticRegression()\nclassifier.fit(X_train_scaled, y_label_1)",
"C:\\Users\\15303\\anaconda3\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\n"
],
[
"print(f\"Training Data Score: {classifier.score(X_train_scaled, y_label_1)}\")\nprint(f\"Testing Data Score: {classifier.score(X_test_scaled, y_label_2)}\")",
"Training Data Score: 0.710919540229885\nTesting Data Score: 0.7598894087622289\n"
],
[
"# Train a Random Forest Classifier model on the scaled data and print the model score\nclf = RandomForestClassifier(random_state=1, n_estimators=500).fit(X_train_scaled, y_label_1)\nprint(f'Training Score: {clf.score(X_train_scaled, y_label_1)}')\nprint(f'Testing Score: {clf.score(X_test_scaled, y_label_2)}')",
"Training Score: 1.0\nTesting Score: 0.6480221182475542\n"
]
],
[
[
"## Conclusion\nUltimately, the logistic regression did a better job of analysising the data. Not only does the scaled logistic regression testing score beat out the non-scaled and scaled random forest score, but the training and testing scores are less spread out. \nThe second conlcusion is that scaling the data did improve the logistic regression score. THe random forest, however, remained unchanged.\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
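Both `LogisticRegression` fits in the notebook above emit a `ConvergenceWarning`. A hedged tweak is simply to give the lbfgs solver more iterations on the already-scaled features; the sketch below reuses the arrays defined in the notebook, which is the only assumption it makes:

```python
from sklearn.linear_model import LogisticRegression

# Assumes X_train_scaled, y_label_1, X_test_scaled, y_label_2 from the notebook above.
classifier = LogisticRegression(max_iter=2000)
classifier.fit(X_train_scaled, y_label_1)
print(f"Testing Data Score: {classifier.score(X_test_scaled, y_label_2)}")
```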
d04b0fb8544a60c0779923a7ed9640df2218a374 | 603,436 | ipynb | Jupyter Notebook | pyfolio/examples/single_stock_example.ipynb | MBounouar/pyfolio-reloaded | 3194df9e7270f1062b75ae3e53def292cae6e652 | [
"Apache-2.0"
] | 52 | 2021-04-19T21:27:59.000Z | 2022-03-26T02:54:10.000Z | pyfolio/examples/single_stock_example.ipynb | MBounouar/pyfolio-reloaded | 3194df9e7270f1062b75ae3e53def292cae6e652 | [
"Apache-2.0"
] | 4 | 2021-05-01T15:25:20.000Z | 2021-11-30T03:17:29.000Z | pyfolio/examples/single_stock_example.ipynb | MBounouar/pyfolio-reloaded | 3194df9e7270f1062b75ae3e53def292cae6e652 | [
"Apache-2.0"
] | 23 | 2021-04-27T07:19:03.000Z | 2022-03-31T01:32:11.000Z | 1,563.305699 | 593,408 | 0.956236 | [
[
[
"# Intro\n\nHere's a simple example where we produce a set of plots, called a tear sheet, for a single stock.",
"_____no_output_____"
],
[
"## Imports and Settings",
"_____no_output_____"
]
],
[
[
"# silence warnings\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"import yfinance as yf\nimport pyfolio as pf\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Download daily stock prices using yfinance",
"_____no_output_____"
],
[
"Pyfolio expects tz-aware input set to UTC timezone.",
"_____no_output_____"
],
[
"You may have to import `yfinance` first by running:\n```bash\npip install yfinance\n```",
"_____no_output_____"
]
],
[
[
"fb = yf.Ticker('FB')\nhistory = fb.history('max')\nhistory.index = history.index.tz_localize('utc')",
"_____no_output_____"
],
[
"history.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 2243 entries, 2012-05-18 00:00:00+00:00 to 2021-04-19 00:00:00+00:00\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Open 2243 non-null float64\n 1 High 2243 non-null float64\n 2 Low 2243 non-null float64\n 3 Close 2243 non-null float64\n 4 Volume 2243 non-null int64 \n 5 Dividends 2243 non-null int64 \n 6 Stock Splits 2243 non-null int64 \ndtypes: float64(4), int64(3)\nmemory usage: 140.2 KB\n"
],
[
"returns = history.Close.pct_change()",
"_____no_output_____"
]
],
[
[
"## Create returns tear sheet\nThis will show charts and analysis about returns of the single stock.",
"_____no_output_____"
]
],
[
[
"pf.create_returns_tear_sheet(returns, live_start_date='2020-1-1')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
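`pct_change()` in the notebook above leaves a leading `NaN` in the returns series; pyfolio tolerates it, but a hedged, slightly tidier variant of that step (reusing the `history` frame from the notebook) is:

```python
# Drop the first-row NaN produced by pct_change and give the series a readable name.
returns = history.Close.pct_change().dropna()
returns.name = 'FB'
```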
d04b1586b654530b82c5a37c5673fed1e9173116 | 220,812 | ipynb | Jupyter Notebook | cnn_classifier.ipynb | Poxls88/triggerword | 920c185040d5aed1783610a52c00da0f0e9a1cb0 | [
"MIT"
] | 1 | 2021-06-26T00:35:29.000Z | 2021-06-26T00:35:29.000Z | cnn_classifier.ipynb | Poxls88/triggerword | 920c185040d5aed1783610a52c00da0f0e9a1cb0 | [
"MIT"
] | null | null | null | cnn_classifier.ipynb | Poxls88/triggerword | 920c185040d5aed1783610a52c00da0f0e9a1cb0 | [
"MIT"
] | null | null | null | 220,812 | 220,812 | 0.907894 | [
[
[
"#Import Data",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom sklearn.model_selection import GridSearchCV\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# load data\nimport os\nfrom google.colab import drive\ndrive.mount('/content/drive')\nfiledir = './drive/My Drive/Final/CNN_data'\nwith open(filedir + '/' + 'feature_extracted', 'rb') as f:\n X = np.load(f)\nwith open(filedir + '/' + 'Y', 'rb') as f:\n Y = np.load(f).astype(np.int32)",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
],
[
"# import MFCC data\nwith open('./drive/My Drive/Final/mfcc_data/X', 'rb') as f:\n X_mfcc = np.load(f)\nwith open('./drive/My Drive/Final/mfcc_data/Y', 'rb') as f:\n Y_mfcc = np.load(f)\nprint('X_shape: {}\\nY_shape: {}'.format(X_mfcc.shape, Y_mfcc.shape))",
"X_shape: (300, 756)\nY_shape: (300,)\n"
],
[
"import warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"'''\nX_new = np.zeros([300,0])\nfor i in range(X.shape[1]):\n col = X[:,i,None]\n if((np.abs(col) > 1e-6).any()):\n X_new = np.hstack([X_new, col])\n else:\n print('Yes')\nprint('X.shape: {}\\nX_new.shape: {}\\nY.shape: {}'.format(X.shape, X_new.shape, Y.shape))\n\nprint(X_new.shape)\nprint(np.max(X_new, axis=1) != np.max(X, axis=1))\nprint(np.min(X_new, axis=1))\n'''",
"_____no_output_____"
]
],
[
[
"#CLF1 Ridge Classifier",
"_____no_output_____"
]
],
[
[
"'''\nfrom sklearn.linear_model import RidgeClassifier\nparameters = {'alpha':[1]}\nrc = RidgeClassifier(alpha = 1)\nclf = GridSearchCV(rc, parameters, cv=3)\nclf.fit(X[:30], Y[:30])\n\nclf.best_estimator_.fit(X[:30], Y[:30]).score(X, Y)\nclf.best_index_\n'''",
"_____no_output_____"
],
[
"from sklearn.linear_model import RidgeClassifier\ndef clf_RidgeClassifier(training_set, training_lable, testing_set, testing_lable):\n parameters = {'alpha':[10, 1, 1e-1, 1e-2, 1e-3]}\n rc = RidgeClassifier(alpha = 1)\n clf = GridSearchCV(rc, parameters, cv=3, return_train_score=True, iid=False)\n clf.fit(training_set, training_lable)\n results = clf.cv_results_\n opt_index = clf.best_index_\n training_score = results['mean_train_score'][opt_index]\n validation_score = results['mean_test_score'][opt_index]\n testing_score = clf.best_estimator_.fit(training_set, training_lable).score(testing_set, testing_lable)\n return [training_score, validation_score, testing_score], clf.best_params_",
"_____no_output_____"
],
[
"clf_RidgeClassifier(X[:240], Y[:240], X[240:], Y[240:])",
"_____no_output_____"
]
],
[
[
"#CLF2 SVM",
"_____no_output_____"
]
],
[
[
"from sklearn.svm import SVC\ndef clf_SVM(X_train, Y_train, X_test, Y_test):\n parameters = {'C':[10, 1, 1e-1, 1e-2, 1e-3]}\n svc = SVC(kernel='linear')\n clf = GridSearchCV(svc, parameters, cv=3, return_train_score=True, iid=False)\n clf.fit(X_train, Y_train)\n results = clf.cv_results_\n opt_index = clf.best_index_\n training_score = results['mean_train_score'][opt_index]\n validation_score = results['mean_test_score'][opt_index]\n testing_score = clf.best_estimator_.fit(X_train, Y_train).score(X_test, Y_test)\n return [training_score, validation_score, testing_score], clf.best_params_",
"_____no_output_____"
],
[
"clf_SVM(X[:240], Y[:240], X[240:], Y[240:])",
"_____no_output_____"
]
],
[
[
"#CLF3 LDA",
"_____no_output_____"
]
],
[
[
"from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\ndef clf_lda(Xtrain, Ytrain, Xtest, Ytest):\n \"\"\"\n Input: training data, labels, testing data, labels\n Output: training set mean prediciton accuracy, validation accuracy = None, testing set mean prediction accuracy\n \n Note: LDA has no hyperparameters to tune because a model is solved in closed form\n therefore there is no need for model selection via grid search cross validation\n therefore there is no validation accuracy\n \"\"\"\n clf = LinearDiscriminantAnalysis()\n clf.fit(Xtrain, Ytrain)\n \n \n train_acc = clf.score(Xtrain,Ytrain)\n val_acc = None\n test_acc = clf.score(Xtest,Ytest)\n return [train_acc,val_acc,test_acc], None",
"_____no_output_____"
],
[
"clf_lda(X[:240],Y[:240],X[240:],Y[240:])",
"_____no_output_____"
]
],
[
[
"#CLF4 KNN",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import KNeighborsClassifier\ndef clf_KNN(X_train, Y_train, X_test, Y_test):\n parameters = {'n_neighbors':[1,5,20]}\n knn = KNeighborsClassifier(algorithm='auto', weights='uniform')\n clf = GridSearchCV(knn, parameters, cv=3, return_train_score=True, iid=False)\n clf.fit(X_train, Y_train)\n results = clf.cv_results_\n opt_index = clf.best_index_\n training_score = results['mean_train_score'][opt_index]\n validation_score = results['mean_test_score'][opt_index]\n testing_score = clf.best_estimator_.fit(X_train, Y_train).score(X_test, Y_test)\n return [training_score, validation_score, testing_score], clf.best_params_",
"_____no_output_____"
],
[
"clf_KNN(X[:240], Y[:240], X[240:], Y[240:])",
"_____no_output_____"
]
],
[
[
"#CLF5 Decision Tree",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier\ndef clf_DecisionTree(X_train, Y_train, X_test, Y_test):\n parameters = {'max_depth':[5,10,15,20,25], 'criterion':['entropy', 'gini']}\n dtc = DecisionTreeClassifier()\n clf = GridSearchCV(dtc, parameters, cv=3, return_train_score=True, iid=False)\n clf.fit(X_train, Y_train)\n results = clf.cv_results_\n opt_index = clf.best_index_\n training_score = results['mean_train_score'][opt_index]\n validation_score = results['mean_test_score'][opt_index]\n testing_score = clf.best_estimator_.fit(X_train, Y_train).score(X_test, Y_test)\n return [training_score, validation_score, testing_score], clf.best_params_",
"_____no_output_____"
],
[
"clf_DecisionTree(X[:240], Y[:240], X[240:], Y[240:])",
"_____no_output_____"
]
],
[
[
"#Testing On Data",
"_____no_output_____"
]
],
[
[
"clf_list = [clf_RidgeClassifier, clf_SVM, clf_lda, clf_KNN, clf_DecisionTree]\ndef test_trial(X_shuffled, Y_shuffled):\n global clf_list\n error = np.zeros((3,5,3)) # partition(3) * clf(5) * error(3)\n # (8/2,5/5,2/8) * (clf_list) * (trn,val,tst)\n opt_param = np.empty((3,5), dtype=dict) # partition(3) * clf(5)\n sample_size = len(X_shuffled)\n # 80/20 split\n train_size = int(sample_size * 0.8)\n X_train = X_shuffled[:train_size]\n Y_train = Y_shuffled[:train_size]\n X_test = X_shuffled[train_size:]\n Y_test = Y_shuffled[train_size:]\n for i in range(len(clf_list)):\n clffn = clf_list[i]\n error[0,i,:], opt_param[0,i] = clffn(X_train, Y_train, X_test, Y_test)\n # 50/50 split\n train_size = int(sample_size * 0.5)\n X_train = X_shuffled[:train_size]\n Y_train = Y_shuffled[:train_size]\n X_test = X_shuffled[train_size:]\n Y_test = Y_shuffled[train_size:]\n for i in range(len(clf_list)):\n clffn = clf_list[i]\n error[1,i,:], opt_param[1,i] = clffn(X_train, Y_train, X_test, Y_test)\n # 80/20 split\n train_size = int(sample_size * 0.2)\n X_train = X_shuffled[:train_size]\n Y_train = Y_shuffled[:train_size]\n X_test = X_shuffled[train_size:]\n Y_test = Y_shuffled[train_size:]\n for i in range(len(clf_list)):\n clffn = clf_list[i]\n error[2,i,:], opt_param[2,i] = clffn(X_train, Y_train, X_test, Y_test)\n # return error array\n return error, opt_param",
"_____no_output_____"
],
[
"from sklearn.utils import shuffle\ndef test_data(X, Y):\n error = np.zeros((3,3,5,3)) # trial(3) * error_from_test_trial(3*5*3)\n opt_param = np.empty((3,3,5), dtype=dict) # trial(3) * opt_param_from_test_trial(3*5)\n # trial 1\n X_shuffled, Y_shuffled = shuffle(X, Y)\n error[0], opt_param[0] = test_trial(X_shuffled, Y_shuffled)\n # trial 2\n X_shuffled, Y_shuffled = shuffle(X_shuffled, Y_shuffled)\n error[1], opt_param[1] = test_trial(X_shuffled, Y_shuffled)\n # trial 3\n X_shuffled, Y_shuffled = shuffle(X_shuffled, Y_shuffled)\n error[2], opt_param[2] = test_trial(X_shuffled, Y_shuffled)\n return error, opt_param",
"_____no_output_____"
],
[
"# test on CNN-extracted features\nacc_CNN, opt_param_CNN = test_data(X, Y)",
"_____no_output_____"
],
[
"np.mean(acc_CNN[:,:,:,:], axis=0)",
"_____no_output_____"
],
[
"acc_clf, opt_param = test_data(X_mfcc, Y_mfcc)",
"_____no_output_____"
],
[
"avg_cnn_acc = np.mean(acc_CNN, axis=0)\navg_clf_acc = np.mean(acc_clf, axis=0)\nprint('cnn: {}'.format(avg_cnn_acc))\nprint('clf: {}'.format(avg_clf_acc))",
"cnn: [[[0.99930556 0.96107204 0.93888889]\n [0.99861542 0.96801822 0.95 ]\n [0.99861111 nan 0.77222222]\n [1. 0.93058464 0.92777778]\n [1. 0.92784116 0.9 ]]\n\n [[1. 0.93549598 0.94444444]\n [0.98553222 0.95545596 0.95555556]\n [0.99777778 nan 0.87111111]\n [1. 0.92402383 0.93777778]\n [1. 0.91971544 0.88666667]]\n\n [[1. 0.92857143 0.89444444]\n [0.98888889 0.9452381 0.93472222]\n [1. nan 0.80833333]\n [1. 0.90661376 0.88055556]\n [1. 0.92883598 0.87222222]]]\nclf: [[[1. 0.9860931 0.97777778]\n [1. 0.99166623 0.99444444]\n [1. nan 0.98888889]\n [0.99514309 0.98887131 1. ]\n [1. 0.9597385 0.9 ]]\n\n [[1. 0.98679739 0.99333333]\n [1. 0.98901961 0.99555556]\n [1. nan 0.99333333]\n [1. 0.97332978 0.99555556]\n [1. 0.92460807 0.94222222]]\n\n [[1. 0.96637427 0.99444444]\n [1. 0.96081871 0.99583333]\n [0.96666667 nan 0.97638889]\n [1. 0.94967975 0.97777778]\n [1. 0.88324979 0.89166667]]]\n"
],
[
"# partition_accuracy plot\nfrom matplotlib import rcParams\nrcParams['figure.figsize'] = (8,8)\ncolors = ['cyan', 'green', 'red', 'orange','black']\nclf = ['RidgeRegression', 'SVM', 'LDA', 'KNN', 'DecisionTree']\nfor clfid in range(5):\n plt.plot(avg_cnn_acc[:,clfid,-1], color=colors[clfid], linestyle='solid', label='CNN '+clf[clfid])\n plt.plot(avg_clf_acc[:,clfid,-1], color=colors[clfid], linestyle='dashed', label='MFCC '+clf[clfid])\nplt.legend(loc='lower left')\nplt.xticks((0,1,2),['80/20', '50/50', '20/80'])\nplt.xlabel('partition (train/test)')\nplt.ylabel('average test accuracy')\nplt.savefig('./drive/My Drive/Final/graphs/partition_accuracy.png', bbox='tight')",
"_____no_output_____"
],
[
"# SVM hyperparameter error plot\n\nparameters = {'C':[10, 1, 1e-1, 1e-2, 1e-3]}\nsvc = SVC(kernel='linear')\nclf = GridSearchCV(svc, parameters, cv=3, return_train_score=True, iid=False)\nclf.fit(X[:240], Y[:240])\nresults = clf.cv_results_\nopt_index = clf.best_index_\ntraining_score = results['mean_train_score']\nvalidation_score = results['mean_test_score']\n",
"_____no_output_____"
],
[
"param_x = results['param_C'].data.astype(np.float32)\nplt.plot(param_x, training_score, 'r-', label='training')\nplt.plot(param_x, validation_score, 'b-', label='validation')\nplt.legend(loc='lower left')\nplt.xticks([0,2.5,5,7.5,10], ['10','1','1e-1','1e-2','1e-3'])\nplt.xlabel('param_C')\nplt.ylabel('accuracy')\n#plt.show()\nplt.savefig('./drive/My Drive/Final/graphs/SVM_hyperparameter_accuracy.png')",
"_____no_output_____"
],
[
"# avg cross-partition accuracy\ncnn_cp_acc = np.mean(avg_cnn_acc[:,:,-1], axis=0)\nclf_cp_acc = np.mean(avg_clf_acc[:,:,-1], axis=0)\nprint('cnn_cp_acc: {}'.format(cnn_cp_acc))\nprint('clf_cp_acc: {}'.format(clf_cp_acc))",
"cnn_cp_acc: [0.92592593 0.94675926 0.81722222 0.91537037 0.8862963 ]\nclf_cp_acc: [0.98851852 0.99527778 0.9862037 0.99111111 0.9112963 ]\n"
],
[
"avg_totalcp_acc = (cnn_cp_acc + clf_cp_acc) / 2\nprint(avg_totalcp_acc)",
"[0.95722222 0.97101852 0.90171296 0.95324074 0.8987963 ]\n"
],
[
"(avg_cnn_acc + avg_clf_acc)/2",
"_____no_output_____"
],
[
"opt_param",
"_____no_output_____"
],
[
"opt_param_CNN",
"_____no_output_____"
],
[
"max_ind_cnn = np.argpartition(np.sum(X, axis=0), -2)[-2:]\nstd_ind_cnn = np.argpartition(np.std(X, axis=0), -2)[-2:]\nmax_ind_clf = np.argpartition(np.sum(X_mfcc, axis=0), -2)[-2:]\nstd_ind_clf = np.argpartition(np.std(X_mfcc, axis=0), -2)[-2:]",
"_____no_output_____"
],
[
"max_cnn = X[:,max_ind_cnn]\nstd_cnn = X[:,std_ind_cnn]\nmax_clf = X_mfcc[:,max_ind_clf]\nstd_clf = X_mfcc[:,std_ind_clf]",
"_____no_output_____"
],
[
"def plot_features(X, Y):\n return X[Y==0,:], X[Y==1,:]",
"_____no_output_____"
],
[
"# 2 max features from cnn plotted\nplt.clf()\nfeature0, feature1 = plot_features(max_cnn, Y)\nplt.plot(feature0[:,0], feature0[:,1],'ro', label='digit 0')\nplt.plot(feature1[:,0], feature1[:,1],'go', label='digit 1')\nplt.legend(loc='lower right')\nplt.show()\n#plt.savefig('./drive/My Drive/Final/graphs/2_max_sum_cnn_features.png')",
"_____no_output_____"
],
[
"# 2 var features from cnn plotted\nfeature0, feature1 = plot_features(std_cnn, Y)\nplt.plot(feature0[:,0], feature0[:,1],'ro', label='digit 0')\nplt.plot(feature1[:,0], feature1[:,1],'go', label='digit 1')\nplt.legend(loc='lower right')\n#plt.show()\nplt.savefig('./drive/My Drive/Final/graphs/2_max_var_cnn_features.png')",
"_____no_output_____"
],
[
"# 2 max features from mfcc plotted\nfeature0, feature1 = plot_features(max_clf, Y)\nplt.plot(feature0[:,0], feature0[:,1],'ro', label='digit 0')\nplt.plot(feature1[:,0], feature1[:,1],'go', label='digit 1')\nplt.legend(loc='lower right')\n#plt.show()\nplt.savefig('./drive/My Drive/Final/graphs/2_max_sum_mfcc_features.png')",
"_____no_output_____"
],
[
"# 2 var features from mfcc plotted\nfeature0, feature1 = plot_features(std_clf, Y)\nplt.plot(feature0[:,0], feature0[:,1],'ro', label='digit 0')\nplt.plot(feature1[:,0], feature1[:,1],'go', label='digit 1')\nplt.legend(loc='lower right')\n#plt.show()\nplt.savefig('./drive/My Drive/Final/graphs/2_max_var_mfcc_features.png')",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04b169fd141376c848740b919c2debbf3ba534e | 7,536 | ipynb | Jupyter Notebook | Image/extract_value_to_points.ipynb | YuePanEdward/earthengine-py-notebooks | cade6a81dd4dbbfb1b9b37aaf6955de42226cfc5 | [
"MIT"
] | 1 | 2020-11-16T08:00:11.000Z | 2020-11-16T08:00:11.000Z | Image/extract_value_to_points.ipynb | mllzl/earthengine-py-notebooks | cade6a81dd4dbbfb1b9b37aaf6955de42226cfc5 | [
"MIT"
] | null | null | null | Image/extract_value_to_points.ipynb | mllzl/earthengine-py-notebooks | cade6a81dd4dbbfb1b9b37aaf6955de42226cfc5 | [
"MIT"
] | null | null | null | 44.857143 | 1,031 | 0.583599 | [
[
[
"<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/extract_value_to_points.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/extract_value_to_points.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/extract_value_to_points.ipynb\"><img width=58px src=\"https://mybinder.org/static/images/logo_social.png\" />Run in binder</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/extract_value_to_points.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>",
"_____no_output_____"
],
[
"## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.\n\n**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).",
"_____no_output_____"
]
],
[
[
"# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('geemap package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])\n\n# Checks whether this notebook is running on Google Colab\ntry:\n import google.colab\n import geemap.eefolium as emap\nexcept:\n import geemap as emap\n\n# Authenticates and initializes Earth Engine\nimport ee\n\ntry:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize() ",
"_____no_output_____"
]
],
[
[
"## Create an interactive map \nThe default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ",
"_____no_output_____"
]
],
[
[
"Map = emap.Map(center=[40,-100], zoom=4)\nMap.add_basemap('ROADMAP') # Add Google Map\nMap",
"_____no_output_____"
]
],
[
[
"## Add Earth Engine Python script ",
"_____no_output_____"
]
],
[
[
"# Add Earth Engine dataset\n# Input imagery is a cloud-free Landsat 8 composite.\nl8 = ee.ImageCollection('LANDSAT/LC08/C01/T1')\n\nimage = ee.Algorithms.Landsat.simpleComposite(**{\n 'collection': l8.filterDate('2018-01-01', '2018-12-31'),\n 'asFloat': True\n})\n\n# Use these bands for prediction.\nbands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'B11']\n\n# Load training points. The numeric property 'class' stores known labels.\npoints = ee.FeatureCollection('GOOGLE/EE/DEMOS/demo_landcover_labels')\n\n# This property of the table stores the land cover labels.\nlabel = 'landcover'\n\n# Overlay the points on the imagery to get training.\ntraining = image.select(bands).sampleRegions(**{\n 'collection': points,\n 'properties': [label],\n 'scale': 30\n})\n\n# Define visualization parameters in an object literal.\nvizParams = {'bands': ['B5', 'B4', 'B3'],\n 'min': 0, 'max': 1, 'gamma': 1.3}\n\n\nMap.centerObject(points, 10)\nMap.addLayer(image, vizParams, 'Image')\nMap.addLayer(points, {'color': \"yellow\"}, 'Training points')\n\nfirst = training.first()\nprint(first.getInfo())\n",
"_____no_output_____"
]
],
[
[
"## Display Earth Engine data layers ",
"_____no_output_____"
]
],
[
[
"Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d04b199cda9d2e9f761c71d359e098bc8ab800fe | 19,562 | ipynb | Jupyter Notebook | NI-edu/fMRI-introduction/week_4/fmriprep.ipynb | lukassnoek/NI-edu | ceb0c0006ad1be7eaf6bcae41cc4557c4e72b7aa | [
"MIT"
] | 10 | 2021-02-23T16:06:06.000Z | 2022-03-31T10:28:39.000Z | NI-edu/fMRI-introduction/week_4/fmriprep.ipynb | Neuroimaging-UvA/NI-edu | c21874ab59b2c7f48658f603fc849d4d6597f5e3 | [
"MIT"
] | 1 | 2022-03-29T09:39:03.000Z | 2022-03-29T09:39:03.000Z | NI-edu/fMRI-introduction/week_4/fmriprep.ipynb | Neuroimaging-UvA/NI-edu | c21874ab59b2c7f48658f603fc849d4d6597f5e3 | [
"MIT"
] | 3 | 2021-01-15T14:32:53.000Z | 2022-03-28T22:33:06.000Z | 69.864286 | 1,077 | 0.704222 | [
[
[
"# Fmriprep\nToday, many excellent general-purpose, open-source neuroimaging software packages exist: [SPM](https://www.fil.ion.ucl.ac.uk/spm/) (Matlab-based), [FSL](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki), [AFNI](https://afni.nimh.nih.gov/), and [Freesurfer](https://surfer.nmr.mgh.harvard.edu/) (with a shell interface). We argue that there is not one single package that is always the best choice for every step in your preprocessing pipeline. Fortunately, people from the [Poldrack lab](https://poldracklab.stanford.edu/) created [fmriprep](https://fmriprep.readthedocs.io/en/stable/), a software package that offers a preprocessing pipeline which \"glues together\" functionality from different neuroimaging software packages (such as Freesurfer and FSL), such that each step in the pipeline is executed by the software package that (arguably) does it best.\n\nWe have been using *Fmriprep* for preprocessing of our own data and we strongly recommend it. It is relatively simple to use, requires minimal user intervention, and creates extensive visual reports for users to do visual quality control (to check whether each step in the pipeline worked as expected). The *only* requirement to use Fmriprep is that your data is formatted as specified in the Brain Imaging Data Structure (BIDS).",
"_____no_output_____"
],
[
"## The BIDS-format\n[BIDS](https://bids.neuroimaging.io/) is a specification on how to format, name, and organize your MRI dataset. It specifies the file format of MRI files (i.e., compressed Nifti: `.nii.gz` files), lays out rules for how you should name your files (i.e., with \"key-value\" pairs, such as: `sub-01_ses-1_task-1back_run-1_bold.nii.gz`), and outlines the file/folder structure of your dataset (where each subject has its own directory with separate subdirectories for different MRI modalities, including fieldmaps, functional, diffusion, and anatomical MRI). Additionally, it specifies a way to include \"metadata\" about the (MRI) files in your dataset with [JSON](https://en.wikipedia.org/wiki/JSON) files: plain-text files with key-value pairs (in the form \"parameter: value\"). Given that your dataset is BIDS-formatted and contains the necessary metadata, you can use `fmriprep` on your dataset. (You can use the awesome [bids-validator](https://bids-standard.github.io/bids-validator/) to see whether your dataset is completely valid according to BIDS.)\n\nThere are different tools to convert your \"raw\" scanner data (e.g., in DICOM or PAR/REC format) to BIDS, including [heudiconv](https://heudiconv.readthedocs.io/en/latest/), [bidscoin](https://github.com/Donders-Institute/bidscoin), and [bidsify](https://github.com/NILAB-UvA/bidsify) (created by Lukas). We'll skip over this step and assume that you'll be able to convert your data to BIDS.",
"_____no_output_____"
],
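A quick illustration of the BIDS naming scheme described in the cell above (this sketch is not part of the original notebook; the filename is the example used in the text, and the parsing is plain Python with no BIDS-specific library):

```python
# Hypothetical example: BIDS filenames are underscore-separated "key-value" pairs,
# so they can be unpacked with plain string operations.
fname = 'sub-01_ses-1_task-1back_run-1_bold.nii.gz'  # example name from the text above

base = fname.split('.')[0]          # strip the .nii.gz extension
*pairs, suffix = base.split('_')    # the last chunk ('bold') is the suffix, not a key-value pair

entities = {}
for pair in pairs:
    key, value = pair.split('-', 1)
    entities[key] = value

print(entities)  # {'sub': '01', 'ses': '1', 'task': '1back', 'run': '1'}
print(suffix)    # 'bold'
```

In practice a dedicated tool such as the `pybids` package is usually used to query BIDS datasets, but the example shows how transparent the naming convention is.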
[
"## Installing Fmriprep\nNow, having your data in BIDS is an important step in getting started with Fmriprep. The next step is installing the package. Technically, Fmriprep is a Python package, so it can be installed as such (using `pip install fmriprep`), but we do not recommend this \"bare metal\" installation, because it depends on a host of neuroimaging software packages (including FSL, Freesurfer, AFNI, and ANTs). So if you'd want to directly install Fmriprep, you'd need to install those extra neuroimaging software packages as well (which is not worth your time, trust us).\n\nFortunately, Fmriprep also offers a \"Docker container\" in which Fmriprep and all the associated dependencies are already installed. [Docker](https://www.docker.com/) is software that allows you to create \"containers\", which are like lightweight \"virtual machines\" ([VM](https://en.wikipedia.org/wiki/Virtual_machine)) that are like a separate (Linux-based) operating system with a specific software configuration. You can download the Fmriprep-specific docker \"image\", which is like a \"recipe\", build the Fmriprep-specific \"container\" according to this \"recipe\" on your computer, and finally use this container to run Fmriprep on your computer as if all dependencies were actually installed on your computer! Docker is available on Linux, Mac, and Windows. To install Docker, google something like \"install docker for {Windows,Mac,Linux}\" to find a google walkthrough.\n\nNote that you need administrator (\"root\") privilege on your computer (which is likely the case for your own computer, but not on shared analysis servers) to run Docker. If you don't have root access on your computer/server, ask you administrator/sysadmin to install [singularity](https://fmriprep.readthedocs.io/en/stable/installation.html#singularity-container), which allows you to convert Docker images to Singularity images, which you can run without administrator privileges.\n\nAssuming you have installed Docker, you can run the \"containerized\" Fmriprep from your command line directly, which involves a fairly long and complicated command (i.e., `docker run -it --rm -v bids_dir /data ... etc`), or using the `fmriprep-docker` Python package. This `fmriprep-docker` package is just a simple wrapper around the appropriate Docker command to run the complicated \"containerized\" Fmriprep command. We strongly recommend this method.\n\nTo install `fmriprep-docker`, you can use `pip` (from your command line):\n\n```\npip install fmriprep-docker\n```\n\nNow, you should have access to the `fmriprep-docker` command on your command line and you're ready to start preprocessing your dataset. For more detailed information about installing Fmriprep, check out their [website](https://fmriprep.readthedocs.io/en/stable/installation.html).",
"_____no_output_____"
],
[
"## Running Fmriprep\nAssuming you have Docker and `fmriprep-docker` installed, you're ready to run Fmriprep. The basic format of the `fmriprep-docker` command is as follows:\n\n```\nfmriprep-docker <your bids-folder> <your output-folder> \n```\n\nThis means that `fmriprep-docker` has two mandatory positional arguments: the first one being your BIDS-folder (i.e., the path to your folder with BIDS-formattefd data), and the second one being the output-folder (i.e., where you want Fmriprep to output the preprocessed data). We recommend setting your output-folder to a subfolder of your BIDS-folder named \"derivatives\": `<your bids-folder>/derivatives`.\n\nThen, you can add a bunch of extra \"flags\" (parameters) to the command to specify the preprocessing pipeline as you like it. We highlight a couple of important ones here, but for the full list of parameters, check out the [Fmriprep](https://fmriprep.readthedocs.io/en/stable/usage.html) website.\n\n### Freesurfer\nWhen running Fmriprep from Docker, you don't need to have Freesurfer installed, but you *do* need a Freesurfer license. You can download this here: https://surfer.nmr.mgh.harvard.edu/fswiki/License. Then, you need to supply the `--fs-license-file <path to license file>` parameter to your `fmriprep-docker` command:\n\n```\nfmriprep-docker <your bids-folder> <your output-folder> --fs-license-file /home/lukas/license.txt\n```\n\n### Configuring what is preprocessed\nIf you just run Fmriprep with the mandatory BIDS-folder and output-folder arguments, it will preprocess everything it finds in the BIDS-folder. Sometimes, however, you may just want to run one (or several) specific participants, or one (or more) specific tasks (e.g., only the MRI files associated with the localizer runs, but not the working memory runs). You can do this by adding the `--participant` and `--task` flags to the command:\n\n```\nfmriprep-docker <your bids-folder> <your output-folder> --participant sub-01 --task localizer\n```\n\nYou can also specify some things to be ignored during preprocessing using the `--ignore` parameters (like `fieldmaps`):\n\n```\nfmriprep-docker <your bids-folder> <your output-folder> --ignore fieldmaps\n```\n\n### Handling performance\nIt's very easy to parallelize the preprocessing pipeline by setting the `--nthreads` and `--omp-nthreads` parameters, which refer to the number of threads that should be used to run Fmriprep on. Note that laptops usually have 4 threads available (but analysis servers usually have more!). You can also specify the maximum of RAM that Fmriprep is allowed to use by the `--mem_mb` parameters. So, if you for example want to run Fmriprep with 3 threads and a maximum of 3GB of RAM, you can run:\n\n```\nfmriprep-docker <your bids-folder> <your output-folder> --nthreads 3 --omp-nthreads 3 --mem_mb 3000\n```\n\nIn our experience, however, specifying the `--mem_mb` parameter is rarely necessary if you don't parallelize too much. \n\n### Output spaces\nSpecifying your \"output spaces\" (with the `--output-spaces` flag) tells Fmriprep to what \"space(s)\" you want your preprocessed data registered to. For example, you can specify `T1w` to have your functional data registered to the participant's T1 scan. You can, instead or in addition to, also specify some standard template, like the MNI template (`MNI152NLin2009cAsym` or `MNI152NLin6Asym`). You can even specify surface templates if you want (like `fsaverage`), which will sample your volumetric functional data onto the surface (as computed by freesurfer). 
In addition to the specific output space(s), you can add a resolution \"modifier\" to the parameter to specify in what spatial resolution you want your resampled data to be. Without any resolution modifier, the native resolution of your functional files (e.g., $3\\times3\\times3$ mm.) will be kept intact. But if you want to upsample your resampled files to 2mm, you can add `YourTemplate:2mm`. For example, if you want to use the FSL-style MNI template (`MNI152NLin6Asym`) resampled at 2 mm, you'd use:\n\n```\nfmriprep-docker <your bids-folder> <your output-folder> --output-spaces MNI152NLin6Asym:2mm\n```\n\nYou can of course specify multiple output-spaces:\n\n```\nfmriprep-docker <your bids-folder> <your output-folder> --output-spaces MNI152NLin6Asym:2mm T1w fsaverage\n```\n\n### Other parameters\nThere are many options that you can set when running Fmriprep. Check out the [Fmriprep website](https://fmriprep.readthedocs.io/) (under \"Usage\") for a list of all options!",
"_____no_output_____"
],
[
"## Issues, errors, and troubleshooting\nWhile Fmriprep often works out-of-the-box (assuming your data are properly BIDS-formatted), it may happen that it crashes or otherwise gives unexpected results. A great place to start looking for help is [neurostars.org](https://neurostars.org). This website is dedicated to helping neuroscientists with neuroimaging/neuroscience-related questions. Make sure to check whether your question has been asked here already and, if not, pose it here!\n\nIf you encounter Fmriprep-specific bugs, you can also submit and issue at the [Github repository](https://github.com/poldracklab/fmriprep) of Fmriprep.",
"_____no_output_____"
],
[
"## Fmriprep output/reports\nAfter Fmriprep has run, it outputs, for each participants separately, a directory with results (i.e., preprocessed files) and an HTML-file with a summary and figures of the different steps in the preprocessing pipeline.\n\nWe ran Fmriprep on a single run/task (`flocBLOCKED`) from a single subject (`sub-03`) some data with the following command:\n\n```\nfmriprep-docker /home/lsnoek1/ni-edu/bids /home/lsnoek1/ni-edu/bids/derivatives --participant-label sub-03 --output-spaces T1w MNI152NLin2009cAsym\n```\n\nWe've copied the Fmriprep output for this subject (`sub-03`) in the `fmriprep` subdirectory of the `week_4` directory. Let's check its contents:",
"_____no_output_____"
]
],
[
[
"import os\nprint(os.listdir('bids/derivatives/fmriprep'))",
"_____no_output_____"
]
],
[
[
"As said, Fmriprep outputs a directory with results (`sub-03`) and an associated HTML-file with a summary of the (intermediate and final) results. Let's check the directory with results first:",
"_____no_output_____"
]
],
[
[
"from pprint import pprint # pprint stands for \"pretty print\", \n\nsub_path = os.path.join('bids/derivatives/fmriprep', 'sub-03')\npprint(sorted(os.listdir(sub_path)))",
"_____no_output_____"
]
],
[
[
"The `figures` directory contains several figures with the result of different preprocessing stages (like functional → high-res anatomical registration), but these figures are also included in the HTML-file, so we'll leave that for now. The other two directories, `anat` and `func`, contain the preprocessed anatomical and functional files, respectively. Let's inspect the `anat` directory:",
"_____no_output_____"
]
],
[
[
"anat_path = os.path.join(sub_path, 'anat')\npprint(os.listdir(anat_path))",
"_____no_output_____"
]
],
[
[
"Here, we see a couple of different files. There are both (preprocessed) nifti images (`*.nii.gz`) and associated meta-data (plain-text files in JSON format: `*.json`).\n\nImportantly, the nifti outputs are in two different spaces: one set of files are in the original \"T1 space\", so without any resampling to another space (these files have the same resolution and orientation as the original T1 anatomical scan). For example, the `sub_03_desc-preproc_T1w.nii.gz` scan is the preprocessed (i.e., bias-corrected) T1 scan. In addition, most files are also available in `MNI152NLin2009cAsym` space, a standard template. For example, the `sub-03_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz` is the same file as `sub_03_desc-preproc_T1w.nii.gz`, but resampled to the `MNI152NLin2009cAsym` template. In addition, there are subject-specific brain parcellations (the `*aparcaseg_dseg.nii.gz `and `*aseg_dseg.nii.gz` files), files with registration parameters (`*from- ... -to ...` files), probabilistic tissue segmentation files (`*label-{CSF,GM,WM}_probseg.nii.gz`) files, and brain masks (to outline what is brain and not skull/dura/etc; `*brain_mask.nii.gz`).\n\nAgain, on the [Fmriprep website](https://fmriprep.readthedocs.io/), you can find more information about the specific outputs.\n\nNow, let's check out the `func` directory:",
"_____no_output_____"
]
],
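As a small aside (not part of the original notebook), one of these anatomical outputs can be inspected directly from Python; the sketch below assumes the `nibabel` package is installed and uses the preprocessed T1 file listed above:

```python
# Minimal sketch: load the bias-corrected T1 scan and check its dimensions and affine.
import os
import nibabel as nib  # assumes nibabel is installed (pip install nibabel)

anat_path = os.path.join('bids/derivatives/fmriprep', 'sub-03', 'anat')
t1w = nib.load(os.path.join(anat_path, 'sub-03_desc-preproc_T1w.nii.gz'))

print(t1w.shape)   # size of the voxel grid in native T1w space
print(t1w.affine)  # voxel-to-world (scanner) transformation matrix
```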
[
[
"func_path = os.path.join(sub_path, 'func')\npprint(os.listdir(func_path))",
"_____no_output_____"
]
],
[
[
"Again, like the files in the `anat` folder, the functional outputs are available in two spaces: `T1w` and `MNI152NLin2009cAsym`. In terms of actual images, there are preprocessed BOLD files (ending in `preproc_bold.nii.gz`), the functional volume used for \"functional → anatomical\" registration (ending in `boldref.nii.gz`), brain parcellations in functional space (ending in `dseg.nii.gz`), and brain masks (ending in `brain_mask.nii.gz`). In addition, there are files with \"confounds\" (ending in `confounds_regressors.tsv`) which contain variables that you might want to include as nuisance regressors in your first-level analysis. These confound files are speadsheet-like files (like `csv` files, but instead of being comma-delimited, they are tab-delimited) and can be easily loaded in Python using the [pandas](https://pandas.pydata.org/) package:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nconf_path = os.path.join(func_path, 'sub-03_task-flocBLOCKED_desc-confounds_regressors.tsv')\nconf = pd.read_csv(conf_path, sep='\\t')\nconf.head()",
"_____no_output_____"
]
],
[
[
"Confound files from Fmriprep contain a large set of confounds, ranging from motion parameters (`rot_x`, `rot_y`, `rot_z`, `trans_x`, `trans_y`, and `trans_z`) and their derivatives (`*derivative1`) and squares (`*_power2`) to the average signal from the brain's white matter and cerebrospinal fluid (CSF), which should contain sources of noise such as respiratory, cardiac, or motion related signals (but not signal from neural sources, which should be largely constrained to gray matter). For a full list and explanation of Fmriprep's estimated confounds, check their website. Also, check [this thread](https://neurostars.org/t/confounds-from-fmriprep-which-one-would-you-use-for-glm/326) on Neurostars for a discussion on which confounds to include in your analyses.",
"_____no_output_____"
],
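To make this concrete, the sketch below selects one common subset of confounds (the six motion parameters plus the CSF and white-matter signals) as nuisance regressors. This is an illustration rather than a recommendation: the column names are assumed to follow Fmriprep's naming as described above, and which confounds to include is an analysis choice (see the linked Neurostars thread).

```python
# Sketch (not from the original notebook): pick a basic set of nuisance regressors
# from the Fmriprep confounds file loaded earlier.
import os
import pandas as pd

func_path = os.path.join('bids/derivatives/fmriprep', 'sub-03', 'func')
conf_path = os.path.join(func_path, 'sub-03_task-flocBLOCKED_desc-confounds_regressors.tsv')
conf = pd.read_csv(conf_path, sep='\t')

# Six rigid-body motion parameters plus mean CSF and white-matter signals
nuisance_cols = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z', 'csf', 'white_matter']
nuisance = conf[nuisance_cols]

print(nuisance.shape)  # (number of volumes, number of selected confounds)
nuisance.head()
```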
[
"In addition to the actual preprocessed outputs, Fmriprep also provides you with a nice (visual) summary of the different (major) preprocessing steps in an HTML-file, which you'd normally open in any standard browser to view. Here. we load this file for our example participants (`sub-03`) inside the notebook below. Scroll through it to see which preprocessing steps are highlighted. Note that the images from the HTML-file are not properly rendered in Jupyter notebooks, but you can right-click the image links (e.g., `sub-03/figures/sub-03_dseg.svg`) and click \"Open link in new tab\" to view the image.",
"_____no_output_____"
]
],
[
[
"from IPython.display import IFrame\nIFrame(src='./bids/derivatives/fmriprep/sub-03.html', width=700, height=600)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
d04b27e08435c126ad063cea71565ab953950e14 | 39,735 | ipynb | Jupyter Notebook | notebook/Unit7-4-Intro_SoftwareEngineering.ipynb | SEU03013050/notebook | 25e5e69d87cfb8c6ea45094365405e5c7cbffc6b | [
"MIT"
] | null | null | null | notebook/Unit7-4-Intro_SoftwareEngineering.ipynb | SEU03013050/notebook | 25e5e69d87cfb8c6ea45094365405e5c7cbffc6b | [
"MIT"
] | null | null | null | notebook/Unit7-4-Intro_SoftwareEngineering.ipynb | SEU03013050/notebook | 25e5e69d87cfb8c6ea45094365405e5c7cbffc6b | [
"MIT"
] | null | null | null | 47.931242 | 342 | 0.6642 | [
[
[
"# Software Engineering\n\nSoftware engineering was first introduced in the 1960s in an effort to treat more rigorously the often frustrating task of designing and developing computer programs. It was around this time that the computer community became increasingly worried about the fact that software projects were typically over budget and behind schedule. \n\nThe term **software crisis(软件危机)** came to signify that software development was the bottleneck in the advancement of computer technology.\n\n## 1 Introduction to Software Engineering\n\n\n### 1.1 Software Characteristics\n\nFrom an engineering viewpoint a software system is a product that serves a function.\n\nHowever, \n\n**1 A program can be changed**:\n\none unique attribute makes a computer program much different from a bridge or an airplane: a program can be changed. This malleability of software is both an advantage and a danger. \n * An advantage because it is often possible to correct an error in a program much easier than it would be to fix a defect in an airplane or automobile.\n \n * A danger because a modification in a program can introduce unanticipated side effects that may impair the functionality of those components that were executing correctly before the change.\n\n**2. The most important element of the Software product cost is the human effort in design and development**\n\nThe another notable characteristic of programs relate to the type of `resources necessary for their creation`. \n\nA software product is basically an intellectual commodity. The principal resource necessary for producing it is `human intelligence`. \n\nThe actual manufacturing of programs is `simple and inexpensive` compared to its design, coding, testing, and documenting. \n\nThis contrasts with many other engineered products in which the resources used in producing it are a substantial part of the product’s\nfinal cost. For example, a considerable portion of the price of a new automobile represents the cost of manufacturing it, while a less significant part goes to pay for the engineering costs of design and development.\n\nIn the case of a typical computer program the proportions are reversed. The most important element of the product cost is the human effort in design and development while the cost of manufacturing is proportionally insignificant.\n\n### 1.2 Software Qualities\n\nAn engineered product is usually associated with a list of qualities that define its usability. \n\nFor example, in performing its functions a bridge supports a predetermined weight and withstands a given wind force. An airplane is capable of transporting a\nspecific load, at a certain speed and altitude. \n\nBy the same token, a software product is associated with a given set of qualities that define its functionality. \n\nThe principal goals of software engineering is to define, specify, and measure software qualities and to describe the principles that can be applied to achieve them.\n\nThe classification of software qualities can be based on the relation with the software product. In this sense we can speak of qualities desirable to the user,to the developer, or to the manager. \n\nThe Table lists some qualities according to this classification.\n\n\n\n",
"_____no_output_____"
],
[
"### 1.3 Principles of Software Engineering",
"_____no_output_____"
],
[
"We started this chapter on the assumption that software development is a `creative activity` and that programming is `not an exact science`. \n\nFrom this point of view even the term software engineering may be considered unsuitable since we could preferably\nspeak of `software development technique`, which term does not imply the rigor of a formal engineering approach.\n\nIn our opinion it is a `mistake` to assume that programs can be mechanically generated by some `mechanical methodology`, no matter\nhow sophisticated. \n\nWhen software engineering falls short of producing the expected results it is because we `over-stressed the scientific and technical aspects` of program development over those that are `artistic or aesthetic` in nature or that depend on talent, personal endowments, or know-how. \n\nNevertheless, as there is `technique in art`, there is `technique in program development`.\n\nSoftware engineering is the `conventional` name that `groups` the technical and scientific aspects of program development.\n\n\n>**Software Engineering** is **a systematic approach** to the design, development, operation, and maintenance of a software system.\n>\n>* 软件工程是设计、开发、操作和维护软件系统的系统化方法。\n\n\n**Smaller software projects** usually take place within the constraints of a limited budget. Often financial resources do not extend to hiring trained software project managers or specialists in the field of software engineering. \n\nThe person in charge of the project usually wears many hats, including that of project manager and software engineer. In fact, it is not unusual that the project manager/engineer is also part-time designer, programmer, tester, and documentation specialist. \n\nWhat this all means is that the formality and rigor used in engineering a major project may not apply to one of lesser proportions. In other words, the strictness and rigidity of software engineering principles may have to be scaled down to accommodate the smaller projects.\n\nIn this sense we must distinguish between `principles, techniques, and tools` of software engineering. \n\n**Principles** are general guidelines that are applicable at any stage of the program production process. They are the abstract statements that describe desirable properties, but that are of little use in practical software development.\n\nFor example, the principle that encourages high program reliability does `not tell us how to` make a program reliable. \n\n**Techniques or methods** refer to `a particular approach` to solving a problem and help ensure that a product will have the desirable\nproperties.\n\n**Tools** are specific resources that are used in implementing a particular technique. \n\nIn this case we may state as a principle that floating-point numbers are a desirable format for representing decimals in a digital machine. Also that the floating-point techniques described in the ANSI standard 754 are suitable for our application\nand should be followed. Finally, that a particular library of floating-point routines, which complies with ANSI 754, would be an adequate tool for implementing the mathematical functions required in our application.\n\nThe Figure graphically shows the relationship between these three elements.\n\n",
"_____no_output_____"
],
[
"### 1.4 Objectives of Software Engineering:\n\n1. Maintainability\n \n * It should be feasible for the software to evolve to meet changing requirements.\n\n2. Correctness \n \n * A software product is correct, if the different requirements as specified in the SRS document have been correctly implemented.\n\n3. Reusability \n \n * A software product has good reusability, if the different modules of the product can easily be reused to develop new products.\n\n4. Testability \n \n * Here software facilitates both the establishment of test criteria and the evaluation of the software with respect to those criteria.\n\n5. Reliability \n \n * It is an attribute of software quality. The extent to which a program can be expected to perform its desired function, over an arbitrary time period.\n\n6. Portability \n * In this case, software can be transferred from one computer system or environment to another.\n\n7. Adaptability –\n * In this case, software allows differing system constraints and user needs to be satisfied by making changes to the software.",
"_____no_output_____"
],
[
"## 2 Software Engineering Paradigms\n\nComputer scientists refer to the process of planning and organizing a program as software development. \n\nIt includes project planning, systems and requirements analysis, data structure design, algorithm selection and evaluation, coding,\nestimation of program correctness, and maintenance procedures.\n\nThere are several paradigms to software development.\n\nThree of these paradigms have been extensively discussed in the literature:\n\n* the waterfall model(瀑布模型), the prototype methods(原型方法), and the spiral model(螺旋模式)\n\n### 2.1 Waterfall Model\n\nThis classical Waterfall Model of a software engineering project is based on the notion of a system life-cycle.\n\nThe waterfall model consists of several phases shown in the Figure.\n\nAs you can see, the figure resembles a waterfall, in which the results of each phase flow **down** to the next. \n\n\n\n**1 The specification phase(规范定义阶段)** consists of `a requirements gathering process` through analysis and systems engineering.\n\nWhenever the project must interface with existing software or hardware elements the specification phase must include a systems requirements definition. \n\nDuring this phase customer and developer work very closely: the customer provides the requirements and the developer reflects these requirements in a formal specification that is, in turn, reviewed by the customer. \n\nThe requirements/specification cycles continue until both parties agree that the project has been clearly and unambiguously defined.\n\n>The programmers determine **what the program** will do. \n>\n>This is a process of clarifying the **specifications(规范说明书)** for the problem\n\n\n**2 The design phase(设计阶段)** on four elements: data structures, program architecture, procedures, and interfaces(数据结构,程序架构,过程和接口).\n\nThe design stage is often the most critical and difficult one. \n\n>The programmers determine **how the program** will do its task\n\n**3 The coding phase(编码阶段)**: The programmers write the program,then convert the design into a machine-executable product.\n\n**4 The verification phase(测试阶段)** Once the code executes in a machine it must be evaluated for correctness. This means that we must ascertain that it meets the requirements developed during the specifications phase and that it is free from defects. \n\nAlthough this phase is sometimes associated with debugging, it should also include all formal and experimental verifications of program correctness\n\n**5 Maintenance phase(维护阶段)**—Programs usually have a long life; a life span of 5 to 15 years is common for software. \n\nDuring this time, requirements change, errors are detected, and minor or major modifications are made.\n\nMaintenance procedures require revisiting all the stages of the software life-cycle, as depicted by the dotted arrow in the above Figure ",
"_____no_output_____"
],
[
"A mistake detected in one phase often requires the developer to **back up** and redo some of the work in the **previous** phase. \n\nModifications made during maintenance also require backing up to earlier phases. \n\nTaken together, these phases are also called **the software development life cycle(软件生命周期)**.\n\n>软件生命周期(Software Life Cycle)是软件的产生直到报废或停止使用的生命周期\n\n\nAlthough the diagram depicts distinct phases, this does not mean that developers must analyze and design a complete system before coding it.\n\nModern software development is usually **incremental(增量)** and **iterative(迭代)**. \n\n* This means that analysis(specification) and design may produce a rough `draft, skeletal` version, or **prototype** of a system for coding, and then back up to earlier phases to fill in more details after some testing. \n\nPrograms rarely work as hoped the first time they are run; hence, they should be subjected to extensive and careful **testing**. \n\nMany people think that testing is an activity that applies only to the coding(implementation) and verification(Integration) phases; however, you should scrutinize the outputs of each phase carefully.\n\n* Keep in mind that mistakes found **early** are much less expensive to correct than those found late. \n\nThe Figure illustrates some relative costs of repairing mistakes when found in different phases. These are not just financial costs but also costs in time and effort.\n\n\n\nKeep in mind that the cost of developing software is not spread equally over the phases.The percentages shown in the Figure are typical.\n\n\n\nYou might think that coding(implementation) takes the most time and therefore costs the most.However, as you can see in the Figure, maintenance is the most expensive part of software development. \n\n**The cost of maintenance can be reduced by careful analysis, design, and implementation.**\n\nYou should remember two points:\n\n1. There is **more** to software development than `writing code`.\n\n2. If you want to reduce the overall cost of software development, **write programs that are easy to maintain**. This requires thorough analysis, careful design, and a good coding style. ",
"_____no_output_____"
],
[
"### 2.2 Prototyping\n\nMany software development projects are of an **experimental or speculative** nature.\n\nConsider the following examples:\n\n* A research group wishes to determine **if it is possible** to develop an expert system that uses data obtained by remote-sensing satellites in order determine pollution levels in the lakes and streams of the United States.\n\n* An entrepreneur wishes to determine **if it is feasible** to develop a word processing program in which the user is equipped with foot pedals that activate some of the program functions.\n\nIn either of these cases we can see that the software development project can **hardly be stated a priori**. \n\nThe objectives are described so generally that it is **difficult to define specific program requirements** that could serve as a base for a detailed design. \n\nIn both cases, as well as in many others in which `an initial detailed design is not possible or practical`, a **prototyping approach** could be a feasible alternative.\n",
"_____no_output_____"
],
[
"In prototyping the developer is able to create **a model of the software.** \n\n* This model can later be used to better `define the final product` or to `ascertain its feasibility`. \n\nThe prototype can be\n\n* `a simple paper model` of the software, which can be produced with little or no coding, \n\n* `a working prototype` that implements a subset of the program functions, or\n\n* `a complete program` in which some functions are not implemented.",
"_____no_output_____"
],
[
"The purpose of the prototype is to allow both customer and developer to `make decisions regarding the feasibility and practicality` of the project, and, if judged feasible and practical, to better define the final product.\n\nPrototyping is often depicted as a development cycle with the sequence of steps shown in the Figure\n\n",
"_____no_output_____"
],
[
"Prototype development\n\n* **begins** by collecting `requirements and specifications`.\n\nThen\n\n* the prototype is **designed**, usually by following an **abbreviated(缩略) process** which produces results quicker than conventional program design procedures. \n\n* The prototype is **built**, also shortening the development processes by **skipping all processing steps** that are not strictly necessary for the purpose at hand. \n\nThe prototype is finally `evaluated`, first by the developer and later by the customer. \n\nIf necessary, it is further `refined` and tuned in an `iterative` cycle. The finished prototype is used to further define the final software product.",
"_____no_output_____"
],
[
"### 2.3 Spiral Model\n\nThis model, first suggested by Barry W. Boehm in 1988(巴利·玻姆), proposes to merge the best features of the life-cycle and the prototyping paradigm with the principle of **incremental** development\n\nThe Figure shows a spiral progression through four different stages.\n\n",
"_____no_output_____"
],
[
"Notice that the drawing in the above Figure is meant as a **general** illustration of the method and is not be interpreted literally.\n\nFor example, the number of cycles around the spiral will `vary from project to project`\n\nunique feature of the spiral model is the introduction of a **risk analysis stage**, which culminates in a `go or no-go decision` at the conclusion of each development cycle. \n\nHowever, this risk analysis phase is also its most **controversial** feature.\n\nIn the first place, risk analysis requires a particular expertise, and is trivialized when performed by un-trained personnel.\n\nIn fact, the risk analysis phase is undesirable if it can lead to invalid interpretation of results.\n\nIn addition, customers often believe that they performed a risk analysis of the project before deciding to undertake it, and that\nfurther consideration of this matter is unnecessary. \n\nFurthermore, the possibility that at the conclusion of each development cycle the entire project could be scrapped by a no-go decision may lead to apprehensions on the part of the customer.\n\nOn the other hand, if the difficulties and perils associated with the risk analysis phase can be conjured, then the spiral model constitutes **the most satisfactory paradigm for the development of large software systems**. \n\nThe **incremental** development approach proposed by this model, with its repeated **prototyping and riske valuation** phases, provides a **realistic** framework for program development. \n\nBoth customer and developer have repeated opportunities in which to identify possible defects and shortcomings and make the necessary adjustments. \n\n",
"_____no_output_____"
],
[
"### 2.4 A Pragmatic Approach\n\nThe practicing software developer must decide, in each case, which model or combinations of models are most suitable to the project at hand.\n\nThe decision is often based on **practical** risks and limitations rather than on theoretical applicability. \n\nFor example, a developer may consider that the most adequate paradigm is the prototyping model, but a substantial risk that the customer will misinterpret the results advises against its use.\n\nIn another case a project may fit quite well within the spiral model but the fact that personnel trained in risk analysis will not be available suggests a modification of the paradigm or the adoption of the more conventional waterfall model.\n\nA wise man once said: **“All systems and no system: behold the best system.”**\n\nThis maxim is quite applicable to the various software engineering paradigms mentioned in the preceding sections, in particular when they concern smaller projects in which development time and resources are limited. \n\nThe most common scenario is a combination of paradigms.\n\nOften the most useful one is a spiral model. Since the spiral model is in itself a combination of the waterfall and the prototyping model, all of the mentioned paradigms will actually be used.\n\n### 2.5 `Concurrent` Documentation\n\nOne of the **most important lessons** of software engineering refers to the need for `adequate and rigorous project documentation`. \n\n* By the same token, the **most notable** difference between a correctly engineered development project and a haphazard effort\nis the **documentation**.\n\nToo often the tendency has been to consider program documentation as a `secondary` problem, one that can be addressed once the project\nis finished.\n\nThis tendency is probably traceable to the same human fallacy that makes some programmers believe that `comments` can be inserted into the code **after** the programming has concluded. \n\nAs `writing comments` is part of the `chore` of programming,`documentation` is part of the task of program `development`. \n\n* Either one **cannot** be approached as an **afterthought**, at risk of writing spaghetti code or of developing undecipherable projects.\n\nIt is only by realizing that documentation is one of the `fundamental` results of a development project, and that it `should never be an afterthought` at project conclusion time, that we can invalidate these argument\n\nIn regards to software project development the following types of documentation can be clearly identified:\n\n1. Written `reports` that `mark` the `conclusion` of a phase of the development cycle.\n\n * These documents are sometimes called the deliverables(可交付成果), since they are often presented to the client as each development phases concludes.\n\n * Typical deliverables are the feasibility study, the analysis and requirements document, and the detailed design document.\n\n\n2. User manuals and training guides, which can be printed or online.\n\n3. Operations documents, more often found in large computer environments, include run-time schedules, input and output forms and media, delivery, routing, and distribution charts, data file specifications, update schedules, recovery procedures, and security controls.\n\n4. The project scrapbook is used to **collect** memos, schedules, meeting minutes, and other communications generated during the project\n\nThe following are undeniable advantages of **concurrently documenting** the development project:\n\n1. 
A well-documented project is better able to resist personnel changes since new programmers can catch up by studying the project documents.\n\n2. Documentation can serve as a management tool by requiring that each development stage conclude in a document, which must be approved before the next stage can proceed.\n\n3. **Concurrent documentation** establishes a project history and can serve, among other things, as a progress report. Documents can be used as scheduling landmarks and therefore serve to measure and evaluate project progress\n\nThe **most important principle** of project documentation is that of **concurrency**. \n\nDocumentation must be a substantial part of the development effort and must take place **simultaneously** with each development phase. \n\nAt the same time, documentation is often the development activity most easily postponed or even sacrificed. When time is running short it is tempting to defer documentation.\n\nAt this point the project manager must be aware that when \n\n* `documentation loses its concurrency it also loses a great portion of its usefulness`.",
"_____no_output_____"
],
[
"## 3 The Recommended Practices for Scientific Computing\n\nGreg Wilson, Co-founder of Software Carpentry \n\n* [Best Practices for Scientific Computing](https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.1001745)\n\n* [Good enough practices in scientific computing](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005510)\n\n* [Ten simple rules for making research software more robust](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005412)",
"_____no_output_____"
],
[
"<div id=\"section1\" class=\"section toc-section\"><a id=\"s1a\" name=\"s1a\" class=\"link-target\" title=\"Box 1. Summary of Best Practices\"></a>\n<h3>3.1 Best Practices for Scientific Computing</h3>\n\n\n<ol class=\"order\">\n\n<li>Write programs for people, not computers.\n\n<ol class=\"alpha-lower\">\n\n<li>A program should not require its readers to hold more than a handful of facts in memory at once.</li>\n\n<li>Make names consistent, distinctive, and meaningful.</li>\n\n<li>Make code style and formatting consistent.</li>\n\n</ol></li>\n\n<li>Let the computer do the work.\n\n<ol class=\"alpha-lower\">\n\n<li>Make the computer repeat tasks.</li>\n\n<li>Save recent commands in a file for re-use.</li>\n\n<li>Use a build tool to automate workflows.</li>\n\n</ol></li>\n\n<li>Make incremental changes.\n\n<ol class=\"alpha-lower\">\n\n<li>Work in small steps with frequent feedback and course correction.</li>\n\n<li>Use a version control system.</li>\n\n<li>Put everything that has been created manually in version control.</li>\n\n</ol></li>\n\n<li>Don't repeat yourself (or others).\n\n<ol class=\"alpha-lower\">\n\n<li>Every piece of data must have a single authoritative representation in the system.</li>\n\n<li>Modularize code rather than copying and pasting.</li>\n\n<li>Re-use code instead of rewriting it.</li>\n\n</ol></li>\n\n<li>Plan for mistakes.\n\n<ol class=\"alpha-lower\">\n\n<li>Add assertions to programs to check their operation.</li>\n\n<li>Use an off-the-shelf unit testing library.</li>\n\n<li>Turn bugs into test cases.</li>\n\n<li>Use a symbolic debugger.</li>\n\n</ol></li>\n\n<li>Optimize software only after it works correctly.\n\n<ol class=\"alpha-lower\">\n\n<li>Use a profiler to identify bottlenecks.</li>\n\n<li>Write code in the highest-level language possible.</li>\n\n</ol></li>\n\n<li>Document design and purpose, not mechanics.\n\n<ol class=\"alpha-lower\">\n\n<li>Document interfaces and reasons, not implementations.</li>\n\n<li>Refactor code in preference to explaining how it works.</li>\n\n<li>Embed the documentation for a piece of software in that software.</li>\n\n</ol></li>\n\n<li>Collaborate.\n\n<ol class=\"alpha-lower\">\n\n<li>Use pre-merge code reviews.</li>\n\n<li>Use pair programming when bringing someone new up to speed and when tackling particularly tricky problems.</li>\n\n<li>Use an issue tracking tool.</li>\n\n</ol></li>\n\n</ol></div>\n</div></div>\n",
"_____no_output_____"
],
[
"<div id=\"section1\" class=\"section toc-section\"><a id=\"sec003\" name=\"sec003\" class=\"link-target\" title=\"Box 1. Summary of practices\"></a>\n<h3>3.2 Good enough practices in scientific computing</h3>\n\n\n<ol class=\"order\">\n\n<li>Data management\n\n\n<ol class=\"alpha-lower\">\n\n<li>Save the raw data.</li>\n\n<li>Ensure that raw data are backed up in more than one location.</li>\n\n<li>Create the data you wish to see in the world.</li>\n\n<li>Create analysis-friendly data.</li>\n\n<li>Record all the steps used to process data.</li>\n\n<li>Anticipate the need to use multiple tables, and use a unique identifier for every record.</li>\n\n<li>Submit data to a reputable DOI-issuing repository so that others can access and cite it.</li>\n\n</ol></li>\n\n<li>Software\n\n\n<ol class=\"alpha-lower\">\n\n<li>Place a brief explanatory comment at the start of every program.</li>\n\n<li>Decompose programs into functions.</li>\n\n<li>Be ruthless about eliminating duplication.</li>\n\n<li>Always search for well-maintained software libraries that do what you need.</li>\n\n<li>Test libraries before relying on them.</li>\n\n<li>Give functions and variables meaningful names.</li>\n\n<li>Make dependencies and requirements explicit.</li>\n\n<li>Do not comment and uncomment sections of code to control a program's behavior.</li>\n\n<li>Provide a simple example or test data set.</li>\n\n<li>Submit code to a reputable DOI-issuing repository.</li>\n\n</ol></li>\n\n<li>Collaboration\n\n\n<ol class=\"alpha-lower\">\n\n<li>Create an overview of your project.</li>\n\n<li>Create a shared \"to-do\" list for the project.</li>\n\n<li>Decide on communication strategies.</li>\n\n<li>Make the license explicit.</li>\n\n<li>Make the project citable.</li>\n\n</ol></li>\n\n<li>Project organization\n\n\n<ol class=\"alpha-lower\">\n\n<li>Put each project in its own directory, which is named after the project.</li>\n\n<li>Put text documents associated with the project in the <span class=\"monospace\">doc</span> directory.</li>\n\n<li>Put raw data and metadata in a data directory and files generated during cleanup and analysis in a results directory.</li>\n\n<li>Put project source code in the <span class=\"monospace\">src</span> directory.</li>\n\n<li>Put external scripts or compiled programs in the <span class=\"monospace\">bin</span> directory.</li>\n\n<li>Name all files to reflect their content or function.</li>\n\n</ol></li>\n\n<li>Keeping track of changes\n\n\n<ol class=\"alpha-lower\">\n\n<li>Back up (almost) everything created by a human being as soon as it is created.</li>\n\n<li>Keep changes small.</li>\n\n<li>Share changes frequently.</li>\n\n<li>Create, maintain, and use a checklist for saving and sharing changes to the project.</li>\n\n<li>Store each project in a folder that is mirrored off the researcher's working machine.</li>\n\n<li>Add a file called <span class=\"monospace\">CHANGELOG.txt</span> to the project's <span class=\"monospace\">docs</span> subfolder.</li>\n\n<li>Copy the entire project whenever a significant change has been made.</li>\n\n<li>Use a version control system.</li>\n\n</ol></li>\n\n<li>Manuscripts\n\n\n<ol class=\"alpha-lower\">\n\n<li>Write manuscripts using online tools with rich formatting, change tracking, and reference management.</li>\n\n<li>Write the manuscript in a plain text format that permits version control.</li>\n\n</ol></li>\n\n</ol></div>\n</div></div>",
"_____no_output_____"
],
[
"### 3.3 Ten simple rules for making research software more robust\n\nhttps://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005412\n\nWhat is **“robust”** software? \n\nWe implied above that it is software that works for people other than the original author and on machines other than its creator’s.\n\nMore specifically, we mean that:\n\n* it can be installed on more than one computer with relative ease,\n* it works consistently as advertised, and\n* it can be integrated with other tools.\n\nOur rules are generic and can be applied to all languages, libraries, packages, documentation styles, and operating systems for both closed-source and open-source software.\n\n1. Use version control\n\n2. Document your code and usage\n\n3. Make common operations easy to control\n\n4. Version your releases\n\n5. Reuse software (within reason)\n\n6. Rely on build tools and package managers for installation\n\n7. Do not require root or other special privileges to install or run\n\n8. Eliminate hard-coded paths\n\n9. Include a small test set that can be run to ensure the software is actually working\n\n10. Produce identical results when given identical inputs\n",
"_____no_output_____"
],
[
"## 4 The Diagrams for Visual Software Modelling \n\nMany diagrams have been proposed over the years for visual software modelling,in order to better understand, maintain, or document information.\n\nThese diagrams have achieved widespread acceptance:\n \n[Flow Chart](https://en.wikipedia.org/wiki/Flowchart)\n\n* A flowchart is a type of diagram that represents a workflow or process. A flowchart can also be defined as a diagrammatic representation of an algorithm, a step-by-step approach to solving a task.\n\n* The flowchart shows the steps as boxes of various kinds, and their order by connecting the boxes with arrows. This diagrammatic representation illustrates a solution model to a given problem. Flowcharts are used in analyzing, designing, documenting or managing a process or program in various fields\n\n[Data Flow Diagrams(DFD)](https://en.wikipedia.org/wiki/Data-flow_diagram)\n* A data-flow diagram is a way of representing a flow of data through a process or a system (usually an information system). The DFD also provides information about the outputs and inputs of each entity and the process itself. A data-flow diagram has no control flow, there are no decision rules and no loops\n\n[Entity-Relationship(E-R) Model Diagrams](https://en.wikipedia.org/wiki/Entity%E2%80%93relationship_model)\n* An entity–relationship model (or ER model) describes interrelated things of interest in a specific domain of knowledge. A basic ER model is composed of entity types (which classify the things of interest) and specifies relationships that can exist between entities (instances of those entity types).\n\n\n[Unified Modeling Language(UML) Model diagram](https://en.wikipedia.org/wiki/Unified_Modeling_Language)\n* The Unified Modeling Language (UML) is a general-purpose, developmental, modeling language in the field of software engineering that is intended to provide a standard way to visualize the design of a system\n* UML 2 has many types of diagrams, which are divided into two categories.Some types represent structural information, and the rest represent general types of behavior, including a few that represent different aspects of interactions. These diagrams can be categorized hierarchically as shown in the following class diagram\n",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d04b3e9de30815ba6a811a0f61f049584b130585 | 6,062 | ipynb | Jupyter Notebook | LP-ADMM.ipynb | ehsaneshaghi/optimizers | 3202e1028a409427566892b6773e0d7d4dfa7df6 | [
"MIT"
] | null | null | null | LP-ADMM.ipynb | ehsaneshaghi/optimizers | 3202e1028a409427566892b6773e0d7d4dfa7df6 | [
"MIT"
] | null | null | null | LP-ADMM.ipynb | ehsaneshaghi/optimizers | 3202e1028a409427566892b6773e0d7d4dfa7df6 | [
"MIT"
] | null | null | null | 29.862069 | 199 | 0.472451 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d04b424b7e7ccc8fadfe5d3a8ebf058a03e806c3 | 8,323 | ipynb | Jupyter Notebook | dataprep_TREC.ipynb | CaptainArnav/Sentence-Classification-using-1D-ConvNets | 713fc9e4f7cc65d713220d4acb81ad444d3553dc | [
"MIT"
] | null | null | null | dataprep_TREC.ipynb | CaptainArnav/Sentence-Classification-using-1D-ConvNets | 713fc9e4f7cc65d713220d4acb81ad444d3553dc | [
"MIT"
] | null | null | null | dataprep_TREC.ipynb | CaptainArnav/Sentence-Classification-using-1D-ConvNets | 713fc9e4f7cc65d713220d4acb81ad444d3553dc | [
"MIT"
] | null | null | null | 29.514184 | 118 | 0.563979 | [
[
[
"# dataset\n# https://cogcomp.seas.upenn.edu/Data/QA/QC/",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nfrom bs4 import BeautifulSoup\nimport pickle\nimport string\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.text import text_to_word_sequence, one_hot\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical",
"_____no_output_____"
],
[
"#only if GPU is available\nphysical_devices = tf.config.list_physical_devices(\"GPU\")\ntf.config.experimental.set_memory_growth(physical_devices[0], True)",
"_____no_output_____"
],
[
"def remove_html(text) :\n '''\n parameters : text - string\n removes HTML content from the text such (eg. tags - <title></title>)\n returns text_without_html - string\n '''\n soup = BeautifulSoup(text)\n text_without_html = soup.get_text()\n return text_without_html\n\n\ndef remove_punctuation(text) :\n '''\n parameters : text - string\n removes punctuation from the text (eg. '.', '!', '?')\n returns : text_without_puntuation - string\n '''\n text_without_puntuation = \" \".join([[char for char in text if char not in string.punctuation]])\n return text_without_puntuation\n\n\ndef remove_stop_words(token) :\n '''\n parameters : tokens - list of words\n removes stop words from the list (eg. 'a', 'the', 'are')\n returns : tokens_without_stop_words - list of words\n '''\n stop_words = stopwords.words('english')\n token_without_stop_words = [word for word in token if word not in stop_words]\n return token_without_stop_words\n\n\ndef stemmed_words(tokens) : \n '''\n parameters : tokens - list of words\n stems the words in the list (eg. playing -> play)\n returns : stemmed_words - list of words\n '''\n porter = PorterStemmer()\n stemmed_words = [porter.stem(word) for word in tokens]\n return stemmed_words",
"_____no_output_____"
],
[
"def clean_data(sentences) :\n '''\n parameters : sentences - list of sentences\n cleans the sentences by\n converting the sentences into tokens and removing stop words\n joins the tokens to form a sentence again\n returns : texts - list of cleaned sentences\n '''\n texts = []\n for sentence in sentences :\n tokens = text_to_word_sequence(sentence)\n tokens = remove_stop_words(tokens)\n sentence = \" \".join(tokens)\n texts.append(sentence)\n return texts",
"_____no_output_____"
],
[
"def make_tokenizer(dataset) :\n '''\n parameters : dataset - list of sentences\n creates a vocabulary of words based on the list of inputted sentences using the Tokenizer object\n returns : tokenizer - Tokenizer object\n '''\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(dataset)\n return tokenizer",
"_____no_output_____"
],
[
"def encode_texts(dataset, tokenizer) :\n '''\n parameters : dataset - list of sentences\n tokenizer - Tokenizer object initialized using dataset\n encodes the text sequences in the dataset by mapping the index of the word in the vocabulary to each word\n in the dataset\n returns : encoded_docs - list of encoded sentences\n '''\n encoded_docs = tokenizer.texts_to_sequences(dataset)\n return encoded_docs\n\ndef encode_labels(labels) :\n '''\n parameters - list of labels/classes for each input\n maps each label to an index and encodes the label's with its corresponding index\n returns - list of encoded labels/classes for each input\n '''\n le = LabelEncoder()\n le.fit(labels)\n return le.transform(labels), len(le.classes_)",
"_____no_output_____"
],
[
"train_trec = open('./data/TREC/train_5500.label')\nx_train = []\ny_train = []\nfor x in train_trec :\n data_split = x.split(':')\n x_train.append(data_split[1])\n y_train.append(data_split[0])\ntrain_trec.close()\n\ntest_trec = open('./data/TREC/TREC_10.label')\nx_test = []\ny_test = []\nfor x in test_trec :\n data_split = x.split(':')\n x_test.append(data_split[1])\n y_test.append(data_split[0])\ntest_trec.close()\n\nx_train = clean_data(x_train)\n\ny_train, num_classes= encode_labels(y_train)\ny_test, _ = encode_labels(y_test)\n\ny_train = to_categorical(y_train, num_classes=num_classes)\ny_test = to_categorical(y_test, num_classes=num_classes)",
"_____no_output_____"
],
[
"tokenizer = make_tokenizer(x_train)\nnum_words = len(tokenizer.word_index) + 1\n\nx_train = pad_sequences(encode_texts(x_train, tokenizer), padding='post')\nmax_length = x_train.shape[1]\nx_test = pad_sequences(encode_texts(x_test, tokenizer), maxlen=max_length, padding='post')",
"_____no_output_____"
],
[
"x_train = np.array(x_train)\nx_test = np.array(x_test)\ny_train = np.array(y_train)\ny_test = np.array(y_test)",
"_____no_output_____"
],
[
"np.savez('./data/encoded_dataset_trec.npz', name1=x_train, name2=y_train, name3=x_test, name4=y_test)",
"_____no_output_____"
],
[
"vocab_file = open(\"./data/vocab_trec.pkl\", \"wb\")\npickle.dump(tokenizer.word_index, vocab_file)\nvocab_file.close()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04b4861b3e4cd1cc0e9d0c65a032d4414f0e49a | 94,809 | ipynb | Jupyter Notebook | desafios_aula01.ipynb | justapixel/QuarentenaDados | 6eb06ea8d02b822c0e98c5b611c67147fec0413f | [
"MIT"
] | null | null | null | desafios_aula01.ipynb | justapixel/QuarentenaDados | 6eb06ea8d02b822c0e98c5b611c67147fec0413f | [
"MIT"
] | null | null | null | desafios_aula01.ipynb | justapixel/QuarentenaDados | 6eb06ea8d02b822c0e98c5b611c67147fec0413f | [
"MIT"
] | null | null | null | 316.03 | 49,054 | 0.637334 | [
[
[
"import pandas as pd\nimport numpy as np\nmovies = pd.read_csv(\"ml-latest-small/movies.csv\")\nmovies_rating = pd.read_csv(\"ml-latest-small/ratings.csv\")",
"_____no_output_____"
]
],
[
[
"# Desafio 1 do [Paulo Silveira](https://twitter.com/paulo_caelum)\n## Encontrar quantos filmes não possuem avaliações e quais são esses filmes",
"_____no_output_____"
]
],
[
[
"count_rating_by_movieId = movies_rating.pivot_table(index=['movieId'], aggfunc='size').rename('votes')\ncount_rating_by_movieId",
"_____no_output_____"
],
[
"movies_with_votes = movies.join(count_rating_by_movieId, on=\"movieId\")\nmovies_with_votes[movies_with_votes['votes'].isnull()]",
"_____no_output_____"
]
],
[
[
"# Desafio 2 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)\n## Alterar o nome da coluna nota do dataframe filmes_com_media para nota_média após o join.",
"_____no_output_____"
]
],
[
[
"rating = movies_rating.groupby(\"movieId\")['rating'].mean()\nrating",
"_____no_output_____"
],
[
"filmes_com_media = movies.join(rating, on=\"movieId\").rename(columns={'rating': 'nota_média'})\nfilmes_com_media",
"_____no_output_____"
]
],
[
[
"# Desafio 3 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)\n## Adicionar ao filmes_com_media o total de votos de cada filme",
"_____no_output_____"
]
],
[
[
"movies_with_rating_and_votes = filmes_com_media.join(count_rating_by_movieId, on=\"movieId\")\nmovies_with_rating_and_votes",
"_____no_output_____"
]
],
[
[
"# Desafio 4 do [Thiago Gonçalves](https://twitter.com/tgcsantos)\n## Arredondar as médias (coluna de nota média) para duas casas decimais.",
"_____no_output_____"
]
],
[
[
"movies_with_rating_and_votes = movies_with_rating_and_votes.round({'nota_média':2})\nmovies_with_rating_and_votes",
"_____no_output_____"
]
],
[
[
"# Desafio 5 do [Allan Spadini](https://twitter.com/allanspadini)\n## Descobrir os generos dos filmes (quais são eles, únicos). (esse aqui o bicho pega)",
"_____no_output_____"
]
],
[
[
"genres_split = movies.genres.str.split(\"|\")\ngenres_split",
"_____no_output_____"
],
[
"genres = pd.DataFrame({'genre':np.concatenate(genres_split.values)})\nlist_genres = genres.groupby('genre').size().reset_index(name='count')\nlist_genres['genre']",
"_____no_output_____"
]
],
[
[
"# Desafio 6 da [Thais André](https://twitter.com/thais_tandre)\n## Contar o número de aparições de cada genero.",
"_____no_output_____"
]
],
[
[
"list_genres",
"_____no_output_____"
]
],
[
[
"# Desafio 7 do [Guilherme Silveira](https://twitter.com/guilhermecaelum)\n## Plotar o gráfico de aparições de cada genero. Pode ser um gráfico de tipo igual a barra.",
"_____no_output_____"
]
],
[
[
"list_genres[['genre', 'count']].sort_values(by=['genre'], ascending=True).plot(x='genre', kind='barh', title=\"Generos\")",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d04b51cf6be9f8c9092791e5e034ba5c6026db5a | 12,522 | ipynb | Jupyter Notebook | test.ipynb | sharaththota/Test | ded092d8b4f3b258be3b9ecace0b24459e1fa7f4 | [
"MIT"
] | null | null | null | test.ipynb | sharaththota/Test | ded092d8b4f3b258be3b9ecace0b24459e1fa7f4 | [
"MIT"
] | null | null | null | test.ipynb | sharaththota/Test | ded092d8b4f3b258be3b9ecace0b24459e1fa7f4 | [
"MIT"
] | null | null | null | 23.018382 | 133 | 0.393068 | [
[
[
"# Write a program to remove characters from a string starting from zero up to n and return a new string.\n\n__Example:__\n\nremove_char(\"Untitled\", 4) so output must be tled. Here we need to remove first four characters from a string",
"_____no_output_____"
]
],
[
[
"def remove_char(a, b):\n # Write your code here\n print(\"started\")\n\na=\"Untitled\"\nb=4\nremove_char(a,b)",
"started\n"
]
],
[
[
"# Write a program to find how many times substring appears in the given string.\n\n__Example:__\n\n\"You can use Markdown to format documentation you add to Markdown cells\" sub_string: Markdown\nIn the above the substring Markdown is appeared two times.So the count is two",
"_____no_output_____"
]
],
[
[
"def sub_string(m_string,s_string):\n # Write your code here\n print(\"started\")\n \n \nm_string=\"You can use Markdown to format documentation you add to Markdown cells\"\ns_string=\"Markdown\"\nsub_string(m_string,s_string)\n",
"started\n"
]
],
[
[
"# Write a program to check if the given number is a palindrome number.\n\n__Exapmle:__\n\nA palindrome number is a number that is same after reverse. For example 242, is the palindrome number\n",
"_____no_output_____"
]
],
[
[
"def palindrom_check(a):\n # Write your code here\n print(\"started\")\n\npalindrom_check(242)",
"started\n"
]
],
[
[
"# Write a program to Extract Unique values from dictionary values\n\n__Example:__\n\ntest= {\"gfg': [5, 6, 7, 8], 'is': [10, 11, 7, 5], 'best' : [6, 12, 10, 8], 'for': [1, 2, 5]}\nout_put: [1, 2, 5, 6, 7, 8, 10, 11, 12]",
"_____no_output_____"
]
],
[
[
"def extract_unique(a):\n # Write your code here\n print(\"started\")\n \n\n\ntest= {'gfg': [5, 6, 7, 8], 'is': [10, 11, 7, 5], 'best' : [6, 12, 10, 8], 'for': [1, 2, 5]}\nextract_unique(test)",
"started\n"
]
],
[
[
"# Write a program to find the dictionary with maximum count of pairs\n\n__Example:__\n\nInput: test_list = [{\"gfg\": 2, \"best\":4}, {\"gfg\": 2, \"is\" : 3, \"best\": 4, \"CS\":9}, {\"gfg\":2}] \nOutput: 4",
"_____no_output_____"
]
],
[
[
"def max_count(a):\n # Write your code here\n print(\"started\")\n \n \ntest_list = [{\"gfg\": 2, \"best\":4}, {\"gfg\": 2, \"is\" : 3, \"best\": 4, \"CS\":9}, {\"gfg\":2}]\nmax_count(test_list)\n \n",
"started\n"
]
],
[
[
"# Access the value of key 'history' from the below dict",
"_____no_output_____"
]
],
[
[
"def key_access(a):\n # Write your code here\n print(\"started\")\n \n \n \n\n\nsampleDict = {\n\"class\":{\n\"student\":{\n\"name\": \"Mike\",\n\"marks\" : {\n\"physics\":70,\n\"history\":80\n}\n}\n}\n}\nkey_access(sampleDict)",
"started\n"
]
],
[
[
"# Print the value of key hair\n# Print the third element of the key interested in",
"_____no_output_____"
]
],
[
[
"def third_ele(a):\n # Write your code here\n print(\"started\")\n \n \n \n \n \ninfo={\n \"personal data\":{\n \"name\":\"Lauren\",\n \"age\":20,\n \"major\":\"Information Science\",\n \"physical_features\":{\n \"color\":{\n \"eye\":\"blue\",\n \"hair\":\"brown\"\n },\n \"height\":\"5'8\"\n }\n },\n \"other\":{\n \"favorite_colors\":[\n \"purple\",\n \"green\",\n \"blue\"\n ],\n \"interested_in\":[\n \"social media\",\n \"intellectual property\",\n \"copyright\",\n \"music\",\n \"books\"\n ]\n }\n}\n\nthird_ele(info)",
"started\n"
],
[
"import pandas as pd\nimport numpy as np\nexam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],\n 'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],\n 'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}\nlabels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\n\ndf = pd.DataFrame(exam_data , index=labels)\ndf",
"_____no_output_____"
]
],
[
[
"# Print the Unique values from attempts column",
"_____no_output_____"
]
],
[
[
"def un_values(df):\n # Write your code here\n print(\"started\")\n \n \n \n \nun_values(df)",
"started\n"
]
],
[
[
"# Print the top five rows from the data frame\n",
"_____no_output_____"
]
],
[
[
"def top_five(df):\n # Write your code here\n print(\"started\")\n \n \n \n \ntop_five(df)",
"started\n"
]
],
[
[
"# Print the max and min values of the coulmn attempts",
"_____no_output_____"
]
],
[
[
"def min_max(df):\n # Write your code here\n print(\"started\")\n \n \n \n \nmin_max(df)",
"started\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d04b5ad46d1e8ce047ea32bdfd7cdbcdc922ffeb | 143,231 | ipynb | Jupyter Notebook | matrix_two/day3.ipynb | kmwolowiec/data_workshop | 9405bac4dc42280083c54ff36b9d7da58e3a66f0 | [
"MIT"
] | null | null | null | matrix_two/day3.ipynb | kmwolowiec/data_workshop | 9405bac4dc42280083c54ff36b9d7da58e3a66f0 | [
"MIT"
] | null | null | null | matrix_two/day3.ipynb | kmwolowiec/data_workshop | 9405bac4dc42280083c54ff36b9d7da58e3a66f0 | [
"MIT"
] | null | null | null | 143,231 | 143,231 | 0.814258 | [
[
[
"!pip install eli5\n!pip install --upgrade tables\n!git config --global user.email \"[email protected]\"\n!git config --global user.name \"Krzysiek\"",
"Requirement already satisfied: eli5 in /usr/local/lib/python3.6/dist-packages (0.10.1)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.1)\nRequirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.1)\nRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.6)\nRequirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.17.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)\nRequirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (19.3.0)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.12.0)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.14.1)\nRequirement already up-to-date: tables in /usr/local/lib/python3.6/dist-packages (3.6.1)\nRequirement already satisfied, skipping upgrade: numexpr>=2.6.2 in /usr/local/lib/python3.6/dist-packages (from tables) (2.7.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from tables) (1.17.5)\n"
],
[
"import pandas as pd\nimport numpy as np\n\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.tree import DecisionTreeRegressor\n\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.model_selection import cross_val_score\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec",
"_____no_output_____"
],
[
"cd \"drive/My Drive/Colab Notebooks/data_workshop\"",
"/content/drive/My Drive/Colab Notebooks/data_workshop\n"
]
],
[
[
"Import data",
"_____no_output_____"
]
],
[
[
"df = pd.read_hdf('data/car.h5')",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
]
],
[
[
"# Dummy Model",
"_____no_output_____"
]
],
[
[
"df.select_dtypes(np.number).columns",
"_____no_output_____"
],
[
"X = df['car_id']\ny = df['price_value']\n\nmodel = DummyRegressor()\nmodel.fit(X, y)\ny_pred = model.predict(X)\n\nmae(y, y_pred)",
"_____no_output_____"
],
[
"[x for x in df.columns if 'price' in x]",
"_____no_output_____"
],
[
"df['price_currency'].value_counts()",
"_____no_output_____"
],
[
"df = df[ df.price_currency == 'PLN']\ndf.shape",
"_____no_output_____"
]
],
[
[
"# Features",
"_____no_output_____"
]
],
[
[
"df.sample(5)",
"_____no_output_____"
],
[
"suffix_cat = '__cat'\nfor feat in df.columns:\n if isinstance(df[feat][0], list):continue\n \n factorized_values = df[feat].factorize()[0]\n if suffix_cat in feat:\n df[feat] = factorized_values\n \n else:\n df[feat+suffix_cat] = factorized_values",
"_____no_output_____"
],
[
"cat_feats = [x for x in df.columns if suffix_cat in x]\ncat_feats = [x for x in cat_feats if 'price' not in x]\ncat_feats",
"_____no_output_____"
],
[
"len(cat_feats)",
"_____no_output_____"
],
[
"X = df[cat_feats].values\ny = df['price_value'].values\n\nmodel = DecisionTreeRegressor(max_depth=5)\nscores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')\nnp.mean(scores)",
"_____no_output_____"
],
[
"m = DecisionTreeRegressor(max_depth=5)\nm.fit(X, y)\n\nimp = PermutationImportance(m, random_state=0).fit(X, y)\neli5.show_weights(imp, feature_names=cat_feats)",
"_____no_output_____"
],
[
"df[['param_napęd', 'price_value']].groupby('param_napęd').agg(['mean', 'median', 'std', 'count'])",
"_____no_output_____"
],
[
"df['param_rok-produkcji'] = df['param_rok-produkcji'].astype(float)",
"_____no_output_____"
],
[
"fig = plt.figure(constrained_layout=True, figsize=(16,8))\n\ngs = GridSpec(2, 4, figure=fig)\nax1 = fig.add_subplot(gs[0, :2])\nax2 = fig.add_subplot(gs[0, 2])\nax3 = fig.add_subplot(gs[0, 3])\nax4 = fig.add_subplot(gs[1, :])\n\nsns.boxplot(data=df, x='param_napęd', y='price_value', ax=ax1)\nsns.boxplot(data=df, x='param_faktura-vat__cat', y='price_value', ax=ax2)\nsns.boxplot(data=df, x='param_stan', y='price_value', ax=ax3)\nsns.scatterplot(x=\"param_rok-produkcji\", y=\"price_value\", data=df, alpha=0.1, linewidth=0, ax=ax4);",
"_____no_output_____"
],
[
"!git push origin master",
"Counting objects: 1 \rCounting objects: 4, done.\nDelta compression using up to 2 threads.\nCompressing objects: 25% (1/4) \rCompressing objects: 50% (2/4) \rCompressing objects: 75% (3/4) \rCompressing objects: 100% (4/4) \rCompressing objects: 100% (4/4), done.\nWriting objects: 25% (1/4) \rWriting objects: 50% (2/4) \rWriting objects: 75% (3/4) \rWriting objects: 100% (4/4) \rWriting objects: 100% (4/4), 76.21 KiB | 5.86 MiB/s, done.\nTotal 4 (delta 1), reused 0 (delta 0)\nremote: Resolving deltas: 100% (1/1), completed with 1 local object.\u001b[K\nremote: This repository moved. Please use the new location:\u001b[K\nremote: https://github.com/kmwolowiec/data_workshop.git\u001b[K\nTo https://github.com/ThePearsSon/data_workshop.git\n 874fb89..1c4aeef master -> master\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04b6c01a7eb6803cbfca2521ca2ec897b5b2d39 | 182,188 | ipynb | Jupyter Notebook | todo/Pattern.ipynb | lucasgiutavares/mylibrary | 30213bd9ffc6ca97232c208b3c03deec8a1989c1 | [
"MIT"
] | 10 | 2017-07-18T20:28:02.000Z | 2021-09-16T02:20:59.000Z | todo/Pattern.ipynb | lucasgiutavares/mylibrary | 30213bd9ffc6ca97232c208b3c03deec8a1989c1 | [
"MIT"
] | null | null | null | todo/Pattern.ipynb | lucasgiutavares/mylibrary | 30213bd9ffc6ca97232c208b3c03deec8a1989c1 | [
"MIT"
] | 14 | 2017-07-19T13:31:39.000Z | 2021-09-16T02:21:01.000Z | 166.686185 | 85,540 | 0.806568 | [
[
[
"# Pattern Mining\n## Library\n",
"_____no_output_____"
]
],
[
[
"source(\"https://raw.githubusercontent.com/eogasawara/mylibrary/master/myPreprocessing.R\")\nloadlibrary(\"arules\")\nloadlibrary(\"arulesViz\")\nloadlibrary(\"arulesSequences\")",
"Loading required package: ggplot2\n\nLoading required package: scales\n\nLoading required package: ggpubr\n\nLoading required package: reshape\n\nLoading required package: caret\n\nLoading required package: lattice\n\nLoading required package: MASS\n\nLoading required package: DMwR\n\nLoading required package: grid\n\nRegistered S3 method overwritten by 'quantmod':\n method from\n as.zoo.data.frame zoo \n\nLoading required package: dplyr\n\n\nAttaching package: ‘dplyr’\n\n\nThe following object is masked from ‘package:MASS’:\n\n select\n\n\nThe following object is masked from ‘package:reshape’:\n\n rename\n\n\nThe following objects are masked from ‘package:stats’:\n\n filter, lag\n\n\nThe following objects are masked from ‘package:base’:\n\n intersect, setdiff, setequal, union\n\n\nLoading required package: arules\n\nLoading required package: Matrix\n\n\nAttaching package: ‘Matrix’\n\n\nThe following object is masked from ‘package:reshape’:\n\n expand\n\n\n\nAttaching package: ‘arules’\n\n\nThe following object is masked from ‘package:dplyr’:\n\n recode\n\n\nThe following objects are masked from ‘package:base’:\n\n abbreviate, write\n\n\nLoading required package: arulesViz\n\nRegistered S3 method overwritten by 'seriation':\n method from \n reorder.hclust gclus\n\nLoading required package: arulesSequences\n\n"
],
[
"data(AdultUCI)\ndim(AdultUCI)\nhead(AdultUCI)",
"_____no_output_____"
]
],
[
[
"## Removing attributes",
"_____no_output_____"
]
],
[
[
"AdultUCI$fnlwgt <- NULL\nAdultUCI$\"education-num\" <- NULL\n",
"_____no_output_____"
]
],
[
[
"## Conceptual Hierarchy and Binning",
"_____no_output_____"
]
],
[
[
"AdultUCI$age <- ordered(cut(AdultUCI$age, c(15,25,45,65,100)),\n labels = c(\"Young\", \"Middle-aged\", \"Senior\", \"Old\"))\n\nAdultUCI$\"hours-per-week\" <- ordered(cut(AdultUCI$\"hours-per-week\",\n c(0,25,40,60,168)),\n labels = c(\"Part-time\", \"Full-time\", \"Over-time\", \"Workaholic\"))\n\nAdultUCI$\"capital-gain\" <- ordered(cut(AdultUCI$\"capital-gain\",\n c(-Inf,0,median(AdultUCI$\"capital-gain\"[AdultUCI$\"capital-gain\">0]),\n Inf)), labels = c(\"None\", \"Low\", \"High\"))\n\nAdultUCI$\"capital-loss\" <- ordered(cut(AdultUCI$\"capital-loss\",\n c(-Inf,0, median(AdultUCI$\"capital-loss\"[AdultUCI$\"capital-loss\">0]),\n Inf)), labels = c(\"None\", \"Low\", \"High\"))\n\nhead(AdultUCI)",
"_____no_output_____"
]
],
[
[
"## Convert to transactions",
"_____no_output_____"
]
],
[
[
"AdultTrans <- as(AdultUCI, \"transactions\")\n",
"_____no_output_____"
]
],
[
[
"## A Priori\n",
"_____no_output_____"
]
],
[
[
"rules <- apriori(AdultTrans, parameter=list(supp = 0.5, conf = 0.9, minlen=2, maxlen= 10, target = \"rules\"), \n appearance=list(rhs = c(\"capital-gain=None\"), default=\"lhs\"), control=NULL)\ninspect(rules)",
"Apriori\n\nParameter specification:\n confidence minval smax arem aval originalSupport maxtime support minlen\n 0.9 0.1 1 none FALSE TRUE 5 0.5 2\n maxlen target ext\n 10 rules TRUE\n\nAlgorithmic control:\n filter tree heap memopt load sort verbose\n 0.1 TRUE TRUE FALSE TRUE 2 TRUE\n\nAbsolute minimum support count: 24421 \n\nset item appearances ...[1 item(s)] done [0.00s].\nset transactions ...[115 item(s), 48842 transaction(s)] done [0.08s].\nsorting and recoding items ... [9 item(s)] done [0.01s].\ncreating transaction tree ... done [0.05s].\nchecking subsets of size 1 2 3 4 done [0.00s].\nwriting ... [18 rule(s)] done [0.00s].\ncreating S4 object ... done [0.01s].\n lhs rhs support confidence coverage lift count\n[1] {hours-per-week=Full-time} => {capital-gain=None} 0.5435895 0.9290688 0.5850907 1.0127342 26550\n[2] {sex=Male} => {capital-gain=None} 0.6050735 0.9051455 0.6684820 0.9866565 29553\n[3] {workclass=Private} => {capital-gain=None} 0.6413742 0.9239073 0.6941976 1.0071078 31326\n[4] {race=White} => {capital-gain=None} 0.7817862 0.9143240 0.8550428 0.9966616 38184\n[5] {native-country=United-States} => {capital-gain=None} 0.8219565 0.9159062 0.8974243 0.9983862 40146\n[6] {capital-loss=None} => {capital-gain=None} 0.8706646 0.9133376 0.9532779 0.9955863 42525\n[7] {capital-loss=None, \n hours-per-week=Full-time} => {capital-gain=None} 0.5191638 0.9259787 0.5606650 1.0093657 25357\n[8] {race=White, \n sex=Male} => {capital-gain=None} 0.5313050 0.9030799 0.5883256 0.9844048 25950\n[9] {sex=Male, \n native-country=United-States} => {capital-gain=None} 0.5406003 0.9035349 0.5983170 0.9849008 26404\n[10] {workclass=Private, \n race=White} => {capital-gain=None} 0.5472339 0.9208931 0.5942427 1.0038221 26728\n[11] {workclass=Private, \n native-country=United-States} => {capital-gain=None} 0.5689570 0.9218444 0.6171942 1.0048592 27789\n[12] {workclass=Private, \n capital-loss=None} => {capital-gain=None} 0.6111748 0.9204465 0.6639982 1.0033354 29851\n[13] {race=White, \n native-country=United-States} => {capital-gain=None} 0.7194628 0.9128933 0.7881127 0.9951019 35140\n[14] {race=White, \n capital-loss=None} => {capital-gain=None} 0.7404283 0.9099693 0.8136849 0.9919147 36164\n[15] {capital-loss=None, \n native-country=United-States} => {capital-gain=None} 0.7793702 0.9117168 0.8548380 0.9938195 38066\n[16] {workclass=Private, \n race=White, \n capital-loss=None} => {capital-gain=None} 0.5204742 0.9171628 0.5674829 0.9997559 25421\n[17] {workclass=Private, \n capital-loss=None, \n native-country=United-States} => {capital-gain=None} 0.5414807 0.9182030 0.5897179 1.0008898 26447\n[18] {race=White, \n capital-loss=None, \n native-country=United-States} => {capital-gain=None} 0.6803980 0.9083504 0.7490480 0.9901500 33232\n"
],
[
"rules_a <- as(rules, \"data.frame\")\nhead(rules_a)",
"_____no_output_____"
]
],
[
[
"## Analysis of Rules",
"_____no_output_____"
]
],
[
[
"imrules <- interestMeasure(rules, transactions = AdultTrans)\nhead(imrules)",
"_____no_output_____"
]
],
[
[
"## Removing redundant rules",
"_____no_output_____"
]
],
[
[
"nrules <- rules[!is.redundant(rules)]",
"_____no_output_____"
],
[
"arules::inspect(nrules)",
" lhs rhs support confidence\n[1] {hours-per-week=Full-time} => {capital-gain=None} 0.5435895 0.9290688 \n[2] {sex=Male} => {capital-gain=None} 0.6050735 0.9051455 \n[3] {workclass=Private} => {capital-gain=None} 0.6413742 0.9239073 \n[4] {race=White} => {capital-gain=None} 0.7817862 0.9143240 \n[5] {native-country=United-States} => {capital-gain=None} 0.8219565 0.9159062 \n[6] {capital-loss=None} => {capital-gain=None} 0.8706646 0.9133376 \n coverage lift count\n[1] 0.5850907 1.0127342 26550\n[2] 0.6684820 0.9866565 29553\n[3] 0.6941976 1.0071078 31326\n[4] 0.8550428 0.9966616 38184\n[5] 0.8974243 0.9983862 40146\n[6] 0.9532779 0.9955863 42525\n"
]
],
[
[
"## Showing the transactions that support the rules\nIn this example, we can see the transactions (trans) that support rules 1. ",
"_____no_output_____"
]
],
[
[
"st <- supportingTransactions(nrules[1], AdultTrans)\ntrans <- unique(st@data@i)\nlength(trans)\nprint(c(length(trans)/length(AdultTrans), nrules[1]@quality$support))",
"_____no_output_____"
]
],
[
[
"Now we can see the transactions (trans) that support rules 1 and 2. \nAs can be observed, the support for both rules is not the sum of the support of each rule. ",
"_____no_output_____"
]
],
[
[
"st <- supportingTransactions(nrules[1:2], AdultTrans)\ntrans <- unique(st@data@i)\nlength(trans)\nprint(c(length(trans)/length(AdultTrans), nrules[1:2]@quality$support))",
"_____no_output_____"
]
],
[
[
"## Rules visualization",
"_____no_output_____"
]
],
[
[
"options(repr.plot.width=7, repr.plot.height=4)\nplot(rules)\n",
"_____no_output_____"
],
[
"options(repr.plot.width=7, repr.plot.height=4)\nplot(rules, method=\"paracoord\", control=list(reorder=TRUE))",
"_____no_output_____"
]
],
[
[
"# Sequence Mining",
"_____no_output_____"
]
],
[
[
"x <- read_baskets(con = system.file(\"misc\", \"zaki.txt\", package = \"arulesSequences\"), info = c(\"sequenceID\",\"eventID\",\"SIZE\"))\nas(x, \"data.frame\")\n",
"_____no_output_____"
],
[
"s1 <- cspade(x, parameter = list(support = 0.4), control = list(verbose = TRUE))\nas(s1, \"data.frame\")",
"\nparameter specification:\nsupport : 0.4\nmaxsize : 10\nmaxlen : 10\n\nalgorithmic control:\nbfstype : FALSE\nverbose : TRUE\nsummary : FALSE\ntidLists : FALSE\n\npreprocessing ... 1 partition(s), 0 MB [0.046s]\nmining transactions ... 0 MB [0.032s]\nreading sequences ... [0.027s]\n\ntotal elapsed time: 0.105s\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d04b78ff18746d20cbe7d3ad0c537149866a5dd2 | 55,464 | ipynb | Jupyter Notebook | AString Integrals-Copy1.ipynb | SolitonScientific/AtomicString | e81f33d8760025910d2c3f1bb43496372adfefb6 | [
"MIT"
] | null | null | null | AString Integrals-Copy1.ipynb | SolitonScientific/AtomicString | e81f33d8760025910d2c3f1bb43496372adfefb6 | [
"MIT"
] | null | null | null | AString Integrals-Copy1.ipynb | SolitonScientific/AtomicString | e81f33d8760025910d2c3f1bb43496372adfefb6 | [
"MIT"
] | 1 | 2021-04-11T21:43:17.000Z | 2021-04-11T21:43:17.000Z | 201.687273 | 21,384 | 0.892002 | [
[
[
"# %load CommonFunctions.py\n\n\n# # COMMON ATOMIC AND ASTRING FUNCTIONS\n\n# In[14]:\n\n############### One String Pulse with width, shift and scale #############\ndef StringPulse(String1, t: float, a = 1., b = 0., c = 1., d = 0.) -> float:\n x = (t - b)/a\n if (x < -1):\n res = -0.5\n elif (x > 1):\n res = 0.5\n else:\n res = String1(x)\n res = d + res * c\n return res\n\n\n# In[16]:\n\n\n###### Atomic String Applied to list with width, shift and scale #############\ndef String(String1, x: list, a = 1., b = 0., c = 1., d = 0.) -> list:\n res = []\n for i in range(len(x)):\n res.append(StringPulse(String1, x[i], a, b, c, d))\n return res\n\n\n# In[17]:\n\n\n###### Summation of two lists #############\ndef Sum(x1: list, x2: list) -> list:\n res = []\n for i in range(len(x1)):\n res.append(x1[i] + x2[i])\n return res\n\n\n# In[18]:\n\n\n##########################################################\n##This script introduces Atomic Function \n################### One Pulse of atomic function\ndef up1(x: float) -> float:\n #Atomic function table\n up_y = [0.5, 0.48, 0.460000017,0.440000421,0.420003478,0.400016184, 0.380053256, 0.360139056, 0.340308139, 0.320605107,\n 0.301083436, 0.281802850, 0.262826445, 0.244218000, 0.226041554, 0.208361009, 0.191239338, 0.174736305, \n 0.158905389, 0.143991189, 0.129427260, 0.115840866, 0.103044024, 0.9110444278e-01, 0.798444445e-01, 0.694444445e-01, \n 0.598444445e-01, 0.510444877e-01, 0.430440239e-01, 0.358409663e-01, 0.294282603e-01, 0.237911889e-01, 0.189053889e-01, \n 0.147363055e-01, 0.112393379e-01, 0.836100883e-02, 0.604155412e-02, 0.421800000e-02, 0.282644445e-02, 0.180999032e-02, \n 0.108343562e-02, 0.605106267e-03, 0.308138660e-03, 0.139055523e-03, 0.532555251e-04, 0.161841328e-04, 0.347816874e-05, \n 0.420576116e-05, 0.167693347e-07, 0.354008603e-10, 0]\n up_x = np.arange(0.5, 1.01, 0.01)\n\n res = 0.\n if ((x >= 0.5) and (x <= 1)):\n for i in range(len(up_x) - 1):\n if (up_x[i] >= x) and (x < up_x[i+1]):\n N1 = 1 - (x - up_x[i])/0.01\n res = N1 * up_y[i] + (1 - N1) * up_y[i+1]\n return res\n return res\n\n\n# In[19]:\n############### Atomic Function Pulse with width, shift and scale #############\ndef pulse(up1, t: float, a = 1., b = 0., c = 1., d = 0.) -> float:\n x = (t - b)/a\n res = 0.\n if (x >= 0.5) and (x <= 1):\n res = up1(x)\n elif (x >= 0.0) and (x < 0.5):\n res = 1 - up1(1 - x)\n elif (x >= -1 and x <= -0.5):\n res = up1(-x)\n elif (x > -0.5) and (x < 0):\n res = 1 - up1(1 + x)\n res = d + res * c\n return res\n\n############### Atomic Function Applied to list with width, shift and scale #############\ndef up(up1, x: list, a = 1., b = 0., c = 1., d = 0.) -> list:\n res = []\n for i in range(len(x)):\n res.append(pulse(up1, x[i], a, b, c, d))\n return res\n\n############### Atomic String #############\ndef AString1(x: float) -> float:\n res = 1 * (pulse(up1, x/2.0 - 0.5) - 0.5)\n return res\n\n############### Atomic String Pulse with width, shift and scale #############\ndef AStringPulse(t: float, a = 1., b = 0., c = 1., d = 0.) -> float:\n x = (t - b)/a\n if (x < -1):\n res = -0.5\n elif (x > 1):\n res = 0.5\n else:\n res = AString1(x)\n res = d + res * c\n return res\n\n###### Atomic String Applied to list with width, shift and scale #############\ndef AString(x: list, a = 1., b = 0., c = 1., d = 0.) -> list:\n res = []\n for i in range(len(x)):\n res.append(AStringPulse(x[i], a, b, c, d))\n return res\n\n",
"_____no_output_____"
],
[
"import numpy as np\nimport pylab as pl",
"_____no_output_____"
],
[
"x = np.arange(-2.0, 2.0, 0.01)",
"_____no_output_____"
],
[
"pl.title('Atomic Function')\npl.plot(x, up(up1, x), label='Atomic Function')\npl.grid(True)\npl.show()",
"_____no_output_____"
],
[
"pl.title('Atomic String')\npl.plot(x, String(AString1, x, 1.0, 0, 1, 0), label='Atomic String')\npl.grid(True)\npl.show()",
"_____no_output_____"
],
[
"x = np.arange(-4.0, 4.0, 0.01)\ndx = x[1] - x[0]\n\npl.title('Atomic String')\npl.plot(x, String(AString1, x, 1., 0., 1., 1.), label='Atomic String')\n\nIntAString = np.cumsum(String(AString1, x, 1., 0., 1., 1.)) * dx\npl.plot(x, IntAString, label='AString Integral')\n\nInt2AString = np.cumsum(IntAString) * dx\npl.plot(x, Int2AString, label='AString Integral Integral')\n\npl.title('AString with Integrals')\npl.legend(loc='best', numpoints=1)\npl.grid(True)\npl.show()",
"_____no_output_____"
]
],
[
[
"## Summary and Observations",
"_____no_output_____"
],
[
"1) AString Integrals provide smooth curly connections between two straight lines\n\n2) Further integrals provide smooth curly connections between parabolas!!\n\n3) In general, AString integrals can provide smooth connections between any similar shapes!!!",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
d04b7fa1815dc79b0a54c557c2d08eb5aeaf2845 | 50,498 | ipynb | Jupyter Notebook | docs/!ml/notebooks/Perceptron.ipynb | a-mt/dev-roadmap | 0484e018b2a51019577b0f2caafa6182bce689d1 | [
"MIT"
] | 1 | 2019-10-28T05:40:06.000Z | 2019-10-28T05:40:06.000Z | docs/!ml/notebooks/Perceptron.ipynb | a-mt/dev-roadmap | 0484e018b2a51019577b0f2caafa6182bce689d1 | [
"MIT"
] | null | null | null | docs/!ml/notebooks/Perceptron.ipynb | a-mt/dev-roadmap | 0484e018b2a51019577b0f2caafa6182bce689d1 | [
"MIT"
] | null | null | null | 106.987288 | 10,596 | 0.849816 | [
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Load data",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({\n 'x': [4.5, 4.9, 5.0, 4.8, 5.8, 5.6, 5.7, 5.8],\n 'y': [35, 38, 45, 49, 59, 65, 73, 82],\n 'z': [0, 0, 0, 0, 1, 1, 1, 1]\n})\ndf",
"_____no_output_____"
],
[
"plt.scatter(df['x'], df['y'], c=df['z'])",
"_____no_output_____"
]
],
[
[
"## Train model",
"_____no_output_____"
]
],
[
[
"def fit(X, y, max_epochs=500):\n \"\"\"\n X : numpy 2D array. Each row corresponds to one training example.\n y : numpy 1D array. Label (0 or 1) of each example.\n \"\"\"\n\n n = X.shape[1]\n\n # Initialize weights\n weights = np.zeros((n, ))\n bias = 0.0\n\n for _ in range(max_epochs):\n errors = 0\n\n # Loop through the examples\n for i, xi in enumerate(X):\n\n predict_y = 1 if xi.dot(weights) + bias >= 0 else 0\n error = y[i] - predict_y\n\n # Update weights\n if error != 0:\n weights += error * xi\n bias += error\n\n errors += 1\n\n # We converged\n if errors == 0:\n break\n\n\n return (weights, bias)",
"_____no_output_____"
],
[
"X = df.drop('z', axis=1).values\ny = df['z'].values\n\nweights, bias = fit(X, y)\nweights, bias",
"_____no_output_____"
]
],
[
[
"## Plot predictions",
"_____no_output_____"
]
],
[
[
"def plot_decision_boundary():\n\n # Draw points\n plt.scatter(X[:,0], X[:,1], c=y)\n\n a = -weights[0]/weights[1]\n b = -bias/weights[1]\n\n # Draw hyperplane with margin\n _X = np.arange(X[:,0].min(), X[:,0].max()+1, .1)\n _Y = _X * a + b\n plt.plot(_X, _Y)\n\nplot_decision_boundary()",
"_____no_output_____"
],
[
"def plot_contour():\n\n # Draw points\n plt.scatter(X[:,0], X[:,1], c=y)\n\n x_min, x_max = plt.gca().get_xlim()\n y_min, y_max = plt.gca().get_ylim()\n\n # Draw contour\n xx, yy = np.meshgrid(np.arange(x_min, x_max+.1, .1),\n np.arange(y_min, y_max+.1, .1))\n _X = np.c_[xx.ravel(), yy.ravel()]\n\n Z = np.sign(_X.dot(weights) + bias) \\\n .reshape(xx.shape)\n\n plt.contourf(xx, yy, Z, cmap=plt.cm.Pastel1, alpha=0.3)\n\nplot_contour()",
"_____no_output_____"
]
],
[
[
"## Compare with logistic regression",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression",
"_____no_output_____"
],
[
"model = LogisticRegression(C=1e20, solver='liblinear', random_state=0)\nmodel.fit(X, y)",
"_____no_output_____"
],
[
"weights = model.coef_[0]\nbias = model.intercept_[0]\n\nplot_decision_boundary()",
"_____no_output_____"
]
],
[
[
"## Compare with SVM",
"_____no_output_____"
]
],
[
[
"from sklearn import svm",
"_____no_output_____"
],
[
"model = svm.SVC(kernel='linear', C=1.0)\nmodel.fit(X, y)",
"_____no_output_____"
],
[
"weights = model.coef_[0]\nbias = model.intercept_[0]\n\nplot_decision_boundary()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d04b8091b9077617804ed4a991dac8f45f5bd91f | 4,149 | ipynb | Jupyter Notebook | notebooks/extra - Gradient Boosting.ipynb | lampsonnguyen/ml-training-advance | 992c8304683879ade23410cfa4478622980ef420 | [
"MIT"
] | null | null | null | notebooks/extra - Gradient Boosting.ipynb | lampsonnguyen/ml-training-advance | 992c8304683879ade23410cfa4478622980ef420 | [
"MIT"
] | null | null | null | notebooks/extra - Gradient Boosting.ipynb | lampsonnguyen/ml-training-advance | 992c8304683879ade23410cfa4478622980ef420 | [
"MIT"
] | 2 | 2018-04-20T03:09:43.000Z | 2021-07-23T05:48:42.000Z | 26.259494 | 169 | 0.585924 | [
[
[
"from preamble import *\n% matplotlib notebook",
"_____no_output_____"
]
],
[
[
"# Gradient Boosting",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\ncancer = load_breast_cancer()\n\nX_train, X_test, y_train, y_test = train_test_split(\n cancer.data, cancer.target, random_state=0)\n\ngbrt = GradientBoostingClassifier(random_state=0)\ngbrt.fit(X_train, y_train)\n\nprint(\"accuracy on training set: %f\" % gbrt.score(X_train, y_train))\nprint(\"accuracy on test set: %f\" % gbrt.score(X_test, y_test))",
"_____no_output_____"
],
[
"gbrt = GradientBoostingClassifier(random_state=0, max_depth=1)\ngbrt.fit(X_train, y_train)\n\nprint(\"accuracy on training set: %f\" % gbrt.score(X_train, y_train))\nprint(\"accuracy on test set: %f\" % gbrt.score(X_test, y_test))",
"_____no_output_____"
],
[
"gbrt = GradientBoostingClassifier(random_state=0, learning_rate=0.01)\ngbrt.fit(X_train, y_train)\n\nprint(\"accuracy on training set: %f\" % gbrt.score(X_train, y_train))\nprint(\"accuracy on test set: %f\" % gbrt.score(X_test, y_test))",
"_____no_output_____"
],
[
"gbrt = GradientBoostingClassifier(random_state=0, max_depth=1)\ngbrt.fit(X_train, y_train)\n\nplt.barh(range(cancer.data.shape[1]), gbrt.feature_importances_)\nplt.yticks(range(cancer.data.shape[1]), cancer.feature_names);\nax = plt.gca()\nax.set_position([0.4, .2, .9, .9])",
"_____no_output_____"
],
[
"from xgboost import XGBClassifier\nxgb = XGBClassifier()\nxgb.fit(X_train, y_train)\nprint(\"accuracy on training set: %f\" % xgb.score(X_train, y_train))\nprint(\"accuracy on test set: %f\" % xgb.score(X_test, y_test))",
"_____no_output_____"
],
[
"from xgboost import XGBClassifier\nxgb = XGBClassifier(n_estimators=1000)\nxgb.fit(X_train, y_train)\nprint(\"accuracy on training set: %f\" % xgb.score(X_train, y_train))\nprint(\"accuracy on test set: %f\" % xgb.score(X_test, y_test))",
"_____no_output_____"
]
],
[
[
"# Exercise\nUse GradientBoostingRegressor on the Bike dataset.\nSearch over the ``learning_rate`` and ``max_depth`` using ``GridSearchCV``.\nWhat happens if you change ``n_estimators``?\n\nCompare the speed of XGBClassifier with GradientBoostingRegressor. How well does XGBClassifier do with defaults on the ``Bike`` dataset? Can you make it do better?",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d04b813279c0922db2295e687a3315359d28e7cb | 220,707 | ipynb | Jupyter Notebook | delft course dr weijermars/stress_tensor.ipynb | rksin8/reservoir-geomechanics | 8bc525306309a0764436ccf4daa9336b882d643f | [
"MIT"
] | 36 | 2020-04-16T08:43:10.000Z | 2022-03-24T11:53:19.000Z | delft course dr weijermars/stress_tensor.ipynb | yohanesnuwara/reservoir-geomechanics | 5ee77d19e63dca3c1e79dd867a83fd0070e768ae | [
"MIT"
] | null | null | null | delft course dr weijermars/stress_tensor.ipynb | yohanesnuwara/reservoir-geomechanics | 5ee77d19e63dca3c1e79dd867a83fd0070e768ae | [
"MIT"
] | 26 | 2020-04-16T08:43:17.000Z | 2022-02-23T12:15:56.000Z | 243.875138 | 54,120 | 0.890226 | [
[
[
"<a href=\"https://colab.research.google.com/github/yohanesnuwara/reservoir-geomechanics/blob/master/delft%20course%20dr%20weijermars/stress_tensor.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"# Introduction to vectors",
"_____no_output_____"
],
[
"Plot vector that has notation (2,4,4). Another vector has notation (1,2,3). Find the direction cosines of each vector, the angles of each vector to the three axes, and the angle between the two vectors!",
"_____no_output_____"
]
],
[
[
"from mpl_toolkits.mplot3d import axes3d\n\nX = np.array((0, 0))\nY= np.array((0, 0))\nZ = np.array((0, 0))\nU = np.array((2, 1))\nV = np.array((4, 2))\nW = np.array((4, 3))\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.quiver(X, Y, Z, U, V, W)\nax.set_xlim([-4, 4])\nax.set_ylim([-4, 4])\nax.set_zlim([-4, 4])",
"_____no_output_____"
],
[
"# vector A and B\nA_mag = np.sqrt(((U[0] - X[0])**2) + ((V[0] - Y[0])**2) + ((W[0] - Z[0])**2))\nprint('Magnitude of vector A:', A_mag, 'units')\nB_mag = np.sqrt(((U[1] - X[1])**2) + ((V[1] - Y[1])**2) + ((W[1] - Z[1])**2))\nprint('Magnitude of vector B:', B_mag, 'units')",
"Magnitude of vector A: 6.0 units\nMagnitude of vector B: 3.7416573867739413 units\n"
],
[
"# direction cosines\nl_A = (U[0] - X[0]) / A_mag\nm_A = (V[0] - Y[0]) / A_mag\nn_A = (W[0] - Z[0]) / A_mag\nprint('Direction cosine to x axis (cos alpha):', l_A, \"to y axis (cos beta):\", m_A, \"to z axis (cos gamma):\", n_A)\nprint('Pythagorean Sum of direction cosines of vector A:', l_A**2 + m_A**2 + n_A**2, \"and must be equals to 1\")\nl_B = (U[1] - X[1]) / B_mag\nm_B = (V[1] - Y[1]) / B_mag\nn_B = (W[1] - Z[1]) / B_mag\nprint('Direction cosine to x axis (cos alpha):', l_B, \"to y axis (cos beta):\", m_B, \"to z axis (cos gamma):\", n_B)\nprint('Pythagorean Sum of direction cosines of vector B:', l_B**2 + m_B**2 + n_B**2, \"and must be equals to 1\")\n\n# angles\nalpha_A = np.rad2deg(np.arccos(l_A))\nbeta_A = np.rad2deg(np.arccos(m_A))\ngamma_A = np.rad2deg(np.arccos(n_A))\nprint('Angle to x axis (alpha):', alpha_A, \"to y axis (beta):\", beta_A, \"to z axis (gamma):\", gamma_A)\nalpha_B = np.rad2deg(np.arccos(l_B))\nbeta_B= np.rad2deg(np.arccos(m_B))\ngamma_B = np.rad2deg(np.arccos(n_B))\nprint('Angle to x axis (alpha):', alpha_B, \"to y axis (beta):\", beta_B, \"to z axis (gamma):\", gamma_B)",
"Direction cosine to x axis (cos alpha): 0.3333333333333333 to y axis (cos beta): 0.6666666666666666 to z axis (cos gamma): 0.6666666666666666\nPythagorean Sum of direction cosines of vector A: 1.0 and must be equals to 1\nDirection cosine to x axis (cos alpha): 0.2672612419124244 to y axis (cos beta): 0.5345224838248488 to z axis (cos gamma): 0.8017837257372732\nPythagorean Sum of direction cosines of vector B: 1.0 and must be equals to 1\nAngle to x axis (alpha): 70.52877936550931 to y axis (beta): 48.18968510422141 to z axis (gamma): 48.18968510422141\nAngle to x axis (alpha): 74.498640433063 to y axis (beta): 57.688466762576155 to z axis (gamma): 36.69922520048988\n"
],
[
"# angle between two vectors\ncosine_angle = (l_A * l_B) + (m_A * m_B) + (n_A * n_B)\nangle = np.rad2deg(np.arccos(cosine_angle))\nprint('Angle between vector A and B:', angle, 'degrees')",
"Angle between vector A and B: 11.490459903731518 degrees\n"
]
],
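[
[
"# Added sketch (not part of the original notebook): the angle between the two example\n# vectors (2,4,4) and (1,2,3) can also be obtained directly from their dot product,\n# which cross-checks the direction-cosine calculation above.\nA = np.array([2, 4, 4])\nB = np.array([1, 2, 3])\ncos_AB = np.dot(A, B) / (np.linalg.norm(A) * np.linalg.norm(B))\nprint('Angle between A and B:', np.rad2deg(np.arccos(cos_AB)), 'degrees')",
"_____no_output_____"
]
],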
[
[
"# Exercise 10-3. Effective, Normal, and Shear Stress on a Plane",
"_____no_output_____"
],
[
"Consider a plane that makes an angle 60 degrees with $\\sigma_1$ and 60 degrees with $\\sigma_3$. The principal stresses are: -600, -400, -200 MPa. Calculate:\n\n* Total effective stress\n* Normal stress\n* Shear stress",
"_____no_output_____"
]
],
[
[
"# principle stresses \nsigma_1 = -600; sigma_2 = -400; sigma_3 = -200\n\n# calculate the angle of plane to second principal stress sigma 2\n# using pythagorean\nalpha = 60; gamma = 60\nl = np.cos(np.deg2rad(alpha))\nn = np.cos(np.deg2rad(gamma))\nm = np.sqrt(1 - l**2 - n**2)\nbeta = np.rad2deg(np.arccos(m))\nprint(\"The second principal stress sigma 2 makes angle:\", beta, \"degrees to the plane\")\n\n# effective stress\nsigma_eff = np.sqrt(((sigma_1**2) * (l**2)) + ((sigma_2**2) * (m**2)) + ((sigma_3**2) * (n**2)))\nprint(\"The effective stress is:\", -sigma_eff, \"MPa (minus because it's compressive)\")\n\n# normal stress\nsigma_normal = (sigma_1 * (l**2)) + (sigma_2 * (m**2)) + (sigma_3 * (n**2))\nprint(\"The normal stress is:\", sigma_normal, \"MPa\")\n\n# shear stress\nsigma_shear = np.sqrt((sigma_eff**2) - (sigma_normal**2))\nprint(\"The shear stress is:\", sigma_shear, \"MPa\")",
"The second principal stress sigma 2 makes angle: 45.000000000000014 degrees to the plane\nThe effective stress is: -424.26406871192853 MPa (minus because it's compressive)\nThe normal stress is: -400.0 MPa\nThe shear stress is: 141.4213562373095 MPa\n"
]
],
[
[
"# Stress Tensor Components",
"_____no_output_____"
]
],
[
[
"stress_tensor = [[sigma_xx, sigma_xy, sigma_xz],\n [sigma_yx, sigma_yy, sigma_yz],\n [sigma_zx, sigma_zy, sigma_zz]]",
"_____no_output_____"
],
[
"import numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n# point of cube\npoints = np.array([[-5, -5, -5],\n [5, -5, -5 ],\n [5, 5, -5],\n [-5, 5, -5],\n [-5, -5, 5],\n [5, -5, 5 ],\n [5, 5, 5],\n [-5, 5, 5]])\n\n# vector\na = np.array((0, 0))\nb= np.array((0, 0))\nc = np.array((0, 0))\nu = np.array((0, -4))\nv = np.array((5, 0))\nw = np.array((0, -4))\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.quiver(a, b, c, u, v, w, color='black')\nax.set_xlim([-5, 5])\nax.set_ylim([-5, 5])\nax.set_zlim([-5, 5])\n\nr = [-5,5]\n\nX, Y = np.meshgrid(r, r)\none = np.array([5, 5, 5, 5])\none = one.reshape(2, 2)\n\n\nax.plot_wireframe(X,Y,one, alpha=0.5)\nax.plot_wireframe(X,Y,-one, alpha=0.5)\nax.plot_wireframe(X,-one,Y, alpha=0.5)\nax.plot_wireframe(X,one,Y, alpha=0.5)\nax.plot_wireframe(one,X,Y, alpha=0.5)\nax.plot_wireframe(-one,X,Y, alpha=0.5)\nax.scatter3D(points[:, 0], points[:, 1], points[:, 2])\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\nplt.show()",
"_____no_output_____"
],
[
"np.ones(4)",
"_____no_output_____"
]
],
[
[
"# Exercise 10-7 Total Stress, Deviatoric Stress, Effective Stress, Cauchy Summation\n\n$$\\sigma_{ij}=\\tau_{ij}+P_{ij}$$\n\n$$P_{ij}=P \\cdot \\delta_{ij}$$\n\nPressure is: $P=|\\sigma_{mean}|=|\\frac{\\sigma_{xx}+\\sigma_{yy}+\\sigma_{zz}}{3}|$\n\nKnorecker Delta is: $\\delta_{ij}=\\begin{bmatrix} 1 & 0 & 0 \\\\ 0 & 1 & 0 \\\\ 0 & 0 & 1 \\end{bmatrix}$\n\nPressure tensor is: $P_{ij}=P \\cdot \\delta_{ij}$\n\nSo, overall the total stress is: $\\sigma_{ij}=\\begin{bmatrix} P+\\tau_{xx} & \\tau_{xy} & \\tau_{zx} \\\\ \\tau_{yx} & P+\\tau_{yy} & \\tau_{yz} \\\\ \\tau_{zx} & \\tau_{zy} & P+\\tau_{zz} \\end{bmatrix}$\n\nCauchy summation to calculate the components of effective stress\n\n$$\\sigma_{eff}=\\begin{bmatrix} \\sigma_x \\\\ \\sigma_y \\\\ \\sigma_z \\end{bmatrix}=\\begin{bmatrix} \\sigma_{xx} & \\sigma_{xy} & \\sigma_{xz} \\\\ \\sigma_{yx} & \\sigma_{yy} & \\sigma_{zy} \\\\ \\sigma_{zx} & \\sigma_{zy} & \\sigma_{zz} \\end{bmatrix} \\cdot \\begin{bmatrix} l \\\\ m \\\\ n \\end{bmatrix}$$",
"_____no_output_____"
],
[
"**Known**: direction cosines of plane ABC, total stress tensor.\n\n**Task**:\n\n* Determine the deviatoric stress tensor\n* Calculate the components of effective stress on plane ABC (use Cauchy's summation)\n* Calculate total effective stress, total normal stress, total shear stress ",
"_____no_output_____"
]
],
[
[
"# known\nl, m, n = 0.7, 0.5, 0.5 # direction cosines\nalpha, beta, gamma = 45, 60, 60 # angles\nstress_ij = np.array([[-40, -40, -35],\n [-40, 45, -50],\n [-35, -50, -20]]) # total stress tensor\n\n# calculate pressure\nP = np.abs(np.mean(np.array([(stress_ij[0][0]), (stress_ij[1][1]), (stress_ij[2][2])])))\nprint(\"Pressure:\", P, \"MPa\")\n\n# pressure TENSOR\nkronecker = np.array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\nP_ij = P * kronecker\nprint('Pressure tensor:')\nprint(P_ij)\n\n# deviatoric stress TENSOR\ntau_ij = stress_ij - P_ij\nprint('Deviatoric stress tensor:')\nprint(tau_ij)",
"Pressure: 5.0 MPa\nPressure tensor:\n[[5. 0. 0.]\n [0. 5. 0.]\n [0. 0. 5.]]\nDeviatoric stress tensor:\n[[-45. -40. -35.]\n [-40. 40. -50.]\n [-35. -50. -25.]]\n"
],
[
"# direction cosines VECTOR\nlmn = np.array([[l], \n [m],\n [n]])\n\n# effective stress VECTOR\nstress_eff = np.dot(stress_ij, lmn)\n\nstress_eff_1 = stress_eff[0][0]\nstress_eff_2 = stress_eff[1][0]\nstress_eff_3 = stress_eff[2][0]\n\nprint('Effective stress vector:')\nprint(stress_eff)\nprint('X component of effective stress:', stress_eff_1, 'MPa')\nprint('Y component of effective stress:', stress_eff_2, 'MPa')\nprint('Z component of effective stress:', stress_eff_3, 'MPa')",
"Effective stress vector:\n[[-65.5]\n [-30.5]\n [-59.5]]\nX component of effective stress: -65.5 MPa\nY component of effective stress: -30.5 MPa\nZ component of effective stress: -59.5 MPa\n"
],
[
"# total / magnitude of effective stress, is SCALAR\nsigma_eff = np.sqrt((stress_eff_1**2) + (stress_eff_2**2) + (stress_eff_3**2))\nprint(\"The total effective stress is:\", -sigma_eff, \"MPa\")\n\n# principal stresses\nsigma_1 = stress_eff_1 / l\nsigma_2 = stress_eff_2 / m\nsigma_3 = stress_eff_3 / n\nprint('X component of principal stress:', sigma_1, 'MPa')\nprint('Y component of principal stress:', sigma_2, 'MPa')\nprint('Z component of principal stress:', sigma_3, 'MPa')\n\n# total normal stress\nsigma_normal = (sigma_1 * (l**2)) + (sigma_2 * (m**2)) + (sigma_3 * (n**2))\nprint(\"The normal stress is:\", sigma_normal, \"MPa\")\nprint(\"Because normal stress\", sigma_normal, \"MPa nearly equals to sigma 1\", sigma_1, \"MPa, the plane is nearly normal to sigma 1\")\n\n# total shear stress\nsigma_shear = np.sqrt((sigma_eff**2) - (sigma_normal**2))\nprint(\"The shear stress is:\", sigma_shear, \"MPa\")",
"The total effective stress is: -93.59887819840577 MPa\nX component of principal stress: -93.57142857142858 MPa\nY component of principal stress: -61.0 MPa\nZ component of principal stress: -119.0 MPa\nThe normal stress is: -90.85 MPa\nBecause normal stress -90.85 MPa nearly equals to sigma 1 -93.57142857142858 MPa, the plane is nearly normal to sigma 1\nThe shear stress is: 22.517271149053567 MPa\n"
]
],
[
[
"<div>\n<img src=\"https://user-images.githubusercontent.com/51282928/77084625-cdfbe280-6a31-11ea-9c3f-c4e592d5cfd9.jpeg\" width=\"500\"/>\n</div>",
"_____no_output_____"
],
[
"# Exercise 10-8 Transforming Stress Tensor (Containing all the 9 tensors of shear and normal) to Principal Stress Tensor using Cubic Equation",
"_____no_output_____"
]
],
[
[
"sigma_ij = np.array([[0, 0, 100],\n [0, 0, 0],\n [-100, 0, 0]]) # stress tensor\n# cubic equation\ncoeff3 = 1\ncoeff2 = -((sigma_ij[0][0] + sigma_ij[1][1] + sigma_ij[2][2]))\ncoeff1 = (sigma_ij[0][0] * sigma_ij[1][1]) + (sigma_ij[1][1] * sigma_ij[2][2]) + (sigma_ij[2][2] * sigma_ij[0][0]) - ((sigma_ij[0][1])**2) - ((sigma_ij[1][2])**2) - ((sigma_ij[2][0])**2)\ncoeff0 = -((sigma_ij[0][0] * sigma_ij[1][1] * sigma_ij[2][2]) + (2 * sigma_ij[0][1] * sigma_ij[1][2] * sigma_ij[2][0]) - (sigma_ij[0][0] * (sigma_ij[1][2])**2) - (sigma_ij[1][1] * (sigma_ij[2][0])**2) - (sigma_ij[2][2]* (sigma_ij[0][1])**2))\n\nroots = np.roots([coeff3, coeff2, coeff1, coeff0])\nsigma = np.sort(roots)\nsigma_1 = sigma[2]\nsigma_2 = sigma[1]\nsigma_3 = sigma[0]\nsigma_principal = np.array([[sigma_1, 0, 0],\n [0, sigma_2, 0],\n [0, 0, sigma_3]])\nprint(\"The principal stresses are, sigma 1:\", sigma_1, \"MPa, sigma 2:\", sigma_2, \"MPa, and sigma 3:\", sigma_3, \"MPa\")\nprint(\"Principal stress tensor:\")\nprint(sigma_principal)",
"The principal stresses are, sigma 1: 99.99999999999999 MPa, sigma 2: 0.0 MPa, and sigma 3: -100.00000000000001 MPa\nPrincipal stress tensor:\n[[ 100. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. -100.]]\n"
],
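[
"# Added sketch (not in the original notebook): because sigma_ij is symmetric, the same\n# principal stresses can be cross-checked with an eigen-decomposition; the eigenvector\n# columns give the direction cosines of the principal axes.\neigvals, eigvecs = np.linalg.eigh(sigma_ij)\nprint('Principal stresses from np.linalg.eigh:', np.sort(eigvals)[::-1])",
"_____no_output_____"
],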
[
"denominator_l = (sigma_ij[0][0] * sigma_ij[2][2]) - (sigma_ij[1][1] * sigma_1) - (sigma_ij[2][2] * sigma_1) + (sigma_1)**2 - (sigma_ij[1][2])**2\ndenominator_m = (sigma_2 * sigma_ij[0][1]) + (sigma_ij[2][0] * sigma_ij[1][2]) - (sigma_ij[0][1] * sigma_ij[2][2])\ndenominator_n = (sigma_3 * sigma_ij[2][0]) + (sigma_ij[0][1] * sigma_ij[1][2]) - (sigma_ij[2][0] * sigma_ij[1][1])\ndenominator_l, denominator_m, denominator_n",
"_____no_output_____"
]
],
[
[
"# ***",
"_____no_output_____"
]
],
[
[
"from mpl_toolkits.mplot3d import axes3d\n\nX = np.array((0))\nY= np.array((0))\nU = np.array((0))\nV = np.array((4))\n\nfig, ax = plt.subplots()\nq = ax.quiver(X, Y, U, V,units='xy' ,scale=1)\n\nplt.grid()\n\nax.set_aspect('equal')\n\nplt.xlim(-5,5)\nplt.ylim(-5,5)",
"_____no_output_____"
],
[
"from mpl_toolkits.mplot3d import axes3d\n\nX = np.array((0))\nY= np.array((0))\nZ = np.array((0))\nU = np.array((1))\nV = np.array((1))\nW = np.array((1))\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.quiver(X, Y, Z, U, V, W)\nax.set_xlim([-1, 1])\nax.set_ylim([-1, 1])\nax.set_zlim([-1, 1])",
"_____no_output_____"
],
[
"from mpl_toolkits.mplot3d import axes3d\n\nvx_mag = v_mag * l\nvy_mag = v_mag * m\nvz_mag = v_mag * n\nx = 0; y = 0; z = 0\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.quiver(x, y, z, vx_mag, vy_mag, vz_mag)\nax.set_xlim(0, 10); ax.set_ylim(0, 10); ax.set_zlim(0, 5)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d04b9c8cc74e8052ec0f904fe492ab9e0e007da1 | 17,411 | ipynb | Jupyter Notebook | advanced_functionality/autogluon-tabular/AutoGluon_Tabular_SageMaker.ipynb | phiamazon/amazon-sagemaker-examples | abf3d06d3ea21c5ec425344d517700338a620f8c | [
"Apache-2.0"
] | 3 | 2020-09-10T15:02:36.000Z | 2020-09-13T17:37:23.000Z | advanced_functionality/autogluon-tabular/AutoGluon_Tabular_SageMaker.ipynb | phiamazon/amazon-sagemaker-examples | abf3d06d3ea21c5ec425344d517700338a620f8c | [
"Apache-2.0"
] | 4 | 2020-09-26T01:25:36.000Z | 2021-08-25T16:10:50.000Z | advanced_functionality/autogluon-tabular/AutoGluon_Tabular_SageMaker.ipynb | phiamazon/amazon-sagemaker-examples | abf3d06d3ea21c5ec425344d517700338a620f8c | [
"Apache-2.0"
] | 1 | 2020-09-09T08:35:51.000Z | 2020-09-09T08:35:51.000Z | 28.037037 | 573 | 0.577796 | [
[
[
"# AutoGluon Tabular with SageMaker\n\n[AutoGluon](https://github.com/awslabs/autogluon) automates machine learning tasks enabling you to easily achieve strong predictive performance in your applications. With just a few lines of code, you can train and deploy high-accuracy deep learning models on tabular, image, and text data.\nThis notebook shows how to use AutoGluon-Tabular with Amazon SageMaker by creating custom containers.",
"_____no_output_____"
],
[
"## Prerequisites\n\nIf using a SageMaker hosted notebook, select kernel `conda_mxnet_p36`.",
"_____no_output_____"
]
],
[
[
"# Make sure docker compose is set up properly for local mode\n!./setup.sh",
"_____no_output_____"
],
[
"# Imports\nimport os\nimport boto3\nimport sagemaker\nfrom time import sleep\nfrom collections import Counter\nimport numpy as np\nimport pandas as pd\nfrom sagemaker import get_execution_role, local, Model, utils, fw_utils, s3\nfrom sagemaker.estimator import Estimator\nfrom sagemaker.predictor import RealTimePredictor, csv_serializer, StringDeserializer\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom IPython.core.display import display, HTML\nfrom IPython.core.interactiveshell import InteractiveShell\n\n# Print settings\nInteractiveShell.ast_node_interactivity = \"all\"\npd.set_option('display.max_columns', 500)\npd.set_option('display.max_rows', 10)\n\n# Account/s3 setup\nsession = sagemaker.Session()\nlocal_session = local.LocalSession()\nbucket = session.default_bucket()\nprefix = 'sagemaker/autogluon-tabular'\nregion = session.boto_region_name\nrole = get_execution_role()\nclient = session.boto_session.client(\n \"sts\", region_name=region, endpoint_url=utils.sts_regional_endpoint(region)\n )\naccount = client.get_caller_identity()['Account']\necr_uri_prefix = utils.get_ecr_image_uri_prefix(account, region)\nregistry_id = fw_utils._registry_id(region, 'mxnet', 'py3', account, '1.6.0')\nregistry_uri = utils.get_ecr_image_uri_prefix(registry_id, region)",
"_____no_output_____"
]
],
[
[
"### Build docker images",
"_____no_output_____"
],
[
"First, build autogluon package to copy into docker image.",
"_____no_output_____"
]
],
[
[
"if not os.path.exists('package'):\n !pip install PrettyTable -t package\n !pip install --upgrade boto3 -t package\n !pip install bokeh -t package\n !pip install --upgrade matplotlib -t package\n !pip install autogluon -t package",
"_____no_output_____"
]
],
[
[
"Now build the training/inference image and push to ECR",
"_____no_output_____"
]
],
[
[
"training_algorithm_name = 'autogluon-sagemaker-training'\ninference_algorithm_name = 'autogluon-sagemaker-inference'",
"_____no_output_____"
],
[
"!./container-training/build_push_training.sh {account} {region} {training_algorithm_name} {ecr_uri_prefix} {registry_id} {registry_uri}\n!./container-inference/build_push_inference.sh {account} {region} {inference_algorithm_name} {ecr_uri_prefix} {registry_id} {registry_uri}",
"_____no_output_____"
]
],
[
[
"### Get the data",
"_____no_output_____"
],
[
"In this example we'll use the direct-marketing dataset to build a binary classification model that predicts whether customers will accept or decline a marketing offer. \nFirst we'll download the data and split it into train and test sets. AutoGluon does not require a separate validation set (it uses bagged k-fold cross-validation).",
"_____no_output_____"
]
],
[
[
"# Download and unzip the data\n!aws s3 cp --region {region} s3://sagemaker-sample-data-{region}/autopilot/direct_marketing/bank-additional.zip .\n!unzip -qq -o bank-additional.zip\n!rm bank-additional.zip\n\nlocal_data_path = './bank-additional/bank-additional-full.csv'\ndata = pd.read_csv(local_data_path)\n\n# Split train/test data\ntrain = data.sample(frac=0.7, random_state=42)\ntest = data.drop(train.index)\n\n# Split test X/y\nlabel = 'y'\ny_test = test[label]\nX_test = test.drop(columns=[label])",
"_____no_output_____"
]
],
[
[
"##### Check the data",
"_____no_output_____"
]
],
[
[
"train.head(3)\ntrain.shape\n\ntest.head(3)\ntest.shape\n\nX_test.head(3)\nX_test.shape",
"_____no_output_____"
]
],
[
[
"Upload the data to s3",
"_____no_output_____"
]
],
[
[
"train_file = 'train.csv'\ntrain.to_csv(train_file,index=False)\ntrain_s3_path = session.upload_data(train_file, key_prefix='{}/data'.format(prefix))\n\ntest_file = 'test.csv'\ntest.to_csv(test_file,index=False)\ntest_s3_path = session.upload_data(test_file, key_prefix='{}/data'.format(prefix))\n\nX_test_file = 'X_test.csv'\nX_test.to_csv(X_test_file,index=False)\nX_test_s3_path = session.upload_data(X_test_file, key_prefix='{}/data'.format(prefix))",
"_____no_output_____"
]
],
[
[
"## Hyperparameter Selection\n\nThe minimum required settings for training is just a target label, `fit_args['label']`.\n\nAdditional optional hyperparameters can be passed to the `autogluon.task.TabularPrediction.fit` function via `fit_args`.\n\nBelow shows a more in depth example of AutoGluon-Tabular hyperparameters from the example [Predicting Columns in a Table - In Depth](https://autogluon.mxnet.io/tutorials/tabular_prediction/tabular-indepth.html#model-ensembling-with-stacking-bagging). Please see [fit parameters](https://autogluon.mxnet.io/api/autogluon.task.html?highlight=eval_metric#autogluon.task.TabularPrediction.fit) for further information. Note that in order for hyperparameter ranges to work in SageMaker, values passed to the `fit_args['hyperparameters']` must be represented as strings.\n\n```python\nnn_options = {\n 'num_epochs': \"10\",\n 'learning_rate': \"ag.space.Real(1e-4, 1e-2, default=5e-4, log=True)\",\n 'activation': \"ag.space.Categorical('relu', 'softrelu', 'tanh')\",\n 'layers': \"ag.space.Categorical([100],[1000],[200,100],[300,200,100])\",\n 'dropout_prob': \"ag.space.Real(0.0, 0.5, default=0.1)\"\n}\n\ngbm_options = {\n 'num_boost_round': \"100\",\n 'num_leaves': \"ag.space.Int(lower=26, upper=66, default=36)\"\n}\n\nmodel_hps = {'NN': nn_options, 'GBM': gbm_options} \n\nfit_args = {\n 'label': 'y',\n 'presets': ['best_quality', 'optimize_for_deployment'],\n 'time_limits': 60*10,\n 'hyperparameters': model_hps,\n 'hyperparameter_tune': True,\n 'search_strategy': 'skopt'\n}\n\nhyperparameters = {\n 'fit_args': fit_args,\n 'feature_importance': True\n}\n```\n**Note:** Your hyperparameter choices may affect the size of the model package, which could result in additional time taken to upload your model and complete training. Including `'optimize_for_deployment'` in the list of `fit_args['presets']` is recommended to greatly reduce upload times.\n\n<br>",
"_____no_output_____"
]
],
[
[
"# Define required label and optional additional parameters\nfit_args = {\n 'label': 'y',\n # Adding 'best_quality' to presets list will result in better performance (but longer runtime)\n 'presets': ['optimize_for_deployment'],\n}\n\n# Pass fit_args to SageMaker estimator hyperparameters\nhyperparameters = {\n 'fit_args': fit_args,\n 'feature_importance': True\n}",
"_____no_output_____"
]
],
[
[
"## Train\n\nFor local training set `train_instance_type` to `local` . \nFor non-local training the recommended instance type is `ml.m5.2xlarge`. \n\n**Note:** Depending on how many underlying models are trained, `train_volume_size` may need to be increased so that they all fit on disk.",
"_____no_output_____"
]
],
[
[
"%%time\n\ninstance_type = 'ml.m5.2xlarge'\n#instance_type = 'local'\n\necr_image = f'{ecr_uri_prefix}/{training_algorithm_name}:latest'\n\nestimator = Estimator(image_name=ecr_image,\n role=role,\n train_instance_count=1,\n train_instance_type=instance_type,\n hyperparameters=hyperparameters,\n train_volume_size=100)\n\n# Set inputs. Test data is optional, but requires a label column.\ninputs = {'training': train_s3_path, 'testing': test_s3_path}\n\nestimator.fit(inputs)",
"_____no_output_____"
]
],
[
[
"### Create Model",
"_____no_output_____"
]
],
[
[
"# Create predictor object\nclass AutoGluonTabularPredictor(RealTimePredictor):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, content_type='text/csv', \n serializer=csv_serializer, \n deserializer=StringDeserializer(), **kwargs)",
"_____no_output_____"
],
[
"ecr_image = f'{ecr_uri_prefix}/{inference_algorithm_name}:latest'\n\nif instance_type == 'local':\n model = estimator.create_model(image=ecr_image, role=role)\nelse:\n model_uri = os.path.join(estimator.output_path, estimator._current_job_name, \"output\", \"model.tar.gz\")\n model = Model(model_uri, ecr_image, role=role, sagemaker_session=session, predictor_cls=AutoGluonTabularPredictor)",
"_____no_output_____"
]
],
[
[
"### Batch Transform",
"_____no_output_____"
],
[
"For local mode, either `s3://<bucket>/<prefix>/output/` or `file:///<absolute_local_path>` can be used as outputs.\n\nBy including the label column in the test data, you can also evaluate prediction performance (In this case, passing `test_s3_path` instead of `X_test_s3_path`).",
"_____no_output_____"
]
],
[
[
"output_path = f's3://{bucket}/{prefix}/output/'\n# output_path = f'file://{os.getcwd()}'\n\ntransformer = model.transformer(instance_count=1, \n instance_type=instance_type,\n strategy='MultiRecord',\n max_payload=6,\n max_concurrent_transforms=1, \n output_path=output_path)\n\ntransformer.transform(test_s3_path, content_type='text/csv', split_type='Line')\ntransformer.wait()",
"_____no_output_____"
]
],
[
[
"### Endpoint",
"_____no_output_____"
],
[
"##### Deploy remote or local endpoint",
"_____no_output_____"
]
],
[
[
"instance_type = 'ml.m5.2xlarge'\n#instance_type = 'local'\n\npredictor = model.deploy(initial_instance_count=1, \n instance_type=instance_type)",
"_____no_output_____"
]
],
[
[
"##### Attach to endpoint (or reattach if kernel was restarted)",
"_____no_output_____"
]
],
[
[
"# Select standard or local session based on instance_type\nif instance_type == 'local': \n sess = local_session\nelse: \n sess = session\n\n# Attach to endpoint\npredictor = AutoGluonTabularPredictor(predictor.endpoint, sagemaker_session=sess)",
"_____no_output_____"
]
],
[
[
"##### Predict on unlabeled test data",
"_____no_output_____"
]
],
[
[
"results = predictor.predict(X_test.to_csv(index=False)).splitlines()\n\n# Check output\nprint(Counter(results))",
"_____no_output_____"
]
],
[
[
"##### Predict on data that includes label column \nPrediction performance metrics will be printed to endpoint logs.",
"_____no_output_____"
]
],
[
[
"results = predictor.predict(test.to_csv(index=False)).splitlines()\n\n# Check output\nprint(Counter(results))",
"_____no_output_____"
]
],
[
[
"##### Check that classification performance metrics match evaluation printed to endpoint logs as expected",
"_____no_output_____"
]
],
[
[
"y_results = np.array(results)\n\nprint(\"accuracy: {}\".format(accuracy_score(y_true=y_test, y_pred=y_results)))\nprint(classification_report(y_true=y_test, y_pred=y_results, digits=6))",
"_____no_output_____"
]
],
[
[
"##### Clean up endpoint",
"_____no_output_____"
]
],
[
[
"predictor.delete_endpoint()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d04ba25d4f2f65a94b0569b1ad8f5a896d01e9b5 | 42,318 | ipynb | Jupyter Notebook | hidrokit/contrib_taruma/ipynb/taruma_hk73_bmkg.ipynb | hidrokit/manual | 4e6fc1a2eb93ac43d619f8f485e5bf812514f052 | [
"MIT"
] | null | null | null | hidrokit/contrib_taruma/ipynb/taruma_hk73_bmkg.ipynb | hidrokit/manual | 4e6fc1a2eb93ac43d619f8f485e5bf812514f052 | [
"MIT"
] | null | null | null | hidrokit/contrib_taruma/ipynb/taruma_hk73_bmkg.ipynb | hidrokit/manual | 4e6fc1a2eb93ac43d619f8f485e5bf812514f052 | [
"MIT"
] | null | null | null | 29.906714 | 3,999 | 0.436883 | [
[
[
"<a href=\"https://colab.research.google.com/gist/taruma/b00880905f297013f046dad95dc2e284/taruma_hk73_bmkg.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Berdasarkan isu [#73](https://github.com/taruma/hidrokit/issues/73): **request: mengolah berkas dari data bmkg**\n\nDeskripsi:\n\n- mengolah berkas excel yang diperoleh dari data online bmkg untuk siap dipakai\n- memeriksa kondisi data\n\nFungsi yang diharapkan:\n\n__Umum / General__\n\n- Memeriksa apakah data lengkap atau tidak? Jika tidak, data apa dan pada tanggal berapa?\n- Memeriksa apakah data tidak ada data / tidak ada pengukuran (9999) atau data tidak diukur (8888)? Jika ada, data apa dan pada tanggal berapa?\n- Menampilkan \"potongan\" baris yang tidak memiliki data / tidak melakukan pengukuran?\n",
"_____no_output_____"
],
[
"# DATASET",
"_____no_output_____"
]
],
[
[
"# AKSES GOOGLE DRIVE \nfrom google.colab import drive\ndrive.mount('/content/gdrive')",
"Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n"
],
[
"# DRIVE PATH\nDRIVE_DROP_PATH = '/content/gdrive/My Drive/Colab Notebooks/_dropbox'\nDRIVE_DATASET_PATH = '/content/gdrive/My Drive/Colab Notebooks/_dataset/uma_pamarayan'",
"_____no_output_____"
],
[
"DATASET_PATH = DRIVE_DATASET_PATH + '/klimatologi_geofisika_tangerang_1998_2009.xlsx'",
"_____no_output_____"
]
],
[
[
"# FUNGSI",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nfrom operator import itemgetter\nfrom itertools import groupby\n\ndef _read_bmkg(io):\n return pd.read_excel(\n io, skiprows=8, skipfooter=16, header=0, index_col=0, parse_dates=True,\n date_parser=lambda x: pd.to_datetime(x, format='%d-%m-%Y')\n )\n\ndef _have_nan(dataset):\n if dataset.isna().any().any():\n return True\n else:\n return False\n\ndef _get_index1D(array1D_bool):\n return np.argwhere(array1D_bool).reshape(-1,)\n\ndef _get_nan(dataset):\n nan = {}\n\n for col in dataset.columns:\n nan[col] = _get_index1D(dataset[col].isna().values).tolist()\n\n return nan\n\ndef _get_missing(dataset):\n missing = {}\n\n for col in dataset.columns:\n masking = (dataset[col] == 8888) | (dataset[col] == 9999)\n missing[col] = _get_index1D(masking.values)\n \n return missing\n\ndef _check_nan(dataset):\n if _have_nan(dataset):\n return _get_nan(dataset)\n else:\n return None\n\ndef _get_nan_columns(dataset):\n return dataset.columns[dataset.isna().any()].tolist()\n\ndef _group_as_list(array):\n\n # based on https://stackoverflow.com/a/15276206 \n group_list = []\n for _, g in groupby(enumerate(array), lambda x: x[0]-x[1]):\n single_list = sorted(list(map(itemgetter(1), g)))\n group_list.append(single_list)\n \n return group_list\n\ndef _group_as_index(\n group_list, index=None, date_format='%Y%m%d',\n format_date = '{}-{}'\n):\n group_index = []\n date_index = isinstance(index, pd.DatetimeIndex)\n\n for item in group_list:\n if len(item) == 1:\n if date_index:\n group_index.append(index[item[0]].strftime(date_format))\n else:\n group_index.append(index[item[0]])\n else:\n if date_index:\n group_index.append(\n format_date.format(\n index[item[0]].strftime(date_format),\n index[item[-1]].strftime(date_format)\n )\n )\n else:\n group_index.append(\n format_date.format(\n index[item[0]], index[item[-1]]\n )\n )\n \n return group_index",
"_____no_output_____"
]
],
[
[
"# PENGGUNAAN",
"_____no_output_____"
],
[
"## Fungsi `_read_bmkg`\n\nTujuan: Impor berkas excel bmkg ke dataframe",
"_____no_output_____"
]
],
[
[
"dataset = _read_bmkg(DATASET_PATH)\ndataset.head()",
"_____no_output_____"
],
[
"dataset.tail()",
"_____no_output_____"
]
],
[
[
"## Fungsi `_have_nan()`\n\nTujuan: Memeriksa apakah di dalam tabel memiliki nilai yang hilang (np.nan)",
"_____no_output_____"
]
],
[
[
"_have_nan(dataset)",
"_____no_output_____"
]
],
[
[
"## Fungsi `_get_index1D()`\n\nTujuan: Memperoleh index data yang hilang untuk setiap array",
"_____no_output_____"
]
],
[
[
"_get_index1D(dataset['RH_avg'].isna().values)",
"_____no_output_____"
]
],
[
[
"## Fungsi `_get_nan()`\n\nTujuan: Memperoleh index data yang hilang untuk setiap kolom dalam bentuk `dictionary`",
"_____no_output_____"
]
],
[
[
"_get_nan(dataset).keys()",
"_____no_output_____"
],
[
"print(_get_nan(dataset)['RH_avg'])",
"[852, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1220, 1221, 1222, 1223, 1224, 1628, 1629, 1697, 2657]\n"
]
],
[
[
"## Fungsi `_get_nan_columns()`\n\nTujuan: Memperoleh nama kolom yang memiliki nilai yang hilang `NaN`.",
"_____no_output_____"
]
],
[
[
"_get_nan_columns(dataset)",
"_____no_output_____"
]
],
[
[
"## Fungsi `_check_nan()`\n\nTujuan: Gabungan dari `_have_nan()` dan `_get_nan()`. Memeriksa apakah dataset memiliki `NaN`, jika iya, memberikan nilai hasil `_get_nan()`, jika tidak memberikan nilai `None`.",
"_____no_output_____"
]
],
[
[
"_check_nan(dataset).items()",
"_____no_output_____"
],
[
"# Jika tidak memiliki nilai nan\nprint(_check_nan(dataset.drop(_get_nan_columns(dataset), axis=1)))",
"None\n"
]
],
[
[
"## Fungsi `_group_as_list()`\n\nTujuan: Mengelompokkan kelompok array yang bersifat kontinu (nilainya berurutan) dalam masing-masing list.\n\nReferensi: https://stackoverflow.com/a/15276206 (dimodifikasi untuk Python 3.x dan kemudahan membaca) ",
"_____no_output_____"
]
],
[
[
"missing_dict = _get_nan(dataset)\nmissing_RH_avg = missing_dict['RH_avg']\nprint(missing_RH_avg)",
"[852, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1220, 1221, 1222, 1223, 1224, 1628, 1629, 1697, 2657]\n"
],
[
"print(_group_as_list(missing_RH_avg))",
"[[852], [1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067], [1220, 1221, 1222, 1223, 1224], [1628, 1629], [1697], [2657]]\n"
]
],
[
[
"## Fungsi `_group_as_index()`\n\nTujuan: Mengubah hasil pengelompokkan menjadi jenis index dataset (dalam kasus ini dalam bentuk tanggal dibandingkan dalam bentuk angka-index dataset).",
"_____no_output_____"
]
],
[
[
"_group_as_index(_group_as_list(missing_RH_avg), index=dataset.index, date_format='%d %b %Y')",
"_____no_output_____"
]
],
[
[
"## Fungsi `_get_missing()`\n\nTujuan: Memperoleh index yang memiliki nilai tidak terukur (bernilai `8888` atau `9999`) untuk setiap kolomnya",
"_____no_output_____"
]
],
[
[
"_get_missing(dataset)",
"_____no_output_____"
]
],
[
[
"# Penerapan",
"_____no_output_____"
],
[
"## Menampilkan index yang bermasalah\n\nTujuan: Setelah memperoleh index dari hasil `_get_missing()` atau `_get_nan()`, bisa menampilkan potongan index tersebut dalam dataframe.",
"_____no_output_____"
]
],
[
[
"dataset.iloc[_get_missing(dataset)['RR']]",
"_____no_output_____"
],
[
"_group_as_list(_get_missing(dataset)['RR'])",
"_____no_output_____"
],
[
"_group_as_index(_, index=dataset.index, date_format='%d %b %Y', format_date='{} sampai {}')",
"_____no_output_____"
]
],
[
[
"# Changelog\n\n```\n- 20190928 - 1.0.0 - Initial\n```\n\n#### Copyright © 2019 [Taruma Sakti Megariansyah](https://taruma.github.io)\n\nSource code in this notebook is licensed under a [MIT License](https://choosealicense.com/licenses/mit/). Data in this notebook is licensed under a [Creative Common Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/). ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
d04ba34b2203b2b845671c65ceb5f6103e60ca92 | 240,293 | ipynb | Jupyter Notebook | homework/key-random_walks.ipynb | nishadalal120/NEU-365P-385L-Spring-2021 | eff075482913a6c72737c578f1c5fc42527c12bb | [
"Unlicense"
] | 12 | 2021-01-05T18:26:42.000Z | 2021-03-11T19:26:07.000Z | homework/key-random_walks.ipynb | nishadalal120/NEU-365P-385L-Spring-2021 | eff075482913a6c72737c578f1c5fc42527c12bb | [
"Unlicense"
] | 1 | 2021-04-21T00:57:10.000Z | 2021-04-21T00:57:10.000Z | homework/key-random_walks.ipynb | nishadalal120/NEU-365P-385L-Spring-2021 | eff075482913a6c72737c578f1c5fc42527c12bb | [
"Unlicense"
] | 22 | 2021-01-21T18:52:41.000Z | 2021-04-15T20:22:20.000Z | 876.981752 | 119,332 | 0.953136 | [
[
[
"# Homework - Random Walks (18 pts)",
"_____no_output_____"
],
[
"## Continuous random walk in three dimensions\n\nWrite a program simulating a three-dimensional random walk in a continuous space. Let 1000 independent particles all start at random positions within a cube with corners at (0,0,0) and (1,1,1). At each time step each particle will move in a random direction by a random amount between -1 and 1 along each axis (x, y, z).",
"_____no_output_____"
],
[
"1. (3 pts) Create data structure(s) to store your simulated particle positions for each of 2000 time steps and initialize them with the particles starting positions.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nnumTimeSteps = 2000\nnumParticles = 1000\n\npositions = np.zeros( (numParticles, 3, numTimeSteps) )\n\n# initialize starting positions on first time step\npositions[:,:,0] = np.random.random( (numParticles, 3) )",
"_____no_output_____"
]
],
[
[
"2. (3 pts) Write code to run your simulation for 2000 time steps.",
"_____no_output_____"
]
],
[
[
"for t in range(numTimeSteps-1):\n # 2 * [0 to 1] - 1 --> [-1 to 1]\n jumpsForAllParticles = 2 * np.random.random((numParticles, 3)) - 1\n positions[:,:,t+1] = positions[:,:,t] + jumpsForAllParticles",
"_____no_output_____"
],
[
"# just for fun, here's another way to run the simulation above without a loop\njumpsForAllParticlesAndAllTimeSteps = 2 * np.random.random((numParticles, 3, numTimeSteps-1)) - 1\npositions[:,:,1:] = positions[:,:,0].reshape(numParticles, 3, 1) + np.cumsum(jumpsForAllParticlesAndAllTimeSteps, axis=2)",
"_____no_output_____"
]
],
[
[
"3. (3 pts) Generate a series of four 3D scatter plots at selected time points to visually convey what is going on. Arrange the plots in a single row from left to right. Make sure you indicate which time points you are showing.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\n\nlim = 70\nplt.figure(figsize=(12,3))\nfor (i,t) in enumerate([0, 100, 1000, 1999]):\n ax = plt.subplot(1, 4, i+1, projection='3d')\n x = positions[:,0,t]\n y = positions[:,1,t]\n z = positions[:,2,t]\n ax.scatter(x, y, z)\n plt.xlim([-lim, lim])\n plt.ylim([-lim, lim])\n ax.set_zlim([-lim, lim])\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n ax.set_zlabel(\"z\")\n plt.title(f\"Time {t}\");",
"_____no_output_____"
]
],
[
[
"4. (3 pts) Draw the path of a single particle (your choice) across all time steps in a 3D plot.",
"_____no_output_____"
]
],
[
[
"ax = plt.subplot(1, 1, 1, projection='3d')\ni = 10 # particle index\nx = positions[i,0,:]\ny = positions[i,1,:]\nz = positions[i,2,:]\nplt.plot(x, y, z)\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nax.set_zlabel(\"z\")\nplt.title(f\"Particle {i}\");",
"_____no_output_____"
]
],
[
[
"5. (3 pts) Find the minimum, maximum, mean and variance for the jump distances of all particles throughout the entire simulation. Jump distance is the euclidean distance moved on each time step $\\sqrt(dx^2+dy^2+dz^2)$. *Hint: numpy makes this very simple.*",
"_____no_output_____"
]
],
[
[
"jumpsXYZForAllParticlesAndAllTimeSteps = positions[:,:,1:] - positions[:,:,:-1]\njumpDistancesForAllParticlesAndAllTimeSteps = np.sqrt(np.sum(jumpsXYZForAllParticlesAndAllTimeSteps**2, axis=1))\n\nprint(f\"min = {jumpDistancesForAllParticlesAndAllTimeSteps.min()}\")\nprint(f\"max = {jumpDistancesForAllParticlesAndAllTimeSteps.max()}\")\nprint(f\"mean = {jumpDistancesForAllParticlesAndAllTimeSteps.mean()}\")\nprint(f\"var = {jumpDistancesForAllParticlesAndAllTimeSteps.var()}\")",
"min = 0.0052364433932233926\nmax = 1.7230154410954457\nmean = 0.9602742572616196\nvar = 0.07749699927626445\n"
]
],
[
[
"6. (3 pts) Repeat the simulation, but this time confine the particles to a unit cell of dimension 10x10x10. Make it so that if a particle leaves one edge of the cell, it enters on the opposite edge (this is the sort of thing most molecular dynamics simulations do). Show plots as in #3 to visualize the simulation (note that most interesting stuff liekly happens in the first 100 time steps).",
"_____no_output_____"
]
],
[
[
"for t in range(numTimeSteps-1):\n # 2 * [0 to 1] - 1 --> [-1 to 1]\n jumpsForAllParticles = 2 * np.random.random((numParticles, 3)) - 1\n positions[:,:,t+1] = positions[:,:,t] + jumpsForAllParticles\n # check for out-of-bounds and warp to opposite bound\n for i in range(numParticles):\n for j in range(3):\n if positions[i,j,t+1] < 0:\n positions[i,j,t+1] += 10\n elif positions[i,j,t+1] > 10:\n positions[i,j,t+1] -= 10",
"_____no_output_____"
],
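[
"# Added sketch (not part of the original solution): the wrap-around can also be done\n# without nested loops by applying the modulo operator to the whole position array.\n# positions_alt is a hypothetical name used only for this illustration.\npositions_alt = np.zeros((numParticles, 3, numTimeSteps))\npositions_alt[:, :, 0] = np.random.random((numParticles, 3))\nfor t in range(numTimeSteps - 1):\n    jumps = 2 * np.random.random((numParticles, 3)) - 1\n    positions_alt[:, :, t + 1] = np.mod(positions_alt[:, :, t] + jumps, 10)",
"_____no_output_____"
],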
[
"plt.figure(figsize=(12,3))\nfor (i,t) in enumerate([0, 3, 10, 1999]):\n ax = plt.subplot(1, 4, i+1, projection='3d')\n x = positions[:,0,t]\n y = positions[:,1,t]\n z = positions[:,2,t]\n ax.scatter(x, y, z)\n plt.xlim([0, 10])\n plt.ylim([0, 10])\n ax.set_zlim([0, 10])\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n ax.set_zlabel(\"z\")\n plt.title(f\"Time {t}\");",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d04bb94ce38479f8a32b91c465b65ebdc76f2078 | 9,992 | ipynb | Jupyter Notebook | malicious_domain_detect.ipynb | aierwiki/ngram-detection | abe1a5cdc29b8d5c34f95a6d0c19edf5931c8dda | [
"MIT"
] | null | null | null | malicious_domain_detect.ipynb | aierwiki/ngram-detection | abe1a5cdc29b8d5c34f95a6d0c19edf5931c8dda | [
"MIT"
] | null | null | null | malicious_domain_detect.ipynb | aierwiki/ngram-detection | abe1a5cdc29b8d5c34f95a6d0c19edf5931c8dda | [
"MIT"
] | null | null | null | 21.396146 | 123 | 0.514912 | [
[
[
"- 使用ngram进行恶意域名识别\n- 参考论文:https://www.researchgate.net/publication/330843380_Malicious_Domain_Names_Detection_Algorithm_Based_on_N_-Gram",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport tldextract\nimport matplotlib.pyplot as plt\nimport os\nimport re\nimport time\nfrom scipy import sparse\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## 加载数据",
"_____no_output_____"
],
[
"- 加载正常域名",
"_____no_output_____"
]
],
[
[
"df_benign_domain = pd.read_csv('top-1m.csv', index_col=0, header=None).reset_index(drop=True)",
"_____no_output_____"
],
[
"df_benign_domain.columns = ['domain']",
"_____no_output_____"
],
[
"df_benign_domain['label'] = 0",
"_____no_output_____"
]
],
[
[
"- 加载恶意域名",
"_____no_output_____"
]
],
[
[
"df_malicious_domain = pd.read_csv('malicious-domain.csv', engine='python', header=None)",
"_____no_output_____"
],
[
"df_malicious_domain = df_malicious_domain[[1]]",
"_____no_output_____"
],
[
"df_malicious_domain.columns = ['domain']",
"_____no_output_____"
],
[
"df_malicious_domain = df_malicious_domain[df_malicious_domain['domain'] != '-']",
"_____no_output_____"
],
[
"df_malicious_domain['label'] = 1",
"_____no_output_____"
],
[
"df_domain = pd.concat([df_benign_domain, df_malicious_domain], axis=0)",
"_____no_output_____"
],
[
"def remove_tld(domain):\n ext = tldextract.extract(domain)\n if ext.subdomain != '':\n domain = ext.subdomain + '.' + ext.domain\n else:\n domain = ext.domain\n return domain",
"_____no_output_____"
],
[
"df_domain['domain'] = df_domain['domain'].map(lambda x: tldextract.extract(x).domain)",
"_____no_output_____"
]
],
[
[
"## 提取ngram特征",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import CountVectorizer",
"_____no_output_____"
],
[
"domain_list = df_domain[df_domain['label'] == 0]['domain'].values.tolist()",
"_____no_output_____"
],
[
"benign_text_str = '.'.join(domain_list)",
"_____no_output_____"
],
[
"benign_text = re.split(r'[.-]', benign_text_str)",
"_____no_output_____"
],
[
"benign_text = list(filter(lambda x: len(x) >= 3, benign_text))",
"_____no_output_____"
],
[
"def get_ngram_weight_dict(benign_text):\n cv = CountVectorizer(ngram_range = (3, 7), analyzer='char', max_features=100000)\n cv.fit(benign_text)\n feature_names = cv.get_feature_names()\n benign_text_vectors = cv.transform(benign_text)\n ngram_count = benign_text_vectors.sum(axis=0)\n window_sizes = np.array(list(map(lambda x: len(x), feature_names)))\n ngram_weights = np.multiply(np.log2(ngram_count), window_sizes)\n ngram_weights = sparse.csr_matrix(ngram_weights)\n feature_names = cv.get_feature_names()\n ngram_weights_dict = dict()\n for ngram, weight in zip(feature_names, ngram_weights.toarray()[0].tolist()):\n ngram_weights_dict[ngram] = weight\n return ngram_weights_dict",
"_____no_output_____"
],
[
"ngram_weights_dict = get_ngram_weight_dict(benign_text)",
"_____no_output_____"
]
],
[
[
"## 计算域名的信誉值",
"_____no_output_____"
]
],
[
[
"def get_reputation_value(ngram_weights_dict, domain):\n if len(domain) < 3:\n return 1000\n domains = re.split(r'[.-]', domain)\n reputation = 0\n domain_len = 0\n for domain in domains:\n domain_len += len(domain)\n for window_size in range(3, 8):\n for i in range(len(domain) - window_size + 1):\n reputation += ngram_weights_dict.get(domain[i:i+window_size], 0)\n reputation = reputation / domain_len\n return reputation",
"_____no_output_____"
],
[
"get_reputation_value(ngram_weights_dict, 'google')",
"_____no_output_____"
],
[
"get_reputation_value(ngram_weights_dict, 'ta0ba0')",
"_____no_output_____"
],
[
"get_reputation_value(ngram_weights_dict, 'dskdjisuowerwdfskdfj000')",
"_____no_output_____"
],
[
"start = time.time()\ndf_domain['reputation'] = df_domain['domain'].map(lambda x: get_reputation_value(ngram_weights_dict, x))\nend = time.time()\nprint('cost time : {}'.format(end - start))",
"cost time : 9.624819040298462\n"
],
[
"df_domain[df_domain['label'] == 0]['reputation'].describe()",
"_____no_output_____"
],
[
"df_domain[df_domain['label'] == 1]['reputation'].describe()",
"_____no_output_____"
]
],
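[
[
"# Added sketch (not in the original notebook): one simple way to use the reputation\n# score is a threshold; the cutoff of 50 is an illustrative assumption only.\nthreshold = 50\nflagged = df_domain[df_domain['reputation'] < threshold]\nprint(flagged['label'].value_counts())",
"_____no_output_____"
]
],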
[
[
"## 保存模型文件",
"_____no_output_____"
]
],
[
[
"import joblib",
"_____no_output_____"
],
[
"joblib.dump(ngram_weights_dict, 'ngram_weights_dict.m', compress=4)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d04bbcc6b614e814af539a112f0c169c892c68b9 | 57,255 | ipynb | Jupyter Notebook | data/data-preparation.ipynb | song-zi/geopandas-tutorial | 5c0012d5747b03233f51d215cd7f74f59823da66 | [
"BSD-3-Clause"
] | 341 | 2018-04-26T08:46:05.000Z | 2022-03-01T08:13:39.000Z | data/data-preparation.ipynb | glenn6452/geopandas-philippines | 2153a940ca6e810d96d82198e3f05a0b34c6e2f5 | [
"BSD-3-Clause"
] | 6 | 2018-05-07T08:11:24.000Z | 2019-12-23T12:53:21.000Z | data/data-preparation.ipynb | glenn6452/geopandas-philippines | 2153a940ca6e810d96d82198e3f05a0b34c6e2f5 | [
"BSD-3-Clause"
] | 128 | 2018-05-07T07:30:29.000Z | 2022-02-19T17:53:39.000Z | 30.982143 | 201 | 0.400646 | [
[
[
"# Data preparation for tutorial\n\nThis notebook contains the code to convert raw downloaded external data into a cleaned or simplified version for tutorial purposes.\n\n\nThe raw data is expected to be in the `./raw` sub-directory (not included in the git repo).",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport pandas as pd\nimport geopandas",
"_____no_output_____"
]
],
[
[
"## Countries dataset\n\nhttp://www.naturalearthdata.com/downloads/110m-cultural-vectors/110m-admin-0-countries/",
"_____no_output_____"
]
],
[
[
"countries = geopandas.read_file(\"zip://./raw/original_data_ne/ne_110m_admin_0_countries.zip\")",
"_____no_output_____"
],
[
"countries.head()",
"_____no_output_____"
],
[
"len(countries)",
"_____no_output_____"
],
[
"countries_subset = countries[['ADM0_A3', 'NAME', 'CONTINENT', 'POP_EST', 'GDP_MD_EST', 'geometry']]",
"_____no_output_____"
],
[
"countries_subset.columns = countries_subset.columns.str.lower()",
"_____no_output_____"
],
[
"countries_subset = countries_subset.rename(columns={'adm0_a3': 'iso_a3'})",
"_____no_output_____"
],
[
"countries_subset.head()",
"_____no_output_____"
],
[
"countries_subset.to_file(\"ne_110m_admin_0_countries.shp\")",
"_____no_output_____"
]
],
[
[
"## Natural Earth - Cities dataset\n\nhttp://www.naturalearthdata.com/downloads/110m-cultural-vectors/110m-populated-places/ (simple, version 4.0.0, downloaded May 2018)",
"_____no_output_____"
]
],
[
[
"cities = geopandas.read_file(\"zip://./raw/original_data_ne/ne_110m_populated_places_simple.zip\")",
"_____no_output_____"
],
[
"cities.head()",
"_____no_output_____"
],
[
"len(cities)",
"_____no_output_____"
],
[
"cities_subset = cities[['name', 'geometry']]",
"_____no_output_____"
],
[
"cities_subset.head()",
"_____no_output_____"
],
[
"cities_subset.to_file(\"ne_110m_populated_places.shp\")",
"_____no_output_____"
]
],
[
[
"## Natural Earth - Rivers dataset\n\nhttp://www.naturalearthdata.com/downloads/50m-physical-vectors/50m-rivers-lake-centerlines/ (version 4.0.0, downloaded May 2018)",
"_____no_output_____"
]
],
[
[
"rivers = geopandas.read_file(\"zip://./raw/ne_50m_rivers_lake_centerlines.zip\")",
"_____no_output_____"
],
[
"rivers.head()",
"_____no_output_____"
]
],
[
[
"Remove rows with missing geometry:",
"_____no_output_____"
]
],
[
[
"len(rivers)",
"_____no_output_____"
],
[
"rivers = rivers[~rivers.geometry.isna()].reset_index(drop=True)",
"_____no_output_____"
],
[
"len(rivers)",
"_____no_output_____"
]
],
[
[
"Subset of the columns:",
"_____no_output_____"
]
],
[
[
"rivers_subset = rivers[['featurecla', 'name_en', 'geometry']].rename(columns={'name_en': 'name'})",
"_____no_output_____"
],
[
"rivers_subset.head()",
"_____no_output_____"
],
[
"rivers_subset.to_file(\"ne_50m_rivers_lake_centerlines.shp\")",
"_____no_output_____"
]
],
[
[
"## Paris districts",
"_____no_output_____"
],
[
"Source: https://opendata.paris.fr/explore/dataset/quartier_paris/ (downloaded as GeoJSON file on August 20, 2018)\n\nAdministrative districts, polygon dataset",
"_____no_output_____"
]
],
[
[
"districts = geopandas.read_file(\"./raw/quartier_paris.geojson\")",
"_____no_output_____"
],
[
"districts.head()",
"_____no_output_____"
],
[
"districts = districts.rename(columns={'l_qu': 'district_name', 'c_qu': 'id'}).sort_values('id').reset_index(drop=True)",
"_____no_output_____"
]
],
[
[
"Add population data (based on pdfs downloaded from ..):",
"_____no_output_____"
]
],
[
[
"import camelot\n\nimport glob\nfiles = glob.glob(\"../../Downloads/A_*.pdf\")\n\n\nresults = []\n\nfor fname in files:\n print(fname)\n tables = camelot.read_pdf(fname, pages='17-end', flavor='stream')\n\n for t in tables:\n\n df = t.df\n if df.loc[0, 0] == \"1. SUPERFICIES ET DENSITÉS EN 1999\":\n district_name = df.loc[2, 0]\n assert df.loc[10, 0] == 'POPULATION TOTALE EN 1999'\n population = df.loc[10, 1]\n print(t.page)\n print(district_name)\n results.append([district_name, population])\n\ndf = pd.DataFrame(results, columns=['district_name', 'population']) \ndf['population'] = df['population'].str.replace(' ', '').astype('int64') \ndf.to_csv(\"datasets/paris-population.csv\", index=False) ",
"_____no_output_____"
]
],
[
[
"population = pd.read_csv(\"./raw/paris-population.csv\")",
"_____no_output_____"
],
[
"population['temp'] = population.district_name.str.lower()",
"_____no_output_____"
],
[
"population['temp'] = population['temp'].replace({\n 'javel': 'javel 15art',\n 'saint avoye': 'sainte avoie',\n \"saint germain l'auxerrois\": \"st germain l'auxerrois\",\n 'plaine monceau': 'plaine de monceaux',\n 'la chapelle': 'la chapelle'})",
"_____no_output_____"
],
[
"districts['temp'] = (districts.district_name.str.lower().str.replace('-', ' ')\n .str.replace('é', 'e').str.replace('è', 'e').str.replace('ê', 'e').str.replace('ô', 'o'))",
"_____no_output_____"
],
[
"res = pd.merge(districts, population[['population', 'temp']], on='temp', how='outer')",
"_____no_output_____"
],
[
"assert len(res) == len(districts)",
"_____no_output_____"
],
[
"districts = res[['id', 'district_name', 'population', 'geometry']]",
"_____no_output_____"
],
[
"districts.head()",
"_____no_output_____"
],
[
"districts.to_file(\"processed/paris_districts.geojson\", driver='GeoJSON')",
"_____no_output_____"
],
[
"districts = districts.to_crs(epsg=32631)",
"_____no_output_____"
],
[
"districts.to_file(\"paris_districts_utm.geojson\", driver='GeoJSON')",
"_____no_output_____"
]
],
[
[
"## Commerces de Paris",
"_____no_output_____"
],
[
"Source: https://opendata.paris.fr/explore/dataset/commercesparis/ (downloaded as csv file (`commercesparis.csv`) on October 30, 2018)",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"./raw/commercesparis.csv\", sep=';')",
"/home/joris/miniconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py:2698: DtypeWarning: Columns (10) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
],
[
"df.iloc[0]",
"_____no_output_____"
]
],
[
[
"Take subset of the restaurants:",
"_____no_output_____"
]
],
[
[
"restaurants = df[df['CODE ACTIVITE'].str.startswith('CH1', na=False)].copy()",
"_____no_output_____"
],
[
"restaurants['LIBELLE ACTIVITE'].value_counts()",
"_____no_output_____"
],
[
"restaurants = restaurants.dropna(subset=['XY']).reset_index(drop=True)",
"_____no_output_____"
]
],
[
[
"Translate the restaurants and rename column:",
"_____no_output_____"
]
],
[
[
"restaurants['LIBELLE ACTIVITE'] = restaurants['LIBELLE ACTIVITE'].replace({\n 'Restaurant traditionnel français': 'Traditional French restaurant',\n 'Restaurant asiatique': 'Asian restaurant',\n 'Restaurant européen': 'European restuarant',\n 'Restaurant indien, pakistanais et Moyen Orient': 'Indian / Middle Eastern restaurant',\n 'Restaurant maghrébin': 'Maghrebian restaurant',\n 'Restaurant africain': 'African restaurant',\n 'Autre restaurant du monde': 'Other world restaurant',\n 'Restaurant central et sud américain': 'Central and South American restuarant',\n 'Restaurant antillais': 'Caribbean restaurant'\n})",
"_____no_output_____"
],
[
"restaurants = restaurants.rename(columns={'LIBELLE ACTIVITE': 'type'})",
"_____no_output_____"
]
],
[
[
"Create GeoDataFrame",
"_____no_output_____"
]
],
[
[
"from shapely.geometry import Point",
"_____no_output_____"
],
[
"restaurants['geometry'] = restaurants['XY'].str.split(', ').map(lambda x: Point(float(x[1]), float(x[0])))",
"_____no_output_____"
],
[
"restaurants = geopandas.GeoDataFrame(restaurants[['type', 'geometry']], crs={'init': 'epsg:4326'})",
"_____no_output_____"
],
[
"restaurants.head()",
"_____no_output_____"
],
[
"restaurants.to_file(\"processed/paris_restaurants.gpkg\", driver='GPKG')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
d04bf67b176e0a8922e2bf660a565577ccf34968 | 6,134 | ipynb | Jupyter Notebook | note_jupyter/baks/chapter14.02-Linear-Transformations.ipynb | zmy20062010/numerical_analysis | cbdb39b2ec4339681a6c9454c6cbb4ff3931c914 | [
"MIT"
] | null | null | null | note_jupyter/baks/chapter14.02-Linear-Transformations.ipynb | zmy20062010/numerical_analysis | cbdb39b2ec4339681a6c9454c6cbb4ff3931c914 | [
"MIT"
] | null | null | null | note_jupyter/baks/chapter14.02-Linear-Transformations.ipynb | zmy20062010/numerical_analysis | cbdb39b2ec4339681a6c9454c6cbb4ff3931c914 | [
"MIT"
] | null | null | null | 42.013699 | 959 | 0.598305 | [
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"></ul></div>",
"_____no_output_____"
],
[
"<!--BOOK_INFORMATION-->\n<img align=\"left\" style=\"padding-right:10px;\" src=\"images/book_cover.jpg\" width=\"120\">\n\n*This notebook contains an excerpt from the [Python Programming and Numerical Methods - A Guide for Engineers and Scientists](https://www.elsevier.com/books/python-programming-and-numerical-methods/kong/978-0-12-819549-9), the content is also available at [Berkeley Python Numerical Methods](https://pythonnumericalmethods.berkeley.edu/notebooks/Index.html).*\n\n*The copyright of the book belongs to Elsevier. We also have this interactive book online for a better learning experience. The code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work on [Elsevier](https://www.elsevier.com/books/python-programming-and-numerical-methods/kong/978-0-12-819549-9) or [Amazon](https://www.amazon.com/Python-Programming-Numerical-Methods-Scientists/dp/0128195495/ref=sr_1_1?dchild=1&keywords=Python+Programming+and+Numerical+Methods+-+A+Guide+for+Engineers+and+Scientists&qid=1604761352&sr=8-1)!*",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [14.1 Basics of Linear Algebra](chapter14.01-Basics-of-Linear-Algebra.ipynb) | [Contents](Index.ipynb) | [14.3 Systems of Linear Equations](chapter14.03-Systems-of-Linear-Equations.ipynb) >",
"_____no_output_____"
],
[
"# Linear Transformations",
"_____no_output_____"
],
[
"For vectors $x$ and $y$, and scalars $a$ and $b$, it is sufficient to say that a function, $F$, is a **linear transformation** if \n\n$$\nF(ax + by) = aF(x) + bF(y).\n$$\n\nIt can be shown that multiplying an ${m} \\times {n}$ matrix, $A$, and an ${n} \\times {1}$ vector, $v$, of compatible size is a linear transformation of $v$. Therefore from this point forward, a matrix will be synonymous with a linear transformation function.\n\n**TRY IT!** Let $x$ be a vector and let $F(x)$ be defined by $F(x) = Ax$ where $A$ is a rectangular matrix of appropriate size. Show that $F(x)$ is a linear transformation.\n\nProof:\nSince $F(x) = Ax$, then\nfor vectors $v$ and $w$, and scalars $a$ and $b$, $F(av +\nbw) = A(av + bw)$ (by definition of $F$)$=$$aAv + bAw$ (by\ndistributive property of matrix multiplication)$=$$aF(v) +\nbF(w)$ (by definition of $F$).\n\nIf $A$ is an ${m} \\times {n}$ matrix, then there are two important subpsaces associated with $A$, one is ${\\mathbb{R}}^n$, the other is ${\\mathbb{R}}^m$. The **domain** of $A$ is a subspace of ${\\mathbb{R}}^n$. It is the set of all vectors that can be multiplied by $A$ on the right. The **range** of $A$ is a subspace of ${\\mathbb{R}}^m$. It is the set of all vectors $y$ such that $y=Ax$. It can be denoted as $\\mathcal{R}(\\mathbf{A})$, where $\\mathcal{R}(\\mathbf{A}) = \\{y \\in {\\mathbb{R}}^m: Ax = y\\}$. Another way to think about the range of $A$ is the set of all linear combinations of the columns in $A$, where $x_i$ is the coefficient of the ith column in $A$. The **null space** of $A$, defined as $\\mathcal{N}(\\mathbf{A}) = \\{x \\in {\\mathbb{R}}^n: Ax = 0_m\\}$, is the subset of vectors in the domain of $A, x$, such that $Ax = 0_m$, where $0_m$ is the **zero vector** (i.e., a vector in ${\\mathbb{R}}^m$ with all zeros).\n\n**TRY IT!** Let $A = [[1, 0, 0], [0, 1, 0], [0, 0, 0]]$ and let the domain of $A$ be ${\\mathbb{R}}^3$. Characterize the range and nullspace of $A$.\n \nLet $v = [x,y,z]$ be a vector in ${\\mathbb{R}}^3$. Then $u = Av$ is the vector $u = [x,y,0]$. Since $x,y\\in {\\mathbb{R}}$, the range of $A$ is the $x$-$y$ plane at $z = 0$.\n\nLet $v = [0,0,z]$ for $z\\in {\\mathbb{R}}$. Then $u = Av$ is the vector $u = [0, 0, 0]$. Therefore, the nullspace of $A$ is the $z$-axis (i.e., the set of vectors $[0,0,z]$ $z\\in {\\mathbb{R}}$).\n\nTherefore, this linear transformation \"flattens\" any $z$-component from a vector.",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [14.1 Basics of Linear Algebra](chapter14.01-Basics-of-Linear-Algebra.ipynb) | [Contents](Index.ipynb) | [14.3 Systems of Linear Equations](chapter14.03-Systems-of-Linear-Equations.ipynb) >",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
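The linear-transformations notebook above argues that F(x) = Ax is linear and that the example matrix A = [[1, 0, 0], [0, 1, 0], [0, 0, 0]] has the x-y plane as its range and the z-axis as its null space. Here is a small NumPy check of those two claims; it is an illustrative sketch, not part of the original notebook.

```python
import numpy as np

A = np.array([[1, 0, 0],
              [0, 1, 0],
              [0, 0, 0]], dtype=float)

def F(x):
    return A @ x

# Linearity: F(a*v + b*w) == a*F(v) + b*F(w) for arbitrary vectors and scalars.
rng = np.random.default_rng(0)
v, w = rng.normal(size=3), rng.normal(size=3)
a, b = 2.5, -1.3
assert np.allclose(F(a * v + b * w), a * F(v) + b * F(w))

# Range: any image vector has its z-component flattened to zero.
print(F(np.array([3.0, -2.0, 7.0])))  # [ 3. -2.  0.]

# Null space: vectors along the z-axis map to the zero vector.
print(F(np.array([0.0, 0.0, 5.0])))   # [0. 0. 0.]
```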
d04c16652447f5b53913248f775f53331fb1b403 | 23,465 | ipynb | Jupyter Notebook | scrap/MCAnpResearch.ipynb | georg-cantor/pyanp | e3506eeb93b487eeb3f6cd96dfb9aa309c210a57 | [
"MIT"
] | 17 | 2019-01-03T08:11:40.000Z | 2021-12-27T03:19:26.000Z | scrap/MCAnpResearch.ipynb | georg-cantor/pyanp | e3506eeb93b487eeb3f6cd96dfb9aa309c210a57 | [
"MIT"
] | 57 | 2018-03-31T13:18:39.000Z | 2020-12-28T19:37:33.000Z | scrap/MCAnpResearch.ipynb | georg-cantor/pyanp | e3506eeb93b487eeb3f6cd96dfb9aa309c210a57 | [
"MIT"
] | 14 | 2018-03-17T18:31:36.000Z | 2022-03-11T16:52:38.000Z | 30.238402 | 108 | 0.485574 | [
[
[
"# Another attempt at MC Simulation on AHP/ANP",
"_____no_output_____"
],
[
"The ideas are the following:\n\n1. There is a class MCAnp that has a sim() method that will simulate any Prioritizer\n2. MCAnp also has a sim_fill() function that does fills in the data needed for a single simulation",
"_____no_output_____"
],
[
"## Import needed libs",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport sys \nimport os\nsys.path.insert(0, os.path.abspath(\"../\"))\nimport numpy as np\nfrom scipy.stats import triang\nfrom copy import deepcopy\nfrom pyanp.priority import pri_eigen\nfrom pyanp.pairwise import Pairwise\nfrom pyanp.ahptree import AHPTree, AHPTreeNode\nfrom pyanp.direct import Direct",
"_____no_output_____"
]
],
[
[
"# MCAnp class",
"_____no_output_____"
]
],
[
[
"def ascale_mscale(val:(float,int))->float:\n if val is None:\n return 0\n elif val < 0:\n val = -val\n val += 1\n val = 1.0/val\n return val\n else:\n return val+1\n \ndef mscale_ascale(val:(float,int))->float:\n if val == 0:\n return None\n elif val >= 1:\n return val - 1\n else:\n val = 1/val\n val = val-1\n return -val",
"_____no_output_____"
],
[
"DEFAULT_DISTRIB = triang(c=0.5, loc=-1.5, scale=3.0)\ndef avote_random(avote):\n \"\"\"\n Returns a random additive vote in the neighborhood of the additive vote avote\n according to the default disribution DEFAULT_DISTRIB\n \"\"\"\n if avote is None:\n return None\n raw_val = DEFAULT_DISTRIB.rvs(size=1)[0]\n return avote+raw_val\n\n\ndef mvote_random(mvote):\n \"\"\"\n Returns a random multiplicative vote in the neighborhhod of the multiplicative vote mvote\n according to the default distribution DEFAULT_DISTRIB. This is handled by converting\n the multiplicative vote to an additive vote, calling avote_random() and converting the\n result back to an additive vote\n \"\"\"\n avote = mscale_ascale(mvote)\n rval_a = avote_random(avote)\n rval = ascale_mscale(rval_a)\n return rval\n\ndef direct_random(direct, max_percent_chg=0.2)->float:\n \"\"\"\n Returns a random direct data value near the value `direct'. This function\n creates a random percent change, between -max_percent_chg and +max_percent_chg, and\n then changes the direct value by that factor, and returns it.\n \"\"\"\n pchg = np.random.uniform(low=-max_percent_chg, high=max_percent_chg)\n return direct * (1 + pchg)\n \nclass MCAnp:\n def __init__(self):\n # Setup the random pairwise vote generator\n self.pwvote_random = mvote_random\n # Setup the random direct vote generator\n self.directvote_random = direct_random\n # Set the default user to use across the simulation\n # follows the standard from Pairwise class, i.e. it can be a list\n # of usernames, a single username, or None (which means total group average)\n self.username = None\n # What is the pairwise priority calculation?\n self.pwprioritycalc = pri_eigen\n \n def sim_fill(self, src, dest):\n \"\"\"\n Fills in data on a structure prior to doing the simulation calculations.\n This function calls sim_NAME_fill depending on the class of the src object.\n If the dest object is None, we create a dest object by calling deepcopy().\n In either case, we always return the allocated dest object\n \"\"\"\n if dest is None:\n dest = deepcopy(src)\n # Which kind of src do we have\n if isinstance(src, np.ndarray):\n # We are simulating on a pairwise comparison matrix\n return self.sim_pwmat_fill(src, dest)\n elif isinstance(src, Pairwise):\n # We are simulating on a multi-user pairwise comparison object\n return self.sim_pw_fill(src, dest)\n elif isinstance(src, AHPTree):\n # We are simulating on an ahp tree object\n return self.sim_ahptree_fill(src, dest)\n elif isinstance(src, Direct):\n # We are simulating on an ahp direct data\n return self.sim_direct_fill(src, dest)\n else:\n raise ValueError(\"Src class is not handled, it is \"+type(src).__name__)\n \n def sim_pwmat_fill(self, pwsrc:np.ndarray, pwdest:np.ndarray=None)->np.ndarray:\n \"\"\"\n Fills in a pairwise comparison matrix with noisy votes based on pwsrc\n If pwsrc is None, we create a new matrix, otherwise we fill in pwdest\n with noisy values based on pwsrc and the self.pwvote_random parameter.\n In either case, we return the resulting noisy matrix\n \"\"\"\n if pwdest is None:\n pwdest = deepcopy(pwsrc)\n size = len(pwsrc)\n for row in range(size):\n pwdest[row,row] = 1.0\n for col in range(row+1, size):\n val = pwsrc[row,col]\n if val >= 1:\n nvote = self.pwvote_random(val)\n pwdest[row, col]=nvote\n pwdest[col, row]=1/nvote\n elif val!= 0:\n nvote = self.pwvote_random(1/val)\n pwdest[col, row] = nvote\n pwdest[row, col] = 1/nvote\n else:\n pwdest[row, col] = nvote\n pwdest[col, row] = nvote\n return pwdest\n \n def 
sim_pwmat(self, pwsrc:np.ndarray, pwdest:np.ndarray=None)->np.ndarray:\n \"\"\"\n creates a noisy pw comparison matrix from pwsrc, stores the matrix in pwdest (which\n is created if pwdest is None) calculates the resulting priority and returns that\n \"\"\"\n pwdest = self.sim_pwmat_fill(pwsrc, pwdest)\n rval = self.pwprioritycalc(pwdest)\n return rval\n \n def sim_pw(self, pwsrc:Pairwise, pwdest:Pairwise)->np.ndarray:\n \"\"\"\n Performs a simulation on a pairwise comparison matrix object and returns the\n resulting priorities\n \"\"\"\n pwdest = self.sim_pw_fill(pwsrc, pwdest)\n mat = pwdest.matrix(self.username)\n rval = self.pwprioritycalc(mat)\n return rval\n \n \n def sim_pw_fill(self, pwsrc:Pairwise, pwdest:Pairwise=None)->Pairwise:\n \"\"\"\n Fills in the pairwise comparison structure of pwdest with noisy pairwise data from pwsrc.\n If pwdest is None, we create one first, then fill in. In either case, we return the pwdest\n object with new noisy data in it.\n \"\"\"\n if pwdest is None:\n pwdest = deepcopy(pwsrc)\n for user in pwsrc.usernames():\n srcmat = pwsrc.matrix(user)\n destmat = pwdest.matrix(user)\n self.sim_pwmat_fill(srcmat, destmat)\n return pwdest\n \n def sim_direct_fill(self, directsrc:Direct, directdest:Direct=None)->Direct:\n \"\"\"\n Fills in the direct data structure of directdest with noisy data from directsrc.\n If directdest is None, we create on as a deep copy of directsrc, then fill in.\n In either case, we return the directdest object with new noisy data in it.\n \"\"\"\n if directdest is None:\n directdest = deepcopy(directsrc)\n for altpos in range(len(directdest)):\n orig = directsrc[altpos]\n newvote = self.directvote_random(orig)\n directdest.data[altpos] = newvote\n return directdest\n \n def sim_direct(self, directsrc:Direct, directdest:Direct=None)->np.ndarray:\n \"\"\"\n Simulates for direct data\n \"\"\"\n directdest = self.sim_direct_fill(directsrc, directdest)\n return directdest.priority()\n \n def sim_ahptree_fill(self, ahpsrc:AHPTree, ahpdest:AHPTree)->AHPTree:\n \"\"\"\n Fills in the ahp tree structure of ahpdest with noisy data from ahpsrc.\n If ahpdest is None, we create one as a deepcopy of ahpsrc, then fill in.\n In either case, we return the ahpdest object with new noisy data in it.\n \"\"\"\n if ahpdest is None:\n ahpdest = deepcopy(ahpsrc)\n self.sim_ahptreenode_fill(ahpsrc.root, ahpdest.root)\n return ahpdest\n \n def sim_ahptreenode_fill(self, nodesrc:AHPTreeNode, nodedest:AHPTreeNode)->AHPTreeNode:\n \"\"\"\n Fills in data in an AHPTree\n \"\"\"\n #Okay, first we fill in for the alt_prioritizer\n if nodesrc.alt_prioritizer is not None:\n self.sim_fill(nodesrc.alt_prioritizer, nodedest.alt_prioritizer)\n #Now wefill in the child prioritizer\n if nodesrc.child_prioritizer is not None:\n self.sim_fill(nodesrc.child_prioritizer, nodedest.child_prioritizer)\n #Now for each child, fill in\n for childsrc, childdest in zip(nodesrc.children, nodedest.children):\n self.sim_ahptreenode_fill(childsrc, childdest)\n #We are done, return the dest\n return nodedest\n \n def sim_ahptree(self, ahpsrc:AHPTree, ahpdest:AHPTree)->np.ndarray:\n \"\"\"\n Perform the actual simulation\n \"\"\"\n ahpdest = self.sim_ahptree_fill(ahpsrc, ahpdest)\n return ahpdest.priority()",
"_____no_output_____"
],
[
"mc = MCAnp()",
"_____no_output_____"
],
[
"pw = np.array([\n [1, 1/2, 3],\n [2, 1, 5],\n [1/3, 1/5, 1]\n])\nrpw= mc.sim_pwmat_fill(pw)\nrpw",
"_____no_output_____"
],
[
"[mc.sim_pwmat(pw) for i in range(20)]",
"_____no_output_____"
],
[
"pwobj = Pairwise(alts=['alt '+str(i) for i in range(3)])\npwobj.vote_matrix(user_name='u1', val=pw)",
"_____no_output_____"
]
],
[
[
"## Checking that the deep copy is actually a deep copy\nFor some reason deepcopy was not copying the matrix, I had to overwrite\n__deepcopy__ in Pairwise",
"_____no_output_____"
]
],
[
[
"pwobj.matrix('u1')",
"_____no_output_____"
],
[
"rpwobj = pwobj.__deepcopy__()",
"_____no_output_____"
],
[
"a=rpwobj\nb=pwobj\na.df",
"_____no_output_____"
],
[
"display(a.df.loc['u1', 'Matrix']) \ndisplay(b.df.loc['u1', 'Matrix'])",
"_____no_output_____"
],
[
"display(a.matrix('u1') is b.matrix('u1'))\ndisplay(a.matrix('u1') == b.matrix('u1'))",
"_____no_output_____"
]
],
[
[
"## Now let's try to simulate",
"_____no_output_____"
]
],
[
[
"[mc.sim_pw(pwobj, rpwobj) for i in range(20)]",
"_____no_output_____"
],
[
"pwobj.matrix('u1')",
"_____no_output_____"
]
],
[
[
"## Try to simulate a direct data",
"_____no_output_____"
]
],
[
[
"dd = Direct(alt_names=['a1', 'a2', 'a3'])\ndd.data[0]=0.5\ndd.data[1]=0.3\ndd.data[2]=0.2",
"_____no_output_____"
],
[
"rdd=mc.sim_direct_fill(dd)\nrdd.data",
"_____no_output_____"
]
],
[
[
"## Simulate an ahptree",
"_____no_output_____"
]
],
[
[
"alts=['alt '+str(i) for i in range(3)]\ntree = AHPTree(alt_names=alts)\nkids = ['crit '+str(i) for i in range(4)]\nfor kid in kids:\n tree.add_child(kid)\n node = tree.get_node(kid)\n direct = node.alt_prioritizer\n s = 0\n for alt in alts:\n direct[alt] = np.random.uniform()\n s += direct[alt]\n if s != 0:\n for alt in alts:\n direct[alt] /= s\n ",
"_____no_output_____"
],
[
"tree.priority()",
"_____no_output_____"
],
[
"mc.sim_ahptree(tree, None)",
"_____no_output_____"
],
[
"tree.priority()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
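The MCAnp notebook above jitters pairwise votes by converting a multiplicative (Saaty-scale) vote to an additive vote, adding triangular noise on [-1.5, 1.5], and converting back, then recomputes priorities. The sketch below reproduces that round trip without a pyanp dependency; `priority_eigen` is a plain power-iteration stand-in for pyanp's `pri_eigen`, not the library function itself, and the sample matrix mirrors the one used in the notebook.

```python
import numpy as np
from scipy.stats import triang

# Triangular noise on the additive scale: support [-1.5, 1.5], peak at 0 (as in the notebook).
NOISE = triang(c=0.5, loc=-1.5, scale=3.0)

def mscale_ascale(val):
    """Multiplicative (Saaty) vote -> additive vote; 0 maps to None."""
    if val == 0:
        return None
    return val - 1 if val >= 1 else -(1.0 / val - 1)

def ascale_mscale(val):
    """Additive vote -> multiplicative vote; None maps to 0."""
    if val is None:
        return 0
    return val + 1 if val >= 0 else 1.0 / (-val + 1)

def mvote_random(mvote):
    """Noisy multiplicative vote: to additive scale, add noise, back to multiplicative."""
    return ascale_mscale(mscale_ascale(mvote) + NOISE.rvs(size=1)[0])

def priority_eigen(mat, iters=200):
    """Stand-in for pyanp's pri_eigen: normalized principal eigenvector via power iteration."""
    v = np.ones(mat.shape[0])
    for _ in range(iters):
        v = mat @ v
        v /= v.sum()
    return v

# Jitter the upper triangle of a pairwise matrix, keep it reciprocal, and re-prioritize.
pw = np.array([[1, 1/2, 3], [2, 1, 5], [1/3, 1/5, 1]], dtype=float)
noisy = pw.copy()
for r in range(3):
    for c in range(r + 1, 3):
        nv = mvote_random(pw[r, c])
        noisy[r, c], noisy[c, r] = nv, 1.0 / nv
print(priority_eigen(noisy))
```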
d04c2ea695576feb7487a051e64f4f48e6c3e565 | 18,776 | ipynb | Jupyter Notebook | labs/lab05.ipynb | aoguedao/mat281_2020S2 | 276b694790c21b4ffdaef575b08bacebd884e560 | [
"BSD-3-Clause"
] | 2 | 2021-08-30T15:58:38.000Z | 2021-10-11T20:06:11.000Z | labs/lab05.ipynb | aoguedao/mat281_2020S2 | 276b694790c21b4ffdaef575b08bacebd884e560 | [
"BSD-3-Clause"
] | null | null | null | labs/lab05.ipynb | aoguedao/mat281_2020S2 | 276b694790c21b4ffdaef575b08bacebd884e560 | [
"BSD-3-Clause"
] | 6 | 2020-09-04T18:43:24.000Z | 2021-10-04T03:20:08.000Z | 31.986371 | 1,755 | 0.563965 | [
[
[
"# Laboratorio 5",
"_____no_output_____"
],
[
"## Datos: _European Union lesbian, gay, bisexual and transgender survey (2012)_\n\nLink a los datos [aquí](https://www.kaggle.com/ruslankl/european-union-lgbt-survey-2012).\n\n### Contexto\n\nLa FRA (Agencia de Derechos Fundamentales) realizó una encuesta en línea para identificar cómo las personas lesbianas, gays, bisexuales y transgénero (LGBT) que viven en la Unión Europea y Croacia experimentan el cumplimiento de sus derechos fundamentales. La evidencia producida por la encuesta apoyará el desarrollo de leyes y políticas más efectivas para combatir la discriminación, la violencia y el acoso, mejorando la igualdad de trato en toda la sociedad. La necesidad de una encuesta de este tipo en toda la UE se hizo evidente después de la publicación en 2009 del primer informe de la FRA sobre la homofobia y la discriminación por motivos de orientación sexual o identidad de género, que destacó la ausencia de datos comparables. La Comisión Europea solicitó a FRA que recopilara datos comparables en toda la UE sobre este tema. FRA organizó la recopilación de datos en forma de una encuesta en línea que abarca todos los Estados miembros de la UE y Croacia. Los encuestados eran personas mayores de 18 años, que se identifican como lesbianas, homosexuales, bisexuales o transgénero, de forma anónima. La encuesta se hizo disponible en línea, de abril a julio de 2012, en los 23 idiomas oficiales de la UE (excepto irlandés) más catalán, croata, luxemburgués, ruso y turco. En total, 93,079 personas LGBT completaron la encuesta. Los expertos internos de FRA diseñaron la encuesta que fue implementada por Gallup, uno de los líderes del mercado en encuestas a gran escala. Además, organizaciones de la sociedad civil como ILGA-Europa (Región Europea de la Asociación Internacional de Lesbianas, Gays, Bisexuales, Trans e Intersexuales) y Transgender Europe (TGEU) brindaron asesoramiento sobre cómo acercarse mejor a las personas LGBT.\n\nPuede encontrar más información sobre la metodología de la encuesta en el [__Informe técnico de la encuesta LGBT de la UE. Metodología, encuesta en línea, cuestionario y muestra__](https://fra.europa.eu/sites/default/files/eu-lgbt-survey-technical-report_en.pdf).\n\n### Contenido\n\nEl conjunto de datos consta de 5 archivos .csv que representan 5 bloques de preguntas: vida cotidiana, discriminación, violencia y acoso, conciencia de los derechos, preguntas específicas de personas transgénero.\n\nEl esquema de todas las tablas es idéntico:\n\n* `CountryCode` - name of the country\n* `subset` - Lesbian, Gay, Bisexual women, Bisexual men or Transgender (for Transgender Specific Questions table the value is only Transgender)\n* `question_code` - unique code ID for the question\n* `question_label` - full question text\n* `answer` - answer given\n* `percentage`\n* `notes` - [0]: small sample size; [1]: NA due to small sample size; [2]: missing value\n\nEn el laboratorio de hoy solo utilizaremos los relacionados a la vida cotidiana, disponibles en el archivo `LGBT_Survey_DailyLife.csv` dentro de la carpeta `data`.",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n%matplotlib inline",
"_____no_output_____"
],
[
"daily_life_raw = pd.read_csv(os.path.join(\"..\", \"data\", \"LGBT_Survey_DailyLife.csv\"))\ndaily_life_raw.head()",
"_____no_output_____"
],
[
"daily_life_raw.info()",
"_____no_output_____"
],
[
"daily_life_raw.describe(include=\"all\").T",
"_____no_output_____"
],
[
"questions = (\n daily_life_raw.loc[: , [\"question_code\", \"question_label\"]]\n .drop_duplicates()\n .set_index(\"question_code\")\n .squeeze()\n)\nfor idx, value in questions.items():\n print(f\"Question code {idx}:\\n\\n{value}\\n\\n\")",
"_____no_output_____"
]
],
[
[
"### Preprocesamiento de datos",
"_____no_output_____"
],
[
"¿Te fijaste que la columna `percentage` no es numérica? Eso es por los registros con notes `[1]`, por lo que los eliminaremos.",
"_____no_output_____"
]
],
[
[
"daily_life_raw.notes.unique()",
"_____no_output_____"
],
[
"daily_life = (\n daily_life_raw.query(\"notes != ' [1] '\")\n .astype({\"percentage\": \"int\"})\n .drop(columns=[\"question_label\", \"notes\"])\n .rename(columns={\"CountryCode\": \"country\"})\n)\ndaily_life.head()",
"_____no_output_____"
]
],
[
[
"## Ejercicio 1\n\n(1 pto)\n\n¿A qué tipo de dato (nominal, ordinal, discreto, continuo) corresponde cada columna del DataFrame `daily_life`?\n\nRecomendación, mira los valores únicos de cada columna.",
"_____no_output_____"
]
],
[
[
"daily_life.dtypes",
"_____no_output_____"
],
[
"# FREE STYLE #",
"_____no_output_____"
]
],
[
[
"__Respuesta:__\n\n* `country`: \n* `subset`: \n* `question_code`:\n* `answer`: \n* `percentage`: ",
"_____no_output_____"
],
[
"## Ejercicio 2 \n\n(1 pto)\n\nCrea un nuevo dataframe `df1` tal que solo posea registros de Bélgica, la pregunta con código `b1_b` y que hayan respondido _Very widespread_.\n\nAhora, crea un gráfico de barras vertical con la función `bar` de `matplotlib` para mostrar el porcentaje de respuestas por cada grupo. La figura debe ser de tamaño 10 x 6 y el color de las barras verde.",
"_____no_output_____"
]
],
[
[
"print(f\"Question b1_b:\\n\\n{questions['b1_b']}\")",
"_____no_output_____"
],
[
"df1 = # FIX ME #\ndf1",
"_____no_output_____"
],
[
"x = # FIX ME #\ny = # FIX ME #\n\nfig = plt.figure(# FIX ME #)\n\nplt# FIX ME #\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Ejercicio 3\n\n(1 pto)\n\nRespecto a la pregunta con código `g5`, ¿Cuál es el porcentaje promedio por cada valor de la respuesta (notar que la respuestas a las preguntas son numéricas)?",
"_____no_output_____"
]
],
[
[
"print(f\"Question g5:\\n\\n{questions['g5']}\")",
"_____no_output_____"
]
],
[
[
"Crea un DataFrame llamado `df2` tal que:\n\n1. Solo sean registros con la pregunta con código `g5`\n2. Cambia el tipo de la columna `answer` a `int`.\n3. Agrupa por país y respuesta y calcula el promedio a la columna porcentaje (usa `agg`).\n4. Resetea los índices.",
"_____no_output_____"
]
],
[
[
"df2 = (\n # FIX ME #\n)\ndf2",
"_____no_output_____"
]
],
[
[
"Crea un DataFrame llamado `df2_mean` tal que:\n\n1. Agrupa `df2` por respuesta y calcula el promedio del porcentaje.\n2. Resetea los índices.",
"_____no_output_____"
]
],
[
[
"df2_mean = df2.# FIX ME #\ndf2_mean.head()",
"_____no_output_____"
]
],
[
[
"Ahora, grafica lo siguiente:\n\n1. Una figura con dos columnas, tamaño de figura 15 x 12 y que compartan eje x y eje y. Usar `plt.subplots`.\n2. Para el primer _Axe_ (`ax1`), haz un _scatter plot_ tal que el eje x sea los valores de respuestas de `df2`, y el eye y corresponda a los porcentajes de `df2`. Recuerda que en este caso corresponde a promedios por país, por lo que habrán más de 10 puntos en el gráfico..\n3. Para el segundo _Axe_ (`ax2`), haz un gráfico de barras horizontal tal que el eje x sea los valores de respuestas de `df2_mean`, y el eye y corresponda a los porcentajes de `df2_mean`. ",
"_____no_output_____"
]
],
[
[
"x = # FIX ME #\ny = # FIX ME #\n\nx_mean = # FIX ME #s\ny_mean = # FIX ME #\n\n\nfig, (ax1, ax2) = plt.subplots(# FIX ME #)\n\nax1.# FIX ME #\nax1.grid(alpha=0.3)\n\nax2.# FIX ME #\nax2.grid(alpha=0.3)\n\nfig.show()",
"_____no_output_____"
]
],
[
[
"## Ejercicio 4\n\n(1 pto)",
"_____no_output_____"
],
[
"Respecto a la misma pregunta `g5`, cómo se distribuyen los porcentajes en promedio para cada país - grupo?\n\nUtilizaremos el mapa de calor presentado en la clase, para ello es necesario procesar un poco los datos para conformar los elementos que se necesitan.\n\nCrea un DataFrame llamado `df3` tal que:\n\n1. Solo sean registros con la pregunta con código `g5`\n2. Cambia el tipo de la columna `answer` a `int`.\n3. Agrupa por país y subset, luego calcula el promedio a la columna porcentaje (usa `agg`).\n4. Resetea los índices.\n5. Pivotea tal que los índices sean los países, las columnas los grupos y los valores el promedio de porcentajes.\n6. Llena los valores nulos con cero. Usa `fillna`.",
"_____no_output_____"
]
],
[
[
"## Code from:\n# https://matplotlib.org/3.1.1/gallery/images_contours_and_fields/image_annotated_heatmap.html#sphx-glr-gallery-images-contours-and-fields-image-annotated-heatmap-py\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (N, M).\n row_labels\n A list or array of length N with the labels for the rows.\n col_labels\n A list or array of length M with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwargs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar\n\n\ndef annotate_heatmap(im, data=None, valfmt=\"{x:.2f}\",\n textcolors=[\"black\", \"white\"],\n threshold=None, **textkw):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A list or array of two color specifications. The first is used for\n values below a threshold, the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. 
Optional.\n **kwargs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max())/2.\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\",\n verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)\n texts.append(text)\n\n return texts",
"_____no_output_____"
],
[
"df3 = (\n # FIX ME #\n)\ndf3.head()",
"_____no_output_____"
]
],
[
[
"Finalmente, los ingredientes para el heat map son:",
"_____no_output_____"
]
],
[
[
"countries = df3.index.tolist()\nsubsets = df3.columns.tolist()\nanswers = df3.values",
"_____no_output_____"
]
],
[
[
"El mapa de calor debe ser de la siguiente manera:\n\n* Tamaño figura: 15 x 20\n* cmap = \"YlGn\"\n* cbarlabel = \"Porcentaje promedio (%)\"\n* Precición en las anotaciones: Flotante con dos decimales.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(# FIX ME #)\n\nim, cbar = heatmap(# FIX ME #\")\ntexts = annotate_heatmap(# FIX ME #)\n\nfig.tight_layout()\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
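Exercise 4 in the lab above asks for a filter → type cast → groupby/agg → pivot → fillna chain to feed the heat map. Below is a minimal sketch of that chain on an invented four-row frame; only the column names follow the lab's schema, and the survey values are made up for illustration.

```python
import pandas as pd

# Invented stand-in for the survey frame; only the column names follow the lab's schema.
daily_life = pd.DataFrame({
    "country":       ["Austria", "Austria", "Belgium", "Belgium"],
    "subset":        ["Gay", "Lesbian", "Gay", "Lesbian"],
    "question_code": ["g5", "g5", "g5", "g5"],
    "answer":        ["3", "7", "5", "9"],
    "percentage":    [20, 10, 15, 25],
})

# Filter -> cast -> groupby/agg -> reset_index -> pivot -> fillna, as exercise 4 requests.
df3 = (
    daily_life.query("question_code == 'g5'")
              .astype({"answer": "int"})
              .groupby(["country", "subset"])
              .agg({"percentage": "mean"})
              .reset_index()
              .pivot(index="country", columns="subset", values="percentage")
              .fillna(0)
)
print(df3)
```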
d04c399eeaddedfd304a61bf53dc893874b6768d | 170,062 | ipynb | Jupyter Notebook | talktorials/1_ChEMBL/T1_ChEMBL.ipynb | speleo3/TeachOpenCADD | 0218363bb3a3870882efcb14571b1f63df8380b9 | [
"CC-BY-4.0"
] | 2 | 2021-05-25T02:48:05.000Z | 2022-01-07T23:29:20.000Z | talktorials/1_ChEMBL/T1_ChEMBL.ipynb | speleo3/TeachOpenCADD | 0218363bb3a3870882efcb14571b1f63df8380b9 | [
"CC-BY-4.0"
] | null | null | null | talktorials/1_ChEMBL/T1_ChEMBL.ipynb | speleo3/TeachOpenCADD | 0218363bb3a3870882efcb14571b1f63df8380b9 | [
"CC-BY-4.0"
] | 1 | 2021-05-24T23:09:17.000Z | 2021-05-24T23:09:17.000Z | 90.506652 | 9,643 | 0.697957 | [
[
[
"# Talktorial 1\n\n# Compound data acquisition (ChEMBL)\n\n#### Developed in the CADD seminars 2017 and 2018, AG Volkamer, Charité/FU Berlin \n\nPaula Junge and Svetlana Leng",
"_____no_output_____"
],
[
"## Aim of this talktorial\n\nWe learn how to extract data from ChEMBL:\n\n* Find ligands which were tested on a certain target\n* Filter by available bioactivity data\n* Calculate pIC50 values\n* Merge dataframes and draw extracted molecules\n\n## Learning goals\n\n\n### Theory\n\n* ChEMBL database\n * ChEMBL web services\n * ChEMBL webresource client\n* Compound activity measures\n * IC50\n * pIC50\n\n### Practical\n \nGoal: Get list of compounds with bioactivity data for a given target\n\n* Connect to ChEMBL database\n* Get target data (EGFR kinase)\n* Bioactivity data\n * Download and filter bioactivities\n * Clean and convert\n* Compound data\n * Get list of compounds\n * Prepare output data\n* Output\n * Draw molecules with highest pIC50\n * Write output file\n\n\n## References\n\n* ChEMBL bioactivity database (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5210557/)\n* ChEMBL web services: <i>Nucleic Acids Res.</i> (2015), <b>43</b>, 612-620 (https://academic.oup.com/nar/article/43/W1/W612/2467881) \n* ChEMBL webrescource client GitHub (https://github.com/chembl/chembl_webresource_client)\n* myChEMBL webservices version 2.x (https://github.com/chembl/mychembl/blob/master/ipython_notebooks/09_myChEMBL_web_services.ipynb)\n* ChEMBL web-interface (https://www.ebi.ac.uk/chembl/)\n* EBI-RDF platform (https://www.ncbi.nlm.nih.gov/pubmed/24413672)\n* IC50 and pIC50 (https://en.wikipedia.org/wiki/IC50)\n* UniProt website (https://www.uniprot.org/)",
"_____no_output_____"
],
[
"_____________________________________________________________________________________________________________________\n\n\n## Theory\n\n### ChEMBL database\n\n* Open large-scale bioactivity database\n* **Current data content (as of 10.2018):**\n * \\>1.8 million distinct compound structures\n * \\>15 million activity values from 1 million assays\n * Assays are mapped to ∼12 000 targets\n* **Data sources** include scientific literature, PubChem bioassays, Drugs for Neglected Diseases Initiative (DNDi), BindingDB database, ...\n* ChEMBL data can be accessed via a [web-interface](https://www.ebi.ac.uk/chembl/), the [EBI-RDF platform](https://www.ncbi.nlm.nih.gov/pubmed/24413672) and the [ChEMBL web services](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4489243/#B5)\n \n \n#### ChEMBL web services\n\n* RESTful web service\n* ChEMBL web service version 2.x resource schema: \n\n[](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4489243/figure/F2/)\n\n*Figure 1:* \n\"ChEMBL web service schema diagram. The oval shapes represent ChEMBL web service resources and the line between two resources indicates that they share a common attribute. The arrow direction shows where the primary information about a resource type can be found. A dashed line indicates the relationship between two resources behaves differently. For example, the `Image` resource provides a graphical based representation of a `Molecule`.\"\nFigure and description taken from: [<i>Nucleic Acids Res.</i> (2015), <b>43</b>, 612-620](https://academic.oup.com/nar/article/43/W1/W612/2467881).\n\n\n#### ChEMBL webresource client\n\n* Python client library for accessing ChEMBL data\n* Handles interaction with the HTTPS protocol\n* Lazy evaluation of results -> reduced number of network requests\n\n### Compound activity measures\n\n#### IC50 \n\n* [Half maximal inhibitory concentration](https://en.wikipedia.org/wiki/IC50)\n* Indicates how much of a particular drug or other substance is needed to inhibit a given biological process by half\n\n[<img src=\"https://upload.wikimedia.org/wikipedia/commons/8/81/Example_IC50_curve_demonstrating_visually_how_IC50_is_derived.png\" width=\"450\" align=\"center\" >](https://commons.wikimedia.org/wiki/File:Example_IC50_curve_demonstrating_visually_how_IC50_is_derived.png)\n\n*Figure 2:* Visual demonstration of how to derive an IC50 value: Arrange data with inhibition on vertical axis and log(concentration) on horizontal axis; then identify max and min inhibition; then the IC50 is the concentration at which the curve passes through the 50% inhibition level.\n\n#### pIC50\n\n* To facilitate the comparison of IC50 values, we define pIC50 values on a logarithmic scale, such that <br />\n $ pIC_{50} = -log_{10}(IC_{50}) $ where $ IC_{50}$ is specified in units of M.\n* Higher pIC50 values indicate exponentially greater potency of the drug\n* pIC50 is given in terms of molar concentration (mol/L or M) <br />\n * IC50 should be specified in M to convert to pIC50 \n * For nM: $pIC_{50} = -log_{10}(IC_{50}*10^{-9})= 9-log_{10}(IC_{50}) $\n \nBesides, IC50 and pIC50, other bioactivity measures are used, such as the equilibrium constant [KI](https://en.wikipedia.org/wiki/Equilibrium_constant) and the half maximal effective concentration [EC50](https://en.wikipedia.org/wiki/EC50).",
"_____no_output_____"
],
[
"## Practical\n\nIn the following, we want to download all molecules that have been tested against our target of interest, the EGFR kinase.\n\n### Connect to ChEMBL database",
"_____no_output_____"
],
[
"First, the ChEMBL webresource client as well as other python libraries are imported.",
"_____no_output_____"
]
],
[
[
"from chembl_webresource_client.new_client import new_client\nimport pandas as pd\nimport math\nfrom rdkit.Chem import PandasTools",
"/home/andrea/anaconda2/envs/cadd-py36/lib/python3.6/site-packages/grequests.py:21: MonkeyPatchWarning: Monkey-patching ssl after ssl has already been imported may lead to errors, including RecursionError on Python 3.6. It may also silently lead to incorrect behaviour on Python 3.7. Please monkey-patch earlier. See https://github.com/gevent/gevent/issues/1016. Modules that had direct imports (NOT patched): ['urllib3.contrib.pyopenssl (/home/andrea/anaconda2/envs/cadd-py36/lib/python3.6/site-packages/urllib3/contrib/pyopenssl.py)', 'urllib3.util (/home/andrea/anaconda2/envs/cadd-py36/lib/python3.6/site-packages/urllib3/util/__init__.py)']. \n curious_george.patch_all(thread=False, select=False)\n"
]
],
[
[
"Create resource objects for API access.",
"_____no_output_____"
]
],
[
[
"targets = new_client.target\ncompounds = new_client.molecule\nbioactivities = new_client.activity",
"_____no_output_____"
]
],
[
[
"## Target data\n\n* Get UniProt-ID (http://www.uniprot.org/uniprot/P00533) of the target of interest (EGFR kinase) from UniProt website (https://www.uniprot.org/)\n* Use UniProt-ID to get target information\n* Select a different UniProt-ID if you are interested into another target",
"_____no_output_____"
]
],
[
[
"uniprot_id = 'P00533'\n# Get target information from ChEMBL but restrict to specified values only\ntarget_P00533 = targets.get(target_components__accession=uniprot_id) \\\n .only('target_chembl_id', 'organism', 'pref_name', 'target_type')\nprint(type(target_P00533))\npd.DataFrame.from_records(target_P00533)",
"<class 'chembl_webresource_client.query_set.QuerySet'>\n"
]
],
[
[
"### After checking the entries, we select the first entry as our target of interest\n`CHEMBL203`: It is a single protein and represents the human Epidermal growth factor receptor (EGFR, also named erbB1) ",
"_____no_output_____"
]
],
[
[
"target = target_P00533[0]\ntarget",
"_____no_output_____"
]
],
[
[
"Save selected ChEMBL-ID.",
"_____no_output_____"
]
],
[
[
"chembl_id = target['target_chembl_id']\nchembl_id",
"_____no_output_____"
]
],
[
[
"### Bioactivity data\n\nNow, we want to query bioactivity data for the target of interest.\n\n#### Download and filter bioactivities for the target",
"_____no_output_____"
],
[
"In this step, we download and filter the bioactivity data and only consider\n\n* human proteins\n* bioactivity type IC50\n* exact measurements (relation '=') \n* binding data (assay type 'B')",
"_____no_output_____"
]
],
[
[
"bioact = bioactivities.filter(target_chembl_id = chembl_id) \\\n .filter(type = 'IC50') \\\n .filter(relation = '=') \\\n .filter(assay_type = 'B') \\\n .only('activity_id','assay_chembl_id', 'assay_description', 'assay_type', \\\n 'molecule_chembl_id', 'type', 'units', 'relation', 'value', \\\n 'target_chembl_id', 'target_organism')\nlen(bioact), len(bioact[0]), type(bioact), type(bioact[0])",
"_____no_output_____"
]
],
[
[
"If you experience difficulties to query the ChEMBL database, we provide here a file containing the results for the query in the previous cell (11 April 2019). We do this using the Python package pickle which serializes Python objects so they can be saved to a file, and loaded in a program again later on.\n(Learn more about object serialization on [DataCamp](https://www.datacamp.com/community/tutorials/pickle-python-tutorial))\n\nYou can load the \"pickled\" compounds by uncommenting and running the next cell.",
"_____no_output_____"
]
],
[
[
"#import pickle\n#bioact = pickle.load(open(\"../data/T1/EGFR_compounds_from_chembl_query_20190411.p\", \"rb\"))",
"_____no_output_____"
]
],
[
[
"#### Clean and convert bioactivity data\n\nThe data is stored as a list of dictionaries",
"_____no_output_____"
]
],
[
[
"bioact[0]",
"_____no_output_____"
]
],
[
[
"Convert to pandas dataframe (this might take some minutes).",
"_____no_output_____"
]
],
[
[
"bioact_df = pd.DataFrame.from_records(bioact)\nbioact_df.head(10)",
"_____no_output_____"
],
[
"bioact_df.shape",
"_____no_output_____"
]
],
[
[
"Delete entries with missing values.",
"_____no_output_____"
]
],
[
[
"bioact_df = bioact_df.dropna(axis=0, how = 'any')\nbioact_df.shape",
"_____no_output_____"
]
],
[
[
"Delete duplicates:\nSometimes the same molecule (`molecule_chembl_id`) has been tested more than once, in this case, we only keep the first one.",
"_____no_output_____"
]
],
[
[
"bioact_df = bioact_df.drop_duplicates('molecule_chembl_id', keep = 'first')\nbioact_df.shape",
"_____no_output_____"
]
],
[
[
"We would like to only keep bioactivity data measured in molar units. The following print statements will help us to see what units are contained and to control what is kept after dropping some rows.",
"_____no_output_____"
]
],
[
[
"print(bioact_df.units.unique())\nbioact_df = bioact_df.drop(bioact_df.index[~bioact_df.units.str.contains('M')])\nprint(bioact_df.units.unique())\nbioact_df.shape",
"['uM' 'nM' 'M' \"10'1 ug/ml\" 'ug ml-1' \"10'-1microM\" \"10'1 uM\"\n \"10'-1 ug/ml\" \"10'-2 ug/ml\" \"10'2 uM\" '/uM' \"10'-6g/ml\" 'mM' 'umol/L'\n 'nmol/L']\n['uM' 'nM' 'M' \"10'-1microM\" \"10'1 uM\" \"10'2 uM\" '/uM' 'mM']\n"
]
],
[
[
"Since we deleted some rows, but we want to iterate over the index later, we reset index to be continuous.",
"_____no_output_____"
]
],
[
[
"bioact_df = bioact_df.reset_index(drop=True) \nbioact_df.head()",
"_____no_output_____"
]
],
[
[
"To allow further comparison of the IC50 values, we convert all units to nM. First, we write a helper function, which can be applied to the whole dataframe in the next step.",
"_____no_output_____"
]
],
[
[
"def convert_to_NM(unit, bioactivity):\n# c=0\n# for i, unit in enumerate(bioact_df.units):\n if unit != \"nM\": \n if unit == \"pM\":\n value = float(bioactivity)/1000\n elif unit == \"10'-11M\":\n value = float(bioactivity)/100\n elif unit == \"10'-10M\":\n value = float(bioactivity)/10\n elif unit == \"10'-8M\":\n value = float(bioactivity)*10\n elif unit == \"10'-1microM\" or unit == \"10'-7M\":\n value = float(bioactivity)*100\n elif unit == \"uM\" or unit == \"/uM\" or unit == \"10'-6M\":\n value = float(bioactivity)*1000\n elif unit == \"10'1 uM\":\n value = float(bioactivity)*10000\n elif unit == \"10'2 uM\":\n value = float(bioactivity)*100000\n elif unit == \"mM\":\n value = float(bioactivity)*1000000\n elif unit == \"M\":\n value = float(bioactivity)*1000000000\n else:\n print ('unit not recognized...', unit)\n return value\n else: return bioactivity",
"_____no_output_____"
],
[
"bioactivity_nM = []\nfor i, row in bioact_df.iterrows():\n bioact_nM = convert_to_NM(row['units'], row['value'])\n bioactivity_nM.append(bioact_nM)\nbioact_df['value'] = bioactivity_nM\nbioact_df['units'] = 'nM'\nbioact_df.head()",
"_____no_output_____"
]
],
[
[
"### Compound data\n\nWe have a data frame containing all molecules tested (with the respective measure) against EGFR. Now, we want to get the molecules that are stored behind the respective ChEMBL IDs. ",
"_____no_output_____"
],
[
"#### Get list of compounds\n\nLet's have a look at the compounds from ChEMBL we have defined bioactivity data for. First, we retrieve ChEMBL ID and structures for the compounds with desired bioactivity data.",
"_____no_output_____"
]
],
[
[
"cmpd_id_list = list(bioact_df['molecule_chembl_id'])\ncompound_list = compounds.filter(molecule_chembl_id__in = cmpd_id_list) \\\n .only('molecule_chembl_id','molecule_structures')",
"_____no_output_____"
]
],
[
[
"Then, we convert the list to a pandas dataframe and delete duplicates (again, the pandas from_records function might take some time).",
"_____no_output_____"
]
],
[
[
"compound_df = pd.DataFrame.from_records(compound_list)\ncompound_df = compound_df.drop_duplicates('molecule_chembl_id', keep = 'first')\nprint(compound_df.shape)\nprint(bioact_df.shape)\ncompound_df.head()",
"(4780, 2)\n(4780, 11)\n"
]
],
[
[
"So far, we have multiple different molecular structure representations. We only want to keep the canonical SMILES.",
"_____no_output_____"
]
],
[
[
"for i, cmpd in compound_df.iterrows():\n if compound_df.loc[i]['molecule_structures'] != None:\n compound_df.loc[i]['molecule_structures'] = cmpd['molecule_structures']['canonical_smiles']\n\nprint (compound_df.shape)",
"(4780, 2)\n"
]
],
[
[
"#### Prepare output data",
"_____no_output_____"
],
[
"Merge values of interest in one dataframe on ChEMBL-IDs:\n* ChEMBL-IDs\n* SMILES\n* units\n* IC50",
"_____no_output_____"
]
],
[
[
"output_df = pd.merge(bioact_df[['molecule_chembl_id','units','value']], compound_df, on='molecule_chembl_id')\nprint(output_df.shape)\noutput_df.head()",
"(4780, 4)\n"
]
],
[
[
"For distinct column names, we rename IC50 and SMILES columns.",
"_____no_output_____"
]
],
[
[
"output_df = output_df.rename(columns= {'molecule_structures':'smiles', 'value':'IC50'})\noutput_df.shape",
"_____no_output_____"
]
],
[
[
"If we do not have a SMILES representation of a compound, we can not further use it in the following talktorials. Therefore, we delete compounds without SMILES column.",
"_____no_output_____"
]
],
[
[
"output_df = output_df[~output_df['smiles'].isnull()]\nprint(output_df.shape)\noutput_df.head()",
"(4771, 4)\n"
]
],
[
[
"In the next cell, you see that the low IC50 values are difficult to read. Therefore, we prefer to convert the IC50 values to pIC50.",
"_____no_output_____"
]
],
[
[
"output_df = output_df.reset_index(drop=True)\nic50 = output_df.IC50.astype(float) \nprint(len(ic50))\nprint(ic50.head(10))",
"4771\n0 41.0\n1 170.0\n2 9300.0\n3 500000.0\n4 3000000.0\n5 96000.0\n6 5310.0\n7 264000.0\n8 125.0\n9 35000.0\nName: IC50, dtype: float64\n"
],
[
"# Convert IC50 to pIC50 and add pIC50 column:\npIC50 = pd.Series() \ni = 0\nwhile i < len(output_df.IC50):\n value = 9 - math.log10(ic50[i]) # pIC50=-log10(IC50 mol/l) --> for nM: -log10(IC50*10**-9)= 9-log10(IC50)\n if value < 0:\n print(\"Negative pIC50 value at index\"+str(i))\n pIC50.at[i] = value\n i += 1\n \noutput_df['pIC50'] = pIC50\noutput_df.head()",
"_____no_output_____"
]
],
[
[
"### Collected bioactivity data for EGFR",
"_____no_output_____"
],
[
"Let's have a look at our collected data set.\n#### Draw molecules\nIn the next steps, we add a molecule column to our datafame and look at the structures of the molecules with the highest pIC50 values. ",
"_____no_output_____"
]
],
[
[
"PandasTools.AddMoleculeColumnToFrame(output_df, smilesCol='smiles')",
"_____no_output_____"
]
],
[
[
"Sort molecules by pIC50.",
"_____no_output_____"
]
],
[
[
"output_df.sort_values(by=\"pIC50\", ascending=False, inplace=True)\noutput_df.reset_index(drop=True, inplace=True)",
"_____no_output_____"
]
],
[
[
"Show the most active molecules = molecules with the highest pIC50 values.",
"_____no_output_____"
]
],
[
[
"output_df.drop(\"smiles\", axis=1).head()",
"_____no_output_____"
]
],
[
[
"#### Write output file\nTo use the data for the following talktorials, we save the data as csv file. Note that it is advisable to drop the molecule column (only contains an image of the molecules) when saving the data.",
"_____no_output_____"
]
],
[
[
"output_df.drop(\"ROMol\", axis=1).to_csv(\"../data/T1/EGFR_compounds.csv\")",
"_____no_output_____"
]
],
[
[
"## Discussion",
"_____no_output_____"
],
[
"In this tutorial, we collected all available bioactivity data for our target of interest from the ChEMBL database. We filtered the data set to only contain molecules with measured IC50 or pIC50 bioactivity values. \n\nBe aware that ChEMBL data originates from various sources. Compound data has been generated in different labs by different people all over the world. Therefore, we have to be cautious with the predictions we make using this dataset. It is always important to consider the source of the data and consistency of data production assays when interpreting the results and determining how much confidence we have in our predictions.\n\nIn the next tutorials we will filter our acquired data by the Lipinski rule of five and by unwanted substructures. Another important step would be to clean the data and remove duplicates. As this is not shown in any of our talktorials (yet), we would like to refer to the standardiser library ([github Francis Atkinson](https://github.com/flatkinson/standardiser)) or [MolVS](https://molvs.readthedocs.io/en/latest/) as possible tools for this task.",
"_____no_output_____"
],
[
"## Quiz",
"_____no_output_____"
],
[
"* We have downloaded in this talktorial molecules and bioactivity data from ChEMBL. What else is the ChEMBL database useful for?\n* What is the difference between IC50 and EC50?\n* What can we use the data extracted from ChEMBL for?",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
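The ChEMBL notebook above converts nM IC50 values to pIC50 with an index-based while loop. A vectorized sketch of the same conversion is shown below; the IC50 values here are illustrative, not taken from the actual ChEMBL query. The relation used is pIC50 = -log10(IC50 in mol/l), which for nM inputs reduces to 9 - log10(IC50).

```python
import numpy as np
import pandas as pd

# Illustrative IC50 values in nM (not the actual query results).
output_df = pd.DataFrame({"IC50": [41.0, 170.0, 9300.0], "units": "nM"})

# pIC50 = -log10(IC50 [mol/l]); with IC50 given in nM this is 9 - log10(IC50).
output_df["pIC50"] = 9 - np.log10(output_df["IC50"].astype(float))
print(output_df)
```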
d04c3d713d580dcffe05d19d3f0118e0ca872c75 | 99,005 | ipynb | Jupyter Notebook | machine_learning/lesson 4 - ML Apps/Gradio/EMNIST_Gradio_Tutorial.ipynb | BreakoutMentors/Data-Science-and-Machine-Learning | 26bbe39c3e94dc200e8837e2fcba9bdd38222e36 | [
"MIT"
] | 3 | 2020-06-04T15:11:33.000Z | 2021-09-14T02:12:22.000Z | machine_learning/lesson 4 - ML Apps/Gradio/EMNIST_Gradio_Tutorial.ipynb | BreakoutMentors/Data-Science-and-Machine-Learning | 26bbe39c3e94dc200e8837e2fcba9bdd38222e36 | [
"MIT"
] | 19 | 2021-05-27T16:42:42.000Z | 2022-03-22T23:37:03.000Z | machine_learning/lesson 4 - ML Apps/Gradio/EMNIST_Gradio_Tutorial.ipynb | BreakoutMentors/Data-Science-and-Machine-Learning | 26bbe39c3e94dc200e8837e2fcba9bdd38222e36 | [
"MIT"
] | 1 | 2020-07-08T21:35:07.000Z | 2020-07-08T21:35:07.000Z | 85.275624 | 17,318 | 0.749033 | [
[
[
"<a href=\"https://colab.research.google.com/github/BreakoutMentors/Data-Science-and-Machine-Learning/blob/main/machine_learning/lesson%204%20-%20ML%20Apps/Gradio/EMNIST_Gradio_Tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Making ML Applications with Gradio\n\n[Gradio](https://www.gradio.app/) is a python library that provides web interfaces for your models. This library is very high-level with it being the easiest to learn for beginners. Here we use a dataset called [EMNIST](https://pytorch.org/vision/stable/datasets.html#emnist) which is an addition to the MNIST(dataset of images with numbers) datasest, by including images of capital and lowercase letters with a total of 62 classes.\n\nUsing Gradio, an interface is created at the bottom using the model trained in this notebook to accept our drawings of images or numbers to then predict.",
"_____no_output_____"
],
[
"## Importing libraries and Installing Gradio using PIP\n\nGoogle does not have Gradio automatically installed on their Google Colab machines, so it is necessary to install it to the specific machine you are using right now. If you choose another runtime machine, it is necessary to repeat this step.\n\n**Also, please run this code with a GPU**",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Importing PyTorch\nimport torch\nimport torch.nn as nn\n\n# Importing torchvision for dataset\nimport torchvision\nimport torchvision.transforms as transforms\n\n# Installing gradio using PIP\n!pip install gradio",
"Collecting gradio\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e4/c6/19d6941437fb56db775b00c0181af81e539c42369bc79c664001d2272ccb/gradio-2.0.5-py3-none-any.whl (1.6MB)\n\u001b[K |████████████████████████████████| 1.6MB 5.2MB/s \n\u001b[?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from gradio) (3.2.2)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from gradio) (1.1.5)\nCollecting analytics-python\n Downloading https://files.pythonhosted.org/packages/30/81/2f447982f8d5dec5b56c10ca9ac53e5de2b2e9e2bdf7e091a05731f21379/analytics_python-1.3.1-py2.py3-none-any.whl\nRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from gradio) (1.4.1)\nCollecting paramiko\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/95/19/124e9287b43e6ff3ebb9cdea3e5e8e88475a873c05ccdf8b7e20d2c4201e/paramiko-2.7.2-py2.py3-none-any.whl (206kB)\n\u001b[K |████████████████████████████████| 215kB 22.1MB/s \n\u001b[?25hCollecting pycryptodome\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ad/16/9627ab0493894a11c68e46000dbcc82f578c8ff06bc2980dcd016aea9bd3/pycryptodome-3.10.1-cp35-abi3-manylinux2010_x86_64.whl (1.9MB)\n\u001b[K |████████████████████████████████| 1.9MB 22.7MB/s \n\u001b[?25hCollecting Flask-Cors>=3.0.8\n Downloading https://files.pythonhosted.org/packages/db/84/901e700de86604b1c4ef4b57110d4e947c218b9997adf5d38fa7da493bce/Flask_Cors-3.0.10-py2.py3-none-any.whl\nCollecting flask-cachebuster\n Downloading https://files.pythonhosted.org/packages/74/47/f3e1fedfaad965c81c2f17234636d72f71450f1b4522ca26d2b7eb4a0a74/Flask-CacheBuster-1.0.0.tar.gz\nRequirement already satisfied: Flask>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from gradio) (1.1.4)\nCollecting markdown2\n Downloading https://files.pythonhosted.org/packages/5d/be/3924cc1c0e12030b5225de2b4521f1dc729730773861475de26be64a0d2b/markdown2-2.4.0-py2.py3-none-any.whl\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from gradio) (1.19.5)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from gradio) (2.23.0)\nCollecting ffmpy\n Downloading https://files.pythonhosted.org/packages/bf/e2/947df4b3d666bfdd2b0c6355d215c45d2d40f929451cb29a8a2995b29788/ffmpy-0.3.0.tar.gz\nRequirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from gradio) (7.1.2)\nCollecting Flask-Login\n Downloading https://files.pythonhosted.org/packages/2b/83/ac5bf3279f969704fc1e63f050c50e10985e50fd340e6069ec7e09df5442/Flask_Login-0.5.0-py2.py3-none-any.whl\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->gradio) (1.3.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->gradio) (2.4.7)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->gradio) (0.10.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->gradio) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->gradio) (2018.9)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from analytics-python->gradio) (1.15.0)\nCollecting backoff==1.10.0\n Downloading 
https://files.pythonhosted.org/packages/f0/32/c5dd4f4b0746e9ec05ace2a5045c1fc375ae67ee94355344ad6c7005fd87/backoff-1.10.0-py2.py3-none-any.whl\nCollecting monotonic>=1.5\n Downloading https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl\nCollecting pynacl>=1.0.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9d/57/2f5e6226a674b2bcb6db531e8b383079b678df5b10cdaa610d6cf20d77ba/PyNaCl-1.4.0-cp35-abi3-manylinux1_x86_64.whl (961kB)\n\u001b[K |████████████████████████████████| 962kB 28.0MB/s \n\u001b[?25hCollecting cryptography>=2.5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/b2/26/7af637e6a7e87258b963f1731c5982fb31cd507f0d90d91836e446955d02/cryptography-3.4.7-cp36-abi3-manylinux2014_x86_64.whl (3.2MB)\n\u001b[K |████████████████████████████████| 3.2MB 37.5MB/s \n\u001b[?25hCollecting bcrypt>=3.1.3\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/26/70/6d218afbe4c73538053c1016dd631e8f25fffc10cd01f5c272d7acf3c03d/bcrypt-3.2.0-cp36-abi3-manylinux2010_x86_64.whl (63kB)\n\u001b[K |████████████████████████████████| 71kB 9.2MB/s \n\u001b[?25hRequirement already satisfied: Werkzeug<2.0,>=0.15 in /usr/local/lib/python3.7/dist-packages (from Flask>=1.1.1->gradio) (1.0.1)\nRequirement already satisfied: itsdangerous<2.0,>=0.24 in /usr/local/lib/python3.7/dist-packages (from Flask>=1.1.1->gradio) (1.1.0)\nRequirement already satisfied: click<8.0,>=5.1 in /usr/local/lib/python3.7/dist-packages (from Flask>=1.1.1->gradio) (7.1.2)\nRequirement already satisfied: Jinja2<3.0,>=2.10.1 in /usr/local/lib/python3.7/dist-packages (from Flask>=1.1.1->gradio) (2.11.3)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->gradio) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->gradio) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->gradio) (2021.5.30)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->gradio) (3.0.4)\nRequirement already satisfied: cffi>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from pynacl>=1.0.1->paramiko->gradio) (1.14.5)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from Jinja2<3.0,>=2.10.1->Flask>=1.1.1->gradio) (2.0.1)\nRequirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.4.1->pynacl>=1.0.1->paramiko->gradio) (2.20)\nBuilding wheels for collected packages: flask-cachebuster, ffmpy\n Building wheel for flask-cachebuster (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for flask-cachebuster: filename=Flask_CacheBuster-1.0.0-cp37-none-any.whl size=3372 sha256=2e68e88b4d90e766446a679a8b3c199673be350c949933099f14b55957e7b658\n Stored in directory: /root/.cache/pip/wheels/9f/fc/a7/ab5712c3ace9a8f97276465cc2937316ab8063c1fea488ea77\n Building wheel for ffmpy (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for ffmpy: filename=ffmpy-0.3.0-cp37-none-any.whl size=4710 sha256=9da4ad5c3f5cf80dbda1d5ddde11d3b0b7388a9fc462dc27b8e4d3eba882ae2c\n Stored in directory: /root/.cache/pip/wheels/cc/ac/c4/bef572cb7e52bfca170046f567e64858632daf77e0f34e5a74\nSuccessfully built flask-cachebuster ffmpy\nInstalling collected packages: backoff, monotonic, analytics-python, pynacl, cryptography, bcrypt, paramiko, pycryptodome, Flask-Cors, flask-cachebuster, markdown2, ffmpy, Flask-Login, gradio\nSuccessfully installed Flask-Cors-3.0.10 Flask-Login-0.5.0 analytics-python-1.3.1 backoff-1.10.0 bcrypt-3.2.0 cryptography-3.4.7 ffmpy-0.3.0 flask-cachebuster-1.0.0 gradio-2.0.5 markdown2-2.4.0 monotonic-1.6 paramiko-2.7.2 pycryptodome-3.10.1 pynacl-1.4.0\n"
]
],
[
[
"## Downloading and Preparing EMNIST Dataset\n\n**Note:** Even though the images in the EMNIST dataset are 28x28 images just like the regular MNIST dataset, there are necessary transforms needed for EMNIST dataset. If not transformed, the images are rotated 90° counter-clockwise and are flipped vertically. To undo these two issues, we first rotate it 90° counter-clockwise and then flip it horizontally\n\nHere is the image before processing:\n\n<img src=\"https://raw.githubusercontent.com/BreakoutMentors/Data-Science-and-Machine-Learning/main/machine_learning/lesson%204%20-%20ML%20Apps/images/image_before_processing.jpg\" width=200>\n\nHere is the image after processing:\n\n<img src=\"https://github.com/BreakoutMentors/Data-Science-and-Machine-Learning/blob/main/machine_learning/lesson%204%20-%20ML%20Apps/images/image_after_processing.jpg?raw=true\" width=200>",
"_____no_output_____"
]
],
[
[
"# Getting Dataset\n!mkdir EMNIST\nroot = '/content/EMNIST'\n\n# Creating Transforms\ntransforms = transforms.Compose([\n # Rotating image 90 degrees counter-clockwise\n transforms.RandomRotation((-90,-90)),\n # Flipping images horizontally\n transforms.RandomHorizontalFlip(p=1),\n # Converting images to tensor\n transforms.ToTensor()\n])\n\n# Getting dataset\ntraining_dataset = torchvision.datasets.EMNIST(root,\n split='byclass',\n train=True,\n download=True,\n transform=transforms)\n\ntest_dataset = torchvision.datasets.EMNIST(root,\n split='byclass',\n train=False,\n download=True,\n transform=transforms)\n\n# Loading Dataset into dataloaders\nbatch_size = 2048\ntraining_dataloader = torch.utils.data.DataLoader(training_dataset, batch_size=batch_size, shuffle=True)\ntest_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n# Getting shapes of dataset\nprint('Shape of the training dataset:', training_dataset.data.shape)\nprint('Shape of the test dataset:', test_dataset.data.shape)\n\n# Getting reverted class_to_idx dictionary to get classes by idx\nidx_to_class = {val:key for key, val in training_dataset.class_to_idx.items()}\n\n# Plotting 5 images with classes\nplt.figure(figsize=(10,2))\nfor i in range(5):\n plt.subplot(1,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(training_dataset[i][0].squeeze().numpy(), cmap=plt.cm.binary)\n plt.xlabel(idx_to_class[training_dataset[i][1]])",
"Downloading and extracting zip archive\nDownloading https://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip to /content/EMNIST/EMNIST/raw/emnist.zip\n"
]
],
[
[
"## Building the Model",
"_____no_output_____"
]
],
[
[
"class Neural_Network(nn.Module):\n # Constructor\n def __init__(self, num_classes):\n super(Neural_Network, self).__init__()\n\n # Defining Fully-Connected Layers\n self.fc1 = nn.Linear(28*28, 392) # 28*28 since each image is 28*28\n self.fc2 = nn.Linear(392, 196)\n self.fc3 = nn.Linear(196, 98)\n self.fc4 = nn.Linear(98, num_classes)\n \n # Activation function\n self.relu = nn.ReLU()\n\n def forward(self, x):\n \n # Need to flatten each image in the batch\n x = x.flatten(start_dim=1)\n\n # Input it into the Fully connected layers\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n x = self.relu(self.fc3(x))\n x = self.fc4(x)\n\n return x\n\n# Getting number of classes\nnum_classes = len(idx_to_class)\nmodel = Neural_Network(num_classes)\nprint(model)",
"Neural_Network(\n (fc1): Linear(in_features=784, out_features=392, bias=True)\n (fc2): Linear(in_features=392, out_features=196, bias=True)\n (fc3): Linear(in_features=196, out_features=98, bias=True)\n (fc4): Linear(in_features=98, out_features=62, bias=True)\n (relu): ReLU()\n)\n"
]
],
[
[
"## Defining Loss Function and Optimizer",
"_____no_output_____"
]
],
[
[
"criterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)",
"_____no_output_____"
]
],
[
[
"## Moving model to GPU\n\nIf you have not changed the runtime type to a GPU, please do so now. This helps with the speed of training.",
"_____no_output_____"
]
],
[
[
"# Use GPU if available\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# Moving model to use GPU\nmodel.to(device)",
"_____no_output_____"
]
],
[
[
"## Training the Model",
"_____no_output_____"
]
],
[
[
"# Function that returns a torch tensor with predictions to compare with labels\ndef get_preds_from_logits(logits):\n # Using softmax to get an array that sums to 1, and then getting the index with the highest value\n return torch.nn.functional.softmax(logits, dim=1).argmax(dim=1)\n\nepochs = 10\ntrain_losses = []\ntrain_accuracies = []\nfor epoch in range(1, epochs+1):\n train_loss = 0.0\n\n train_counts = 0\n\n ###################\n # train the model #\n ###################\n\n # Setting model to train mode\n model.train()\n\n for images, labels in training_dataloader:\n\n # Moving data to GPU if available\n images, labels = images.to(device), labels.to(device)\n \n # Setting all gradients to zero\n optimizer.zero_grad()\n\n # Calculate Output\n output = model(images)\n \n # Calculate Loss\n loss = criterion(output, labels)\n\n # Calculate Gradients\n loss.backward()\n\n # Perform Gradient Descent Step\n optimizer.step()\n\n # Saving loss\n train_loss += loss.item()\n\n # Get Predictions\n train_preds = get_preds_from_logits(output)\n\n # Saving number of right predictions for accuracy\n train_counts += train_preds.eq(labels).sum().item()\n\n # Averaging and Saving Losses\n train_loss/=len(training_dataset)\n train_losses.append(train_loss)\n\n # Getting accuracies and saving them\n train_acc = train_counts/len(training_dataset)\n train_accuracies.append(train_acc)\n\n\n print('Epoch: {} \\tTraining Loss: {:.6f} \\tTraining Accuracy: {:.2f}%'.format(epoch, train_loss, train_acc*100))",
"Epoch: 1 \tTraining Loss: 0.000590 \tTraining Accuracy: 67.11%\nEpoch: 2 \tTraining Loss: 0.000302 \tTraining Accuracy: 80.17%\nEpoch: 3 \tTraining Loss: 0.000259 \tTraining Accuracy: 82.35%\nEpoch: 4 \tTraining Loss: 0.000238 \tTraining Accuracy: 83.43%\nEpoch: 5 \tTraining Loss: 0.000225 \tTraining Accuracy: 84.08%\nEpoch: 6 \tTraining Loss: 0.000216 \tTraining Accuracy: 84.53%\nEpoch: 7 \tTraining Loss: 0.000209 \tTraining Accuracy: 84.88%\nEpoch: 8 \tTraining Loss: 0.000203 \tTraining Accuracy: 85.17%\nEpoch: 9 \tTraining Loss: 0.000198 \tTraining Accuracy: 85.46%\nEpoch: 10 \tTraining Loss: 0.000194 \tTraining Accuracy: 85.71%\n"
],
[
"plt.plot(train_losses)\nplt.xlabel('epoch')\nplt.ylabel('Mean Squared Error')\nplt.title('Training Loss')\nplt.show()",
"_____no_output_____"
],
[
"plt.plot(train_accuracies)\nplt.xlabel('epoch')\nplt.ylabel('Accuracy')\nplt.title('Training Accuracy')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Evaluating the model\n\nHere we will display the test loss and accuracy and examples of images that were misclassified.",
"_____no_output_____"
]
],
[
[
"test_loss = 0.0\ntest_counts = 0\n\n# Setting model to evaluation mode, no parameters will change\nmodel.eval()\n\nfor images, labels in test_dataloader:\n\n # Moving to GPU if available\n images, labels = images.to(device), labels.to(device)\n\n # Calculate Output\n output = model(images)\n\n # Calculate Loss\n loss = criterion(output, labels)\n\n # Saving loss\n test_loss += loss.item()\n\n # Get Predictions\n test_preds = get_preds_from_logits(output)\n\n # Saving number of right predictions for accuracy\n test_counts += test_preds.eq(labels).sum().item()\n\n# Calculating test accuracy\ntest_acc = test_counts/len(test_dataset)\nprint('Test Loss: {:.6f} \\tTest Accuracy: {:.2f}%'.format(test_loss, test_acc*100))",
"Test Loss: 24.000878 \tTest Accuracy: 84.97%\n"
],
[
"import torchvision.transforms as transforms\n\n# Have to another set of transforms to rotate and flip testing data\ntest_transforms = transforms.Compose([\n # Rotating image 90 degrees counter-clockwise\n transforms.RandomRotation((-90,-90)),\n # Flipping images horizontally\n transforms.RandomHorizontalFlip(p=1)\n])\n\n# Transforming the data and normalizing them\ntest_images = test_transforms(test_dataset.data).to(device)/255\n# Getting Predictions\npredictions = get_preds_from_logits(model(test_images))\n# Getting Labels\ntest_labels = test_dataset.targets.to(device)\n\n# Getting misclassified booleans\ncorrect_bools = test_labels.eq(predictions)\nmisclassified_indices = []\nfor i in range(len(correct_bools)):\n if correct_bools[i] == False:\n misclassified_indices.append(i)\n\n# Plotting 5 misclassified images\nplt.figure(figsize=(10,2))\nfor i in range(5):\n idx = misclassified_indices[i]\n plt.subplot(1,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(test_images[idx].squeeze().cpu().numpy(), cmap=plt.cm.binary)\n true_label = idx_to_class[test_labels[idx].item()]\n pred_label = idx_to_class[predictions[idx].item()]\n plt.xlabel(f'True: {true_label}, Pred: {pred_label}')",
"_____no_output_____"
]
],
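[
[
"# (Added sketch, not part of the original lesson) Per-class test accuracy, reusing the\n# `predictions` and `test_labels` tensors computed in the previous cell, plus `idx_to_class`.\n# This is a quick way to see which characters the model confuses most often.\nimport collections\n\ncorrect_per_class = collections.Counter()\ntotal_per_class = collections.Counter()\nfor pred, label in zip(predictions.cpu().numpy(), test_labels.cpu().numpy()):\n    total_per_class[label] += 1\n    if pred == label:\n        correct_per_class[label] += 1\n\n# Five classes with the lowest accuracy on the test set\nworst = sorted(total_per_class, key=lambda c: correct_per_class[c] / total_per_class[c])[:5]\nfor c in worst:\n    acc = correct_per_class[c] / total_per_class[c]\n    print('class {}: accuracy {:.2f}'.format(idx_to_class[c], acc))",
"_____no_output_____"
]
],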
[
[
"# How to use Gradio\n\nThere are three parts of using Gradio\n1. Define a function that takes input and returns your model's output\n2. Define what type of input the interface will use\n3. Define what type of output the interface will give\n\nThe function `recognize_image` takes a 28x28 image that is not yet normalized and returns a dictionary with the keys being the classes and the values being the probabilities for that class.\n\nThe class [`gradio.inputs.Image`](https://www.gradio.app/docs#i_image) is used as the input that provides a window in the Gradio interface, but there are many customizations you can provide.\n\nThese are some the parameters:\n1. shape - (width, height) shape to crop and resize image to; if None, matches input image size.\n2. image_mode - \"RGB\" if color, or \"L\" if black and white.\n3. invert_colors - whether to invert the image as a preprocessing step.\n4. source - Source of image. \"upload\" creates a box where user can drop an image file, \"webcam\" allows user to take snapshot from their webcam, \"canvas\" defaults to a white image that can be edited and drawn upon with tools.\n\nThe class [gradio.outputs.Label](https://www.gradio.app/docs#o_label) is used as the output, which provides probabilities to the interface for the purpose of displaying them.\n\nThese are the parameters:\n1. num_top_classes - number of most confident classes to show.\n2. type - Type of value to be passed to component. \"value\" expects a single out label, \"confidences\" expects a dictionary mapping labels to confidence scores, \"auto\" detects return type.\n3. label - component name in interface.\n\nThe interface class [gradio.Interface](https://www.gradio.app/docs#interface) is responsible of creating the interface that compiles the type of inputs and outputs. There is a `.launch()` method that launches the interface in this notebook after compiling.\n\nThese are the parameters used in this interface:\n1. fn - the function to wrap an interface around.\n2. inputs - a single Gradio input component, or list of Gradio input components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn.\n3. outputs - a single Gradio output component, or list of Gradio output components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn.\n4. title - a title for the interface; if provided, appears above the input and output components.\n5. description - a description for the interface; if provided, appears above the input and output components.\n6. live - whether the interface should automatically reload on change.\n7. interpretation - function that provides interpretation explaining prediction output. Pass \"default\" to use built-in interpreter.\n\nI will enourage you to view the [documentation](https://www.gradio.app/docs) for the interface, inputs and outputs, you can find all the information you need there. It is helpful to refer to the documentation to understand other parameters that are not used in this lesson.",
"_____no_output_____"
]
],
[
[
"import gradio\nimport gradio as gr\n\n# Function that returns a torch tensor with predictions to compare with labels\ndef get_probs_from_logits(logits):\n # Using softmax to get probabilities from the logits\n return torch.nn.functional.softmax(logits, dim=1)\n\n# Function that takes the img drawn in the Gradio interface, then gives probabilities\ndef recognize_image(img):\n # Normalizes inputted image and converts it to a tensor for the model\n img = torch.tensor(img/255, dtype=torch.float).unsqueeze(dim=0).to(device)\n\n # Getting output\n output = model(img)\n \n # Getting probabilites of the image\n probabilities = get_probs_from_logits(output).flatten()\n\n # Returns a dictionary with the key being the class and val being the probability\n probabilities_dict = {idx_to_class[i]:probabilities[i].item() for i in range(num_classes)}\n\n return probabilities_dict\n\nim = gradio.inputs.Image(shape=(28, 28),\n image_mode='L',\n invert_colors=True,\n source=\"canvas\")\n\ntitle = \"Number and Letter Classifier App\"\ndescription = \"\"\"This app is able to guess the number or letter you draw below.\n The ML model was trained on the EMNIST dataset, please use below!\"\"\"\n\niface = gr.Interface(fn=recognize_image,\n inputs=im,\n outputs=gradio.outputs.Label(num_top_classes=5),\n title=title,\n description=description,\n live=True,\n interpretation=\"default\")\n\niface.launch()",
"Colab notebook detected. To show errors in colab notebook, set `debug=True` in `launch()`\nThis share link will expire in 24 hours. If you need a permanent link, visit: https://gradio.app/introducing-hosted (NEW!)\nRunning on External URL: https://27407.gradio.app\nInterface loading below...\n"
]
],
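[
[
"# (Added sketch, not part of the original lesson) Quick local sanity check of the\n# `recognize_image` function before relying on the Gradio interface. It reuses\n# `test_transforms`, `test_dataset` and `idx_to_class` defined in earlier cells.\nsample_img = test_transforms(test_dataset.data)[0].numpy()  # unnormalized 28x28 array, values 0-255\nprobs = recognize_image(sample_img)\n\n# Show the true label and the five most probable classes\ntop5 = sorted(probs.items(), key=lambda kv: kv[1], reverse=True)[:5]\nprint('True label:', idx_to_class[test_dataset.targets[0].item()])\nfor label, p in top5:\n    print('{}: {:.3f}'.format(label, p))",
"_____no_output_____"
]
],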
[
[
"# What's next?\n\nThe next challenge will cover pretrained models, which are models that are already trained for us and gives us the availability of using the model to make predictions automatically. You will create another Gradio app that uses pretrained models to classify images.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d04c3f4bc4b5b18236e8a30e51bbe6aeb70fd289 | 603,045 | ipynb | Jupyter Notebook | src/projeto_1_ciencia_de_dados.ipynb | Cogitus/covid-vacine-progression-analysis | 18135c1fa2646fa4692886bdfab1a6971eea0848 | [
"MIT"
] | null | null | null | src/projeto_1_ciencia_de_dados.ipynb | Cogitus/covid-vacine-progression-analysis | 18135c1fa2646fa4692886bdfab1a6971eea0848 | [
"MIT"
] | null | null | null | src/projeto_1_ciencia_de_dados.ipynb | Cogitus/covid-vacine-progression-analysis | 18135c1fa2646fa4692886bdfab1a6971eea0848 | [
"MIT"
] | null | null | null | 565.708255 | 287,858 | 0.899726 | [
[
[
"# Baixando a base de dados do Kaggle\n",
"_____no_output_____"
]
],
[
[
"# baixando a lib do kaggle\n!pip install --upgrade kaggle\n!pip install plotly\n# para visualizar dados faltantes\n!pip install missingno",
"_____no_output_____"
],
[
"# requisitando upload do token de autentificação do Kaggle\n\n# OBS: o arquivo kaggle.json precisa ser baixado da sua conta pessoal do Kaggle.\nfrom google.colab import files\n\nuploaded = files.upload()\n\nfor fn in uploaded.keys():\n print('User uploaded file \"{name}\" with length {length} bytes'.format(name=fn, length=len(uploaded[fn]) ))",
"_____no_output_____"
],
[
"# alocando o arquivo kaggle.json em seu devido local e permitindo escrita e leitura no mesmo\n!mkdir -p ~/.kaggle\n!mv kaggle.json ~/.kaggle\n!chmod 600 ~/.kaggle/kaggle.json",
"_____no_output_____"
],
[
"!kaggle datasets download -d gpreda/covid-world-vaccination-progress\n!!unzip covid-world-vaccination-progress.zip -d data_folder",
"Downloading covid-world-vaccination-progress.zip to /content\n\r 0% 0.00/172k [00:00<?, ?B/s]\n\r100% 172k/172k [00:00<00:00, 20.4MB/s]\n"
]
],
[
[
"# Código da análise exploratória em si\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport missingno as msno\nimport plotly.graph_objects as go\nimport matplotlib.ticker as ticker",
"_____no_output_____"
],
[
"def gera_lista_vacinas(dataset):\n '''\n Gera uma lista com todas as vacinas do dataset\n input: DataFrame dos dados\n output: lista de todas as vacinas\n '''\n todas_vacinas = list(dataset.groupby(['vacinas']).count().index)\n\n conjunto_vacinas = set()\n for lista_vacinas in todas_vacinas:\n lista_vacinas = lista_vacinas.split(', ')\n\n for vacina in lista_vacinas:\n conjunto_vacinas.add(vacina)\n lista_vacinas = list(conjunto_vacinas)\n lista_vacinas.sort()\n\n return lista_vacinas\n\n\ndef gera_lista_paises(dataset):\n '''\n Gera a lista de países que estão em vacinação\n input: DataFrame dos dados\n output: lista de todos os países\n '''\n return list(dataset.groupby(['pais']).count().index)\n\n\ndef gera_dataframe_vacinas(dataset):\n '''\n Gera um novo DataFrame em que as vacinas antes eram listadas na coluna 'vacinas'\n agora são listadas entre 10 colunas correspondentes à cada vacina, com 0's e 1's.\n Os 1's representam que vacina está sendo aplicada naquele país, os 0's que não!\n\n input: DataFrame dos dados\n output: DataFrame dos dados das vacinas categorizados\n '''\n labels_vacinas = gera_lista_vacinas(dataset) # lista das vacinas entendidas como labels\n dataset_vacinas = dataset['vacinas']\n\n array_temporario_vacinas = [] # inicia como uma lista vazia\n for linha_vacina in dataset_vacinas:\n sublista_vacinas = linha_vacina.split(', ')\n #lista de tamanho len(labels_vacinas) com 0's para elementos em sublista\n nova_linha = [int(vacina in sublista_vacinas) for vacina in labels_vacinas]\n array_temporario_vacinas.append(nova_linha)\n\n dataset_temporario_vacinas = pd.DataFrame(array_temporario_vacinas, columns=labels_vacinas)\n dataset.drop(columns=['vacinas'], axis=1, inplace=True)\n dataset = pd.concat([dataset, dataset_temporario_vacinas], axis=1)\n\n return dataset",
"_____no_output_____"
],
[
"dataset = pd.read_csv(r'data_folder/country_vaccinations.csv')\n\nnome_colunas = ['pais', 'codigo_iso', 'data', 'total_vacinacoes', 'pessoas_vacinadas',\n 'pessoas_tot_vacinadas', 'vacinacoes_diarias_raw', 'vacinacoes_diarias',\n 'tot_vacinacoes_por_cent', 'pessoas_vacinadas_por_cent', 'pessoas_tot_vacinadas_por_cent',\n 'vacinacoes_diarias_por_milhao', 'vacinas', 'fonte_dados', 'website_fonte']\nnome_colunas_antigo = list(dataset.columns)\n\ndataset.rename(columns=dict(zip(nome_colunas_antigo, nome_colunas)), inplace=True)",
"_____no_output_____"
],
[
"dataset.head()",
"_____no_output_____"
],
[
"# DATAFRAME COM AS INFOS DAS VACINAS\nfreq_vacinas = dataset.groupby('pais').max()\n\ndemais_colunas = [coluna for coluna in nome_colunas if coluna not in lista_vacinas and coluna not in ['pais', 'vacinas']]\nfreq_vacinas.drop(columns=demais_colunas, axis=1, inplace=True)\n\n# para o bar plot vacinas x num_paises\ndensidade_vacinas = pd.DataFrame(freq_vacinas.sum(), columns=['num_paises']) ",
"_____no_output_____"
],
[
"# BARPLOT DAS VACINAS\nfig_disposicao_vacinas = plt.figure(figsize = (20, 10))\nplt.title('Número de países que utilizam as vacinas', fontsize=18)\n\ny_label = densidade_vacinas.index\nx_label = densidade_vacinas['num_paises'].values\n\nplt.bar(y_label, x_label)\nplt.grid()\n\nfor i in range(len(x_label)):\n plt.annotate(str(x_label[i]), xy=(y_label[i], x_label[i]), ha='center', va='bottom', fontsize=14)\n\nplt.show()",
"_____no_output_____"
],
[
"# dados faltantes de todo o banco de dados\nmsno.matrix(dataset)",
"_____no_output_____"
],
[
"# Vamos visualizar a distribuição de dados faltantes POR PAÍS\nfrom math import floor\n\n# caso dê problema, é possível que um novo país tenha sido adicionado!\nnum_rows = 25\nnum_columns = 6\nfig, axarr = plt.subplots(num_rows, num_columns, figsize=(24, 90))\n\nlista_paises = gera_lista_paises(dataset)\n\nfor pais in enumerate(lista_paises):\n # extraindo nome e numero do pais\n num_pais = pais[0]\n nome_pais = pais[1]\n\n # definindo coordenadas de onde no subplot será plotado\n x_plot = floor(num_pais/num_columns)\n y_plot = num_pais % num_columns\n\n axarr[x_plot][y_plot].set_title(nome_pais)\n msno.matrix(dataset[dataset['pais'] == nome_pais], ax=axarr[x_plot][y_plot], labels=False)",
"/usr/local/lib/python3.7/dist-packages/missingno/missingno.py:61: UserWarning:\n\nPlotting a sparkline on an existing axis is not currently supported. To remove this warning, set sparkline=False.\n\n"
],
[
"dataset.describe()",
"_____no_output_____"
]
],
[
[
"# Código da criação dos gráficos e mapas",
"_____no_output_____"
]
],
[
[
"groupby_country = dataset.groupby(['pais'])\n\nlistof_dataframe_countries = []\nfor idx, group in enumerate(groupby_country):\n listof_dataframe_countries.append(group)",
"_____no_output_____"
],
[
"total_vac_top_countries = pd.DataFrame()\n# total_vacinacoes \tpessoas_vacinadas \tpessoas_tot_vacinadas\nfor i in range(len(listof_dataframe_countries)):\n country_df = listof_dataframe_countries[i][1]\n filtered_df = country_df[country_df['total_vacinacoes'].notna()]\n latest_day_data = filtered_df.iloc[-1:]\n total_vac_top_countries = total_vac_top_countries.append(latest_day_data, ignore_index=True)\n\ntotal_vac_top_countries = total_vac_top_countries.sort_values(by=['total_vacinacoes'], ascending=False)\n\nfig, axes = plt.subplots(nrows=2, ncols=5)\ni = 0\nj = 0\nfor pais in total_vac_top_countries.head(10).iterrows():\n country = dataset[dataset['pais'] == pais[1]['pais']]\n filtered = country[country['total_vacinacoes'].notna()].reset_index()\n\n fig2 = filtered[['total_vacinacoes','pessoas_vacinadas','pessoas_tot_vacinadas']].plot(title=pais[1]['pais'], ax=axes[j][i], grid=True)\n fig2.yaxis.set_major_formatter(ticker.EngFormatter())\n \n i+=1\n if(i%5 == 0):\n j+=1\n i=0\nplt.show()",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=2, ncols=5)\ni = 0\nj = 0\nfor pais in total_vac_top_countries.head(10).iterrows():\n country = dataset[dataset['pais'] == pais[1]['pais']]\n filtered = country[country['tot_vacinacoes_por_cent'].notna()].reset_index()\n\n fig2 = filtered[['tot_vacinacoes_por_cent','pessoas_vacinadas_por_cent','pessoas_tot_vacinadas_por_cent']].plot(title=pais[1]['pais'], ax=axes[j][i], grid=True)\n fig2.yaxis.set_major_formatter(ticker.PercentFormatter())\n fig2.set_ylim(0, 100)\n fig2.legend(('Total doses', 'Pessoas vacinadas', 'Pessoas imunizadas'))\n \n i+=1\n if(i%5 == 0):\n j+=1\n i=0\n\nplt.show()",
"_____no_output_____"
],
[
"for i in range(len(listof_dataframe_countries)):\n country_name = listof_dataframe_countries[i][0]\n if(country_name in [\"United States\", \"Austria\", \"Brazil\", \"United Kingdom\"]):\n country_df = listof_dataframe_countries[i][1]\n filtered_df = country_df[country_df['total_vacinacoes'].notna()]\n filtered_df[['total_vacinacoes','pessoas_vacinadas','pessoas_tot_vacinadas']].plot(title=country_name)\n\nplt.show()",
"_____no_output_____"
],
[
"df = pd.DataFrame()\n\nfor i in range(len(listof_dataframe_countries)):\n country_name = listof_dataframe_countries[i][0]\n country_df = listof_dataframe_countries[i][1]\n filtered_df = country_df[country_df['pessoas_vacinadas_por_cent'].notna()]\n latest_day_data = filtered_df.iloc[-1:]\n df = df.append(latest_day_data, ignore_index=True)\n\ndf.to_csv('./pessoas_vacinadas_por_cent.csv')\n\nfig_pessoas_vacinadas = go.Figure(data=go.Choropleth(\n locations = df['codigo_iso'],\n z = df['pessoas_vacinadas_por_cent'],\n text = df['pais'],\n colorscale = 'YlGnBu',\n autocolorscale=False,\n marker_line_width=0.5,\n colorbar_title = '% pessoas<br>vacinadas',\n))\nconfig = {\n 'modeBarButtonsToRemove': ['lasso2d','zoomInGeo','zoomOutGeo']\n }\n\nfig_pessoas_vacinadas.update_layout(\n title_text='Covid-19 World Vaccination - Porcentagem de pessoas que tomaram pelo menos uma dose da vacina',\n geo=dict(\n showframe=False,\n showcoastlines=False,\n projection_type='equirectangular'\n )\n)\n\nfig_pessoas_vacinadas.data[0].update(zmin=0, zmax=60)\n\nfig_pessoas_vacinadas.show(config=config)",
"_____no_output_____"
],
[
"df2 = pd.DataFrame()\n\nfor i in range(len(listof_dataframe_countries)):\n country_name = listof_dataframe_countries[i][0]\n country_df = listof_dataframe_countries[i][1]\n filtered_df = country_df[country_df['total_vacinacoes'].notna()]\n latest_day_data = filtered_df.iloc[-1:]\n df2 = df2.append(latest_day_data, ignore_index=True)\n\ndf2.to_csv('./total_vacinacoes.csv')\n\nfig_total_doses = go.Figure(data=go.Choropleth(\n locations = df2['codigo_iso'],\n z = df2['total_vacinacoes'],\n text = df2['pais'],\n colorscale = 'Blues',\n autocolorscale=False,\n marker_line_width=0.5,\n colorbar_title = 'Total<br>vacinas<br>(milhões)',\n))\nconfig = {\n 'modeBarButtonsToRemove': ['lasso2d','zoomInGeo','zoomOutGeo']\n }\n\nfig_total_doses.update_layout(\n title_text='Covid-19 World Vaccination - Total de doses aplicadas',\n geo=dict(\n showframe=False,\n showcoastlines=False,\n projection_type='equirectangular'\n )\n)\n\nfig_total_doses.show(config=config)",
"_____no_output_____"
],
[
"df3 = pd.DataFrame()\n\nfor i in range(len(listof_dataframe_countries)):\n country_name = listof_dataframe_countries[i][0]\n country_df = listof_dataframe_countries[i][1]\n filtered_df = country_df[country_df['vacinacoes_diarias_por_milhao'].notna()]\n latest_day_data = filtered_df.iloc[-1:]\n df3 = df3.append(latest_day_data, ignore_index=True)\n\ndf3.to_csv('./vac_diarias_milhao.csv')\n\nfig_vac_diarias_milhao = go.Figure(data=go.Choropleth(\n locations = df3['codigo_iso'],\n z = df3['vacinacoes_diarias_por_milhao'],\n text = df3['pais'],\n colorscale = 'YlGnBu',\n autocolorscale=False,\n reversescale=False,\n marker_line_width=0.5,\n colorbar_title = 'vacinações<br>diárias<br>p/ milhão',\n))\nconfig = {\n 'modeBarButtonsToRemove': ['lasso2d','zoomInGeo','zoomOutGeo']\n }\n\nfig_vac_diarias_milhao.update_layout(\n title_text='Covid-19 World Vaccination - Vacinações diárias por milhão',\n geo=dict(\n showframe=False,\n showcoastlines=False,\n projection_type='equirectangular'\n )\n)\n\nfig_vac_diarias_milhao.data[0].update(zmin=500, zmax=15000)\n\nfig_vac_diarias_milhao.show(config=config)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04c44c76232fe09a12c23bea5989917d1794c7d | 5,682 | ipynb | Jupyter Notebook | examples/Summarisation.ipynb | lucky7323/backprop | 4daa756f3a46600d4dfa0631bb3607237df1fed6 | [
"Apache-2.0"
] | 200 | 2021-03-22T17:29:46.000Z | 2022-03-20T21:58:31.000Z | examples/Summarisation.ipynb | lucky7323/backprop | 4daa756f3a46600d4dfa0631bb3607237df1fed6 | [
"Apache-2.0"
] | 6 | 2021-04-15T06:48:32.000Z | 2021-12-21T08:07:49.000Z | examples/Summarisation.ipynb | lucky7323/backprop | 4daa756f3a46600d4dfa0631bb3607237df1fed6 | [
"Apache-2.0"
] | 15 | 2021-03-25T05:25:43.000Z | 2022-01-04T08:12:29.000Z | 48.564103 | 303 | 0.694298 | [
[
[
"# Backprop Core Example: Text Summarisation\n\nText summarisation takes a chunk of text, and extracts the key information.",
"_____no_output_____"
]
],
[
[
"# Set your API key to do inference on Backprop's platform\n# Leave as None to run locally\napi_key = None",
"_____no_output_____"
],
[
"import backprop\n\nsummarisation = backprop.Summarisation(api_key=api_key)",
"_____no_output_____"
],
[
"# Change this up.\ninput_text = \"\"\"\nBritain began its third COVID-19 lockdown on Tuesday with the government calling for one last major national effort to defeat the spread of a virus that has infected an estimated one in 50 citizens before mass vaccinations turn the tide.\nFinance minister Rishi Sunak announced a new package of business grants worth 4.6 billion pounds ($6.2 billion) to help keep people in jobs and firms afloat until measures are relaxed gradually, at the earliest from mid-February but likely later.\n\nBritain has been among the countries worst-hit by COVID-19, with the second highest death toll in Europe and an economy that suffered the sharpest contraction of any in the Group of Seven during the first wave of infections last spring.\n\nPrime Minister Boris Johnson said the latest data showed 2% of the population were currently infected - more than a million people in England.\n\n“When everybody looks at the position, people understand overwhelmingly that we have no choice,” he told a news conference.\n\nMore than 1.3 million people in Britain have already received their first dose of a COVID-19 vaccination, but this is not enough to have an impact on transmission yet.\n\nJohnson announced the new lockdown late on Monday, saying the highly contagious new coronavirus variant first identified in Britain was spreading so fast the National Health Service risked being overwhelmed within 21 days.\n\nIn England alone, some 27,000 people are in hospital with COVID, 40% more than during the first peak in April, with infection numbers expected to rise further after increased socialising during the Christmas period.\n\nSince the start of the pandemic, more than 75,000 people have died in the United Kingdom within 28 days of testing positive for coronavirus, according to official figures. The number of daily new infections passed 60,000 for the first time on Tuesday.\nA Savanta-ComRes poll taken just after Johnson’s address suggested four in five adults in England supported the lockdown.\n\n“I definitely think it was the right decision to make,” said Londoner Kaitlin Colucci, 28. “I just hope that everyone doesn’t struggle too much with having to be indoors again.”\n\nDowning Street said Johnson had cancelled a visit to India later this month to focus on the response to the virus, and Buckingham Palace called off its traditional summer garden parties this year.\n\nnder the new rules in England, schools are closed to most pupils, people should work from home if possible, and all hospitality and non-essential shops are closed. Semi-autonomous executives in Scotland, Wales and Northern Ireland have imposed similar measures.\n\nAs infection rates soar across Europe, other countries are also clamping down on public life. Germany is set to extend its strict lockdown until the end of the month, and Italy will keep nationwide restrictions in place this weekend while relaxing curbs on weekdays.\n\nSunak’s latest package of grants adds to the eye-watering 280 billion pounds in UK government support already announced for this financial year to stave off total economic collapse.\n\nThe new lockdown is likely to cause the economy to shrink again, though not as much as during the first lockdown last spring. JP Morgan economist Allan Monks said he expected the economy to shrink by 2.5% in the first quarter of 2021 -- compared with almost 20% in the second quarter of 2020.\n\nTo end the cycle of lockdowns, the government is pinning its hopes on vaccines. 
It aims to vaccinate all elderly care home residents and their carers, everyone over the age of 70, all frontline health and social care workers, and everyone who is clinically extremely vulnerable by mid-February.\n\"\"\"",
"_____no_output_____"
],
[
"summary = summarisation(input_text)\nprint(summary)",
"Britain begins its third COVID-19 lockdown. Finance minister Rishi Sunak announces a package of business grants. The government is pinning its hopes on vaccines.\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
d04c57c7469d37a8dd1aa6b4cfbd7f397597d4e6 | 29,600 | ipynb | Jupyter Notebook | pydstools_implementation.ipynb | gnouveau/birdsynth | 3d6e3fdd101739961c46fad6a0c5e73e216f7cc8 | [
"MIT"
] | 2 | 2017-04-18T15:17:19.000Z | 2018-01-20T20:50:49.000Z | pydstools_implementation.ipynb | gnouveau/birdsynth | 3d6e3fdd101739961c46fad6a0c5e73e216f7cc8 | [
"MIT"
] | null | null | null | pydstools_implementation.ipynb | gnouveau/birdsynth | 3d6e3fdd101739961c46fad6a0c5e73e216f7cc8 | [
"MIT"
] | null | null | null | 66.666667 | 14,530 | 0.79625 | [
[
[
"%pylab --no-import-all\n%matplotlib inline\nimport PyDSTool as pdt",
"Using matplotlib backend: TkAgg\nPopulating the interactive namespace from numpy and matplotlib\n"
],
[
"ab = np.loadtxt('birdsynth/test/ba_example_ab.dat')\n#ab = np.zeros((40000, 2))\nab[:, 0] += np.random.normal(0, 0.01, len(ab))\n",
"_____no_output_____"
],
[
"t_mom = np.linspace(0, len(ab)/44100, len(ab))\ninputs = pdt.pointset_to_traj(pdt.Pointset(coorddict={'a': ab[:, 1], 'b':ab[:, 0]}, indepvardict={'t': t_mom}))",
"_____no_output_____"
]
],
[
[
"# Jacobian calculation",
"_____no_output_____"
]
],
[
[
"x = pdt.Var('x')\ny = pdt.Var('y')\ngm = pdt.Par('gm')\na = pdt.Par('a')\nb = pdt.Par('b')\nt = pdt.Var('t')",
"_____no_output_____"
],
[
"xdot = pdt.Fun(y, [y], 'xdot')\nydot = pdt.Fun(-a*gm*gm - b*gm*gm*x -gm*gm*x*x*x -gm*x*x*y + gm*gm*x*x - gm*x*y, [x, y], 'ydot')\nF = pdt.Fun([xdot(y), ydot(x, y)], [x,y], 'F')\njac = pdt.Fun(pdt.Diff(F, [x, y]), [t, x, y], 'Jacobian')\njac.simplify()\nprint(jac.eval(t=t, x=x, y=y))",
"[[0,1],[(((-b*gm*gm)-gm*gm*(x*x+x*2*x))-gm*(x*y+x*y)+gm*gm*2*x)-gm*y,(-gm*x*x)-gm*x]]\n"
]
],
[
[
"# Simple model",
"_____no_output_____"
]
],
[
[
"icdict = {'x': 0, 'y': 0}\npardict = {\n 'gm': 2 # g is γ in Boari 2015\n}\nvardict = {\n 'x': xdot(y),\n 'y': ydot(x,y),\n }\n\nargs = pdt.args()\nargs.name = 'birdsynth'\nargs.fnspecs = [jac, xdot, ydot]\nargs.ics = icdict\nargs.pars = pardict\nargs.inputs = inputs\nargs.tdata = [0, 1]\nargs.varspecs = vardict\n\nds = pdt.Generator.Vode_ODEsystem(args)",
"_____no_output_____"
],
[
"ds.haveJacobian()",
"_____no_output_____"
],
[
"traj = ds.compute('demo')",
"_____no_output_____"
],
[
"plt.plot(traj.sample(dt=1/(44100*20))['x'])",
"_____no_output_____"
],
[
"auxdict = {'Pi':(['t', 'x', 'a_'], 'if(t > 0, a_ * x - r * 1, 0)'),\n 'Pt':(['t', 'x', 'a_'], '(1 - r) * Pi(t - 0.5 * T, x, a_)')\n }",
"_____no_output_____"
],
[
"icdict = {'x': 0, 'y': 0, 'o1':0, 'i1':0, 'i3':0}\npardict = {'g': 2400, # g is γ in Boari 2015\n 'T': 0.2,\n 'r': 0.1,\n 'a_p': -540e6,\n 'b_p': -7800,\n 'c_p': 1.8e8,\n 'd_p': 1.2e-2,\n 'e_p': 7.2e-1,\n 'f_p': -0.83e-2,\n 'g_p': -5e2,\n 'h_p': 1e-4\n }\nvardict = {'x': 'y',\n 'y': '-a*Pow(g, 2) - b * Pow(g, 2) * x - Pow(g, 2) * Pow(x, 3) - g * Pow(x, 2) * y + Pow(g, 2) * x * x'\n '- g * x * y',\n 'i1': 'o1',\n 'o1': 'a_p * i1 + b_p * o1 + c_p * i3 + d_p * Pt(t, x, a) + e_p * Pt(t, x, a)',\n 'i3': 'f_p * o1 + g_p * i3 + h_p * Pt(t, x, a)'\n }\n\nargs = pdt.args()\nargs.name = 'birdsynth'\nargs.ics = icdict\nargs.pars = pardict\nargs.fnspecs = auxdict\nargs.inputs = inputs\nargs.tdata = [0, len(ab)/44100]\nargs.varspecs = vardict",
"_____no_output_____"
],
[
"ds = pdt.Generator.Vode_ODEsystem(args)",
"_____no_output_____"
],
[
"traj = ds.compute('demo')",
"_____no_output_____"
],
[
"pts = traj.sample(dt=1/(44100))",
"_____no_output_____"
],
[
"plt.plot(pts['t'], pts['x'])",
"_____no_output_____"
],
[
"x = ds.variables['x']",
"_____no_output_____"
],
[
"y_0 = pdt.Var('-a*Pow(g, 2) - b * Pow(g, 2) * x - Pow(g, 2) * Pow(x, 3) - g * Pow(x, 2) * y + Pow(g, 2) * x * x'\n '- g * x * y', 'y_0')",
"_____no_output_____"
],
[
"Pi(2)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04c5952e6ba247ce17560157bccf2396d020476 | 5,078 | ipynb | Jupyter Notebook | 04-kNN/05-Hyper-Parameters/05-Hyper-Parameters.ipynb | mtianyan/Mtianyan-Play-with-Machine-Learning-Algorithms | 445b5930564f85ba2bccc18ee51fa7f68ef34ddd | [
"Apache-2.0"
] | 7 | 2019-03-24T09:36:14.000Z | 2021-04-17T06:28:15.000Z | 04-kNN/05-Hyper-Parameters/05-Hyper-Parameters.ipynb | mtianyan/Play_with_Machine_Learning | 445b5930564f85ba2bccc18ee51fa7f68ef34ddd | [
"Apache-2.0"
] | null | null | null | 04-kNN/05-Hyper-Parameters/05-Hyper-Parameters.ipynb | mtianyan/Play_with_Machine_Learning | 445b5930564f85ba2bccc18ee51fa7f68ef34ddd | [
"Apache-2.0"
] | 4 | 2020-02-11T15:25:27.000Z | 2021-04-17T06:28:17.000Z | 21.516949 | 96 | 0.491926 | [
[
[
"## 05 超参数",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom sklearn import datasets",
"_____no_output_____"
],
[
"digits = datasets.load_digits()\nX = digits.data\ny = digits.target",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)",
"_____no_output_____"
],
[
"from sklearn.neighbors import KNeighborsClassifier\n\nknn_clf = KNeighborsClassifier(n_neighbors=3)\nknn_clf.fit(X_train, y_train)\nknn_clf.score(X_test, y_test)",
"_____no_output_____"
]
],
[
[
"### 寻找最好的k",
"_____no_output_____"
]
],
[
[
"best_score = 0.0\nbest_k = -1\nfor k in range(1, 11):\n knn_clf = KNeighborsClassifier(n_neighbors=k)\n knn_clf.fit(X_train, y_train)\n score = knn_clf.score(X_test, y_test)\n if score > best_score:\n best_k = k\n best_score = score\n \nprint(\"best_k =\", best_k)\nprint(\"best_score =\", best_score)",
"best_k = 4\nbest_score = 0.991666666667\n"
]
],
[
[
"### 考虑距离?不考虑距离?",
"_____no_output_____"
]
],
[
[
"best_score = 0.0\nbest_k = -1\nbest_method = \"\"\nfor method in [\"uniform\", \"distance\"]:\n for k in range(1, 11):\n knn_clf = KNeighborsClassifier(n_neighbors=k, weights=method)\n knn_clf.fit(X_train, y_train)\n score = knn_clf.score(X_test, y_test)\n if score > best_score:\n best_k = k\n best_score = score\n best_method = method\n \nprint(\"best_method =\", best_method)\nprint(\"best_k =\", best_k)\nprint(\"best_score =\", best_score)",
"best_method = uniform\nbest_k = 4\nbest_score = 0.991666666667\n"
],
[
"sk_knn_clf = KNeighborsClassifier(n_neighbors=4, weights=\"distance\", p=1)\nsk_knn_clf.fit(X_train, y_train)\nsk_knn_clf.score(X_test, y_test)",
"_____no_output_____"
]
],
[
[
"### 搜索明可夫斯基距离相应的p",
"_____no_output_____"
]
],
[
[
"best_score = 0.0\nbest_k = -1\nbest_p = -1\n\nfor k in range(1, 11):\n for p in range(1, 6):\n knn_clf = KNeighborsClassifier(n_neighbors=k, weights=\"distance\", p=p)\n knn_clf.fit(X_train, y_train)\n score = knn_clf.score(X_test, y_test)\n if score > best_score:\n best_k = k\n best_p = p\n best_score = score\n \nprint(\"best_k =\", best_k)\nprint(\"best_p =\", best_p)\nprint(\"best_score =\", best_score)",
"best_k = 3\nbest_p = 2\nbest_score = 0.988888888889\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d04c5b44e7b29196e281f61db5d848f6f856e314 | 23,970 | ipynb | Jupyter Notebook | BouyguesImmobilierKitBigData.ipynb | franblas/ImmobilierKitBigData | be6a5df18c678d974f1b262eff77c53fbbc64091 | [
"MIT"
] | null | null | null | BouyguesImmobilierKitBigData.ipynb | franblas/ImmobilierKitBigData | be6a5df18c678d974f1b262eff77c53fbbc64091 | [
"MIT"
] | null | null | null | BouyguesImmobilierKitBigData.ipynb | franblas/ImmobilierKitBigData | be6a5df18c678d974f1b262eff77c53fbbc64091 | [
"MIT"
] | null | null | null | 41.470588 | 114 | 0.373091 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d04c7688510281403ef2f7953eb27c508995da86 | 161,316 | ipynb | Jupyter Notebook | bhms.ipynb | sfeeney/bhm_lecture | 5965ff7ee8f717ef4dd6d7aa02c4839a9933dfc2 | [
"MIT"
] | 4 | 2019-03-15T10:00:46.000Z | 2021-05-11T14:24:37.000Z | bhms.ipynb | sfeeney/bhm_lecture | 5965ff7ee8f717ef4dd6d7aa02c4839a9933dfc2 | [
"MIT"
] | null | null | null | bhms.ipynb | sfeeney/bhm_lecture | 5965ff7ee8f717ef4dd6d7aa02c4839a9933dfc2 | [
"MIT"
] | 2 | 2019-03-14T23:22:44.000Z | 2019-03-16T21:39:38.000Z | 214.515957 | 42,708 | 0.891052 | [
[
[
"# Bayesian Hierarchical Modeling\n\nThis jupyter notebook accompanies the Bayesian Hierarchical Modeling lecture(s) delivered by Stephen Feeney as part of David Hogg's [Computational Data Analysis class](http://dwh.gg/FlatironCDA). As part of the lecture(s) you will be asked to complete a number of tasks, some of which will involve direct coding into the notebook; these sections are marked by task. This notebook requires numpy, matplotlib, scipy, [corner](https://github.com/sfeeney/bhm_lecture.git), [pystan](https://pystan.readthedocs.io/en/latest/getting_started.html) and pickle to run (the last two are required solely for the final task).\n\nThe model we're going to be inferring is below.\n\n<img src=\"bhm_plot.png\" alt=\"drawing\" width=\"500\"/>\n\nWe start with imports...",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\n\n# make sure everything we need is installed if running on Google Colab\ndef is_colab():\n try:\n cfg = get_ipython().config\n if cfg['IPKernelApp']['kernel_class'] == 'google.colab._kernel.Kernel':\n return True\n else:\n return False\n except NameError:\n return False\nif is_colab():\n !pip install --quiet numpy matplotlib scipy corner pystan\n\nimport numpy as np\nimport numpy.random as npr\nimport matplotlib.pyplot as mp\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"... and immediately move to...\n\n## Task 2\n\nIn which I ask you to write a Python function to generate a simulated Cepheid sample using the period-luminosity relation $m_{ij} = \\mu_i + M^* + s\\,\\log p_{ij} + \\epsilon(\\sigma_{\\rm int})$. For simplicity, assume Gaussian priors on everything, Gaussian intrinsic scatter and Gaussian measurement uncertainties. Assume only the first host has a distance modulus estimate.",
"_____no_output_____"
]
],
[
[
"# setup\nn_gal = 2\nn_star = 200\nn_samples = 50000\n\n# PL relation parameters\nabs_bar = -26.0 # mean of standard absolute magnitude prior\nabs_sig = 4.0 # std dev of standard absolute magnitude prior\ns_bar = -1.0 # mean of slope prior\ns_sig = 1.0 # std dev of slope prior\nmu_bar = 30.0 # mean of distance modulus prior\nmu_sig = 5.0 # std dev of distance modulus prior\nm_sig_int = 0.05 # intrinsic scatter, assumed known\n\n# uncertainties\nmu_hat_sig = 0.01 # distance modulus measurement uncertainty\nm_hat_sig = 0.02 # apparent magnitude measurement uncertainty\n\ndef simulate(n_gal, n_star, abs_bar, abs_sig, s_bar, s_sig, mu_bar, mu_sig, mu_hat_sig, m_sig_int, m_hat_sig):\n \n # draw CPL parameters from Gaussian prior with means abs_bar and s_bar and standard deviations\n # abs_sig and s_sig\n #abs_true = abs_bar\n #s_true = s_bar\n abs_true = abs_bar + npr.randn() * abs_sig\n s_true = s_bar + npr.randn() * s_sig\n \n # draw n_gal distance moduli from Gaussian prior with mean mu_bar and standard deviation mu_sig\n # i've chosen to sort here so the closest galaxy is the one with the measured distance modulus\n mu_true = np.sort(mu_bar + npr.randn(n_gal) * mu_sig)\n \n # measure ONLY ONE galaxy's distance modulus noisily. the noise here is assumed Gaussian with\n # zero mean and standard deviation mu_hat_sig\n mu_hat = mu_true[0] + npr.randn() * mu_hat_sig\n \n # draw log periods. these are assumed to be perfectly observed in this model, so they \n # are simply a set of pre-specified numbers. i have chosen to generate new values with \n # each simulation, drawn such that log-periods are uniformly drawn in the range 1-2 (i.e., \n # 10 to 100 days). you can have these for free!\n lp_true = 1.0 + npr.rand(n_gal, n_star)\n \n # draw true apparent magnitudes. these are distributed around the Cepheid period-luminosity \n # relation with Gaussian intrinsic scatter (mean 0, standard deviation m_sig_int)\n m_true = np.zeros((n_gal, n_star))\n for i in range(n_gal):\n m_true[i, :] = mu_true[i] + abs_true + s_true * lp_true[i, :] + npr.randn(n_star) * m_sig_int\n \n # measure the apparent magnitudes noisily, all with the same measurement uncertainty m_hat_sig\n m_hat = m_true + npr.randn(n_gal, n_star) * m_hat_sig\n \n # return!\n return (abs_true, s_true, mu_true, lp_true, m_true, mu_hat, m_hat)",
"_____no_output_____"
]
],
[
[
"Let's check that the simulation generates something sane. A simple test that the magnitude measurements errors are correctly generated.",
"_____no_output_____"
]
],
[
[
"# simulate\nabs_true, s_true, mu_true, lp_true, m_true, mu_hat, m_hat = \\\n simulate(n_gal, n_star, abs_bar, abs_sig, s_bar, s_sig, mu_bar, mu_sig, mu_hat_sig, m_sig_int, m_hat_sig)\n\n# plot difference between true and observed apparent magnitudes. this should be the \n# noise, which is Gaussian distributed with mean zero and std dev m_hat_sig\nouts = mp.hist((m_true - m_hat).flatten())\ndm_grid = np.linspace(np.min(outs[1]), np.max(outs[1]))\nmp.plot(dm_grid, np.exp(-0.5 * (dm_grid/m_hat_sig) ** 2) * np.max(outs[0]))\nmp.xlabel(r'$m_{ij} - \\hat{m}_{ij}$')\nmp.ylabel(r'$N \\left(m_{ij} - \\hat{m}_{ij}\\right)$')",
"_____no_output_____"
]
],
[
[
"And another test that the intrinsic scatter is added as expected.",
"_____no_output_____"
]
],
[
[
"# plot difference between true apparent magnitudes and expected apparent \n# magnitude given a perfect (i.e., intrinsic-scatter-free) period-luminosity \n# relation. this should be the intrinsic scatter, which is Gaussian-\n# distributed with mean zero and std dev m_sig_int\neps = np.zeros((n_gal, n_star))\nfor i in range(n_gal):\n eps[i, :] = mu_true[i] + abs_true + s_true * lp_true[i, :] - m_true[i, :]\nouts = mp.hist(eps.flatten())\ndm_grid = np.linspace(np.min(outs[1]), np.max(outs[1]))\nmp.plot(dm_grid, np.exp(-0.5 * (dm_grid/m_sig_int) ** 2) * np.max(outs[0]))\nmp.xlabel(r'$m_{ij} - \\hat{m}_{ij}$')\nmp.ylabel(r'$N \\left(m_{ij} - \\hat{m}_{ij}\\right)$')",
"_____no_output_____"
]
],
[
[
"## Generalized Least Squares Demo\n\nCoding up the [GLS estimator](https://en.wikipedia.org/wiki/Generalized_least_squares) is a little involved, so I've done it for you below. Note that, rather unhelpfully, I've done so in a different order than in the notes. When I get a chance I will re-write. For now, you can simply evaluate the cells and bask in the glory of the fastest inference you will ever do!",
"_____no_output_____"
]
],
[
[
"def gls_fit(n_gal, n_star, mu_hat, mu_hat_sig, m_hat, m_sig_int, m_hat_sig, \\\n lp_true, priors=None):\n\n # setup\n # n_obs is one anchor constraint and one magnitude per Cepheid.\n # n_par is one mu per Cepheid host and 2 CPL params. if priors \n # are used, we add on n_gal + 2 observations: one prior constraint \n # on each host distance modulus and CPL parameter\n n_obs = n_gal * n_star + 1\n n_par = n_gal + 2\n if priors is not None:\n n_obs += n_gal + 2\n data = np.zeros(n_obs)\n design = np.zeros((n_obs, n_par))\n cov_inv = np.zeros((n_obs, n_obs))\n \n # anchor\n data[0] = mu_hat\n design[0, 0] = 1.0\n cov_inv[0, 0] = 1.0 / mu_hat_sig ** 2\n\n # Cepheids\n k = 1\n for i in range(0, n_gal):\n for j in range(0, n_star):\n\n data[k] = m_hat[i, j]\n design[k, i] = 1.0\n design[k, n_gal] = 1.0\n design[k, n_gal + 1] = lp_true[i, j]\n cov_inv[k, k] = 1.0 / (m_hat_sig ** 2 + m_sig_int ** 2)\n k += 1\n \n # and, finally, priors if desired\n if priors is not None:\n abs_bar, abs_sig, s_bar, s_sig, mu_bar, mu_sig = priors\n for i in range(n_gal):\n data[k] = mu_bar\n design[k, i] = 1.0\n cov_inv[k, k] = 1.0 / mu_sig ** 2\n k += 1\n data[k] = abs_bar\n design[k, n_gal] = 1.0\n cov_inv[k, k] = 1.0 / abs_sig ** 2\n k += 1\n data[k] = s_bar\n design[k, n_gal + 1] = 1.0\n cov_inv[k, k] = 1.0 / s_sig ** 2\n k += 1\n \n # fit and return\n destci = np.dot(design.transpose(), cov_inv)\n pars_cov = np.linalg.inv(np.dot(destci, design))\n pars = np.dot(np.dot(pars_cov, destci), data)\n res = data - np.dot(design, pars)\n dof = n_obs - n_par\n chisq_dof = np.dot(res.transpose(), np.dot(cov_inv, res))\n return pars, pars_cov, chisq_dof",
"_____no_output_____"
],
[
"gls_pars, gls_pars_cov, gls_chisq = gls_fit(n_gal, n_star, mu_hat, mu_hat_sig, m_hat, \\\n m_sig_int, m_hat_sig, lp_true, \\\n priors=[abs_bar, abs_sig, s_bar, s_sig, mu_bar, mu_sig])",
"_____no_output_____"
]
],
[
[
"In order to plot the outputs of the GLS fit we could draw a large number of samples from the resulting multivariate Gaussian posterior and pass them to something like [`corner`](https://corner.readthedocs.io/en/latest/); however, as we have analytic results we might as well use those directly. I've coded up something totally hacky here in order to do so. Information on how to draw confidence ellipses can be found in [Dan Coe's note](https://arxiv.org/pdf/0906.4123.pdf).",
"_____no_output_____"
]
],
[
[
"# this is a hacky function designed to transform the analytic GLS outputs\n# into a corner.py style triangle plot, containing 1D and 2D marginalized\n# posteriors\nimport scipy.stats as sps\nimport matplotlib.patches as mpp\ndef schmorner(par_mean, par_cov, par_true, par_label):\n \n # setup\n par_std = np.sqrt(np.diag(par_cov))\n x_min = par_mean[0] - 3.5 * par_std[0]\n x_max = par_mean[0] + 3.5 * par_std[0]\n y_min = par_mean[1] - 3.5 * par_std[1]\n y_max = par_mean[1] + 3.5 * par_std[1]\n fig, axes = mp.subplots(2, 2)\n \n # 1D marge\n x = np.linspace(x_min, x_max, 100)\n axes[0, 0].plot(x, sps.norm.pdf(x, par_mean[0], par_std[0]), 'k')\n axes[0, 0].axvline(par_true[0])\n axes[1, 0].axvline(par_true[0])\n axes[0, 0].set_xticklabels([])\n axes[0, 0].set_yticklabels([])\n axes[0, 0].set_xlim(x_min, x_max)\n axes[0, 0].set_title(par_label[0])\n axes[0, 0].set_title(par_label[0] + r'$=' + '{:6.2f}'.format(par_mean[0]) + \\\n r'\\pm' + '{:4.2f}'.format(par_std[0]) + r'$')\n y = np.linspace(y_min, y_max, 100)\n axes[1, 1].plot(y, sps.norm.pdf(y, par_mean[1], par_std[1]), 'k')\n axes[1, 0].axhline(par_true[1])\n axes[1, 1].axvline(par_true[1])\n axes[1, 1].tick_params(labelleft=False)\n axes[1, 1].set_xlim(y_min, y_max)\n for tick in axes[1, 1].get_xticklabels():\n tick.set_rotation(45)\n axes[1, 1].set_title(par_label[1] + r'$=' + '{:5.2f}'.format(par_mean[1]) + \\\n r'\\pm' + '{:4.2f}'.format(par_std[1]) + r'$')\n\n # 2D marge\n vals, vecs = np.linalg.eig(par_cov)\n theta = np.degrees(np.arctan2(*vecs[::-1, 0]))\n w, h = 2 * np.sqrt(vals)\n ell = mpp.Ellipse(xy=par_mean, width=w, height=h,\n angle=theta, color='k')\n ell.set_facecolor(\"none\")\n axes[1, 0].add_artist(ell)\n ell = mpp.Ellipse(xy=par_mean, width=2*w, height=2*h,\n angle=theta, color='k')\n ell.set_facecolor(\"none\")\n axes[1, 0].add_artist(ell)\n axes[1, 0].set_xlim(x_min, x_max)\n axes[1, 0].set_ylim(y_min, y_max)\n for tick in axes[1, 0].get_xticklabels():\n tick.set_rotation(45)\n for tick in axes[1, 0].get_yticklabels():\n tick.set_rotation(45)\n axes[1, 0].set_xlabel(par_label[0])\n axes[1, 0].set_ylabel(par_label[1])\n fig.delaxes(axes[0, 1])\n fig.subplots_adjust(hspace=0, wspace=0)\n \ntest = schmorner(gls_pars[n_gal:], gls_pars_cov[n_gal:, n_gal:], \\\n [abs_true, s_true], [r'$M$', r'$s$'])\n#\n#lazy = npr.multivariate_normal(gls_pars[n_gal:], gls_pars_cov[n_gal:, n_gal:], n_samples)\n#fig = corner.corner(samples.T, labels=[r\"$M$\", r\"$s$\"],\n# show_titles=True, truths=[abs_bar, s_bar])",
"_____no_output_____"
]
],
[
[
"## Task 3B\n\nBelow I've written the majority of a Gibbs sampler to infer the hyper-parameters of the Cepheid PL relation from our simulated sample. One component is missing: drawing from the conditional distribution of the standard absolute magnitude, $M^*$. Please fill it in, using the results of whiteboard/paper Task 3A. ",
"_____no_output_____"
]
],
[
[
"def gibbs_sample(n_samples, n_gal, n_star, abs_bar, abs_sig, \\\n s_bar, s_sig, mu_bar, mu_sig, mu_hat_sig, \\\n m_sig_int, m_hat_sig, mu_hat, lp_true, m_hat):\n \n # storage\n abs_samples = np.zeros(n_samples)\n s_samples = np.zeros(n_samples)\n mu_samples = np.zeros((n_gal, n_samples))\n m_samples = np.zeros((n_gal, n_star, n_samples))\n \n # initialize sampler\n abs_samples[0] = abs_bar + npr.randn() * abs_sig\n s_samples[0] = s_bar + npr.randn() * s_sig\n mu_samples[:, 0] = mu_bar + npr.randn(n_gal) * mu_bar\n for i in range(n_gal):\n m_samples[i, :, 0] = mu_samples[i, 0] + abs_samples[0] + s_samples[0] * lp_true[i, :]\n \n # sample!\n for k in range(1, n_samples):\n \n # sample abs mag\n abs_sig_pl = m_sig_int / np.sqrt(n_gal * n_star)\n abs_bar_pl = 0.0\n for j in range(n_gal):\n abs_bar_pl += np.sum(m_samples[j, :, k - 1] - mu_samples[j, k - 1] - s_samples[k - 1] * lp_true[j, :])\n abs_bar_pl /= (n_gal * n_star)\n abs_std = np.sqrt((abs_sig * abs_sig_pl) ** 2 / (abs_sig ** 2 + abs_sig_pl ** 2))\n abs_mean = (abs_sig ** 2 * abs_bar_pl + abs_sig_pl ** 2 * abs_bar) / \\\n (abs_sig ** 2 + abs_sig_pl ** 2)\n abs_samples[k] = abs_mean + npr.randn() * abs_std\n \n # sample slope\n s_sig_pl = m_sig_int / np.sqrt(np.sum(lp_true ** 2))\n s_bar_pl = 0.0\n for j in range(n_gal):\n s_bar_pl += np.sum((m_samples[j, :, k - 1] - mu_samples[j, k - 1] - abs_samples[k]) * lp_true[j, :])\n s_bar_pl /= np.sum(lp_true ** 2)\n s_std = np.sqrt((s_sig * s_sig_pl) ** 2 / (s_sig ** 2 + s_sig_pl ** 2))\n s_mean = (s_sig ** 2 * s_bar_pl + s_sig_pl ** 2 * s_bar) / \\\n (s_sig ** 2 + s_sig_pl ** 2)\n s_samples[k] = s_mean + npr.randn() * s_std\n \n # sample apparent magnitudes\n for j in range(n_gal):\n m_mean_pl = mu_samples[j, k - 1] + abs_samples[k] + s_samples[k] * lp_true[j, :]\n m_std = np.sqrt(m_sig_int ** 2 * m_hat_sig ** 2 / (m_sig_int ** 2 + m_hat_sig ** 2))\n m_mean = (m_sig_int ** 2 * m_hat[j, :] + m_hat_sig ** 2 * m_mean_pl) / (m_sig_int ** 2 + m_hat_sig ** 2)\n m_samples[j, :, k] = m_mean + npr.randn(n_star) * m_std\n \n # sample distance moduli\n mu_sig_pl = m_sig_int / np.sqrt(n_star)\n mu_bar_pl = np.mean(m_samples[0, :, k] - abs_samples[k] - s_samples[k] * lp_true[0, :])\n mu_var = 1.0 / (1.0 / mu_sig ** 2 + 1.0 / mu_hat_sig ** 2 + 1.0 / mu_sig_pl ** 2)\n mu_mean = (mu_bar / mu_sig ** 2 + mu_hat / mu_hat_sig ** 2 + mu_bar_pl / mu_sig_pl ** 2) * mu_var\n mu_samples[0, k] = mu_mean + npr.randn() * np.sqrt(mu_var)\n for j in range(1, n_gal):\n mu_sig_pl = m_sig_int / np.sqrt(n_star)\n mu_bar_pl = np.mean(m_samples[j, :, k] - abs_samples[k] - s_samples[k] * lp_true[j, :])\n mu_std = (mu_sig * mu_sig_pl) ** 2 / (mu_sig ** 2 + mu_sig_pl ** 2)\n mu_mean = (mu_sig ** 2 * mu_bar_pl + mu_sig_pl ** 2 * mu_bar) / \\\n (mu_sig ** 2 + mu_sig_pl ** 2)\n mu_samples[j, k] = mu_mean + npr.randn() * mu_std\n \n return (abs_samples, s_samples, mu_samples, m_samples)",
"_____no_output_____"
]
],
[
[
"Now let's sample, setting aside the first half of the samples as warmup.",
"_____no_output_____"
]
],
[
[
"all_samples = gibbs_sample(n_samples, n_gal, n_star, abs_bar, abs_sig, \\\n s_bar, s_sig, mu_bar, mu_sig, mu_hat_sig, \\\n m_sig_int, m_hat_sig, mu_hat, lp_true, m_hat)\nn_warmup = int(n_samples / 2)\ng_samples = [samples[n_warmup:] for samples in all_samples]",
"_____no_output_____"
]
],
[
[
"Let's make sure that the absolute magnitude is being inferred as expected. First, generate a trace plot of the absolute magnitude samples (the first entry in `g_samples`), overlaying the ground truth. Then print out the mean and standard deviation of the marginalized absolute magnitude posterior. Recall that marginalizing is as simple as throwing away the samples of all other parameters.",
"_____no_output_____"
]
],
[
[
"mp.plot(g_samples[0])\nmp.axhline(abs_true)\nmp.xlabel('sample')\nmp.ylabel(r'$M^*$')\nprint('Truth {:6.2f}; inferred {:6.2f} +/- {:4.2f}'.format(abs_true, np.mean(g_samples[0]), np.std(g_samples[0])))",
"Truth -30.95; inferred -30.97 +/- 0.02\n"
]
],
[
[
"Now let's generate some marginalized parameter posteriors (by simply discarding all samples of the latent parameters) using DFM's [`corner`](https://corner.readthedocs.io/en/latest/) package. Note the near identical nature of this plot to the `schmorner` plot we generated above.",
"_____no_output_____"
]
],
[
[
"import corner\nsamples = np.stack((g_samples[0], g_samples[1]))\nfig = corner.corner(samples.T, labels=[r\"$M^*$\", r\"$s$\"],\n show_titles=True, truths=[abs_true, s_true])",
"_____no_output_____"
]
],
[
[
"## Task 4\n\nThe final task is to write a [Stan model](https://pystan.readthedocs.io/en/latest/getting_started.html) to infer the parameters of the period-luminosity relation. I've coded up the other two blocks required (`data` and `parameters`), so all that is required is for you to write the joint posterior (factorized into its individual components) in Stan's sampling-statement-based syntax. Essentially all you need are Gaussian sampling statements (`abs_true ~ normal(abs_bar, abs_sig);`) and for loops (`for(i in 1: n_gal){...}`).\n\nWhen you evaluate this cell, Stan will translate your model into `c++` code and compile it. We will then pickle the compiled model so you can re-use it rapidly without recompiling. To do so, please set `recompile = False` in the notebook.",
"_____no_output_____"
]
],
[
[
"import sys\nimport pystan as ps\nimport pickle\n\nstan_code = \"\"\"\ndata {\n int<lower=0> n_gal;\n int<lower=0> n_star;\n real mu_hat;\n real mu_hat_sig;\n real m_hat[n_gal, n_star];\n real m_hat_sig;\n real m_sig_int;\n real lp_true[n_gal, n_star];\n real abs_bar;\n real abs_sig;\n real s_bar;\n real s_sig;\n real mu_bar;\n real mu_sig;\n}\nparameters {\n real mu_true[n_gal];\n real m_true[n_gal, n_star];\n real abs_true;\n real s_true;\n}\nmodel {\n // priors\n abs_true ~ normal(abs_bar, abs_sig);\n s_true ~ normal(s_bar, s_sig);\n mu_true ~ normal(mu_bar, mu_sig);\n \n // whatevers\n for(i in 1: n_gal){\n for(j in 1: n_star){\n m_true[i, j] ~ normal(mu_true[i] + abs_true + s_true * lp_true[i, j], m_sig_int);\n }\n }\n \n // likelihoods\n mu_hat ~ normal(mu_true[1], mu_hat_sig);\n for(i in 1: n_gal){\n for(j in 1: n_star){\n m_hat[i, j] ~ normal(m_true[i, j], m_hat_sig);\n }\n }\n}\n\"\"\"\nn_samples_stan = 5000\nrecompile = True\npkl_fname = 'bhms_stan_model_v{:d}p{:d}p{:d}.pkl'.format(sys.version_info[0], \\\n sys.version_info[1], \\\n sys.version_info[2])\nif recompile:\n stan_model = ps.StanModel(model_code=stan_code)\n with open(pkl_fname, 'wb') as f:\n pickle.dump(stan_model, f)\nelse:\n try:\n with open(pkl_fname, 'rb') as f:\n stan_model = pickle.load(f)\n except EnvironmentError:\n print('ERROR: pickled Stan model (' + pkl_fname + ') not found. ' + \\\n 'Please set recompile = True')",
"INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_ab41b39e55c2f57c74acf30e86ea4ea5 NOW.\n"
]
],
[
[
"Now let's sample...",
"_____no_output_____"
]
],
[
[
"stan_data = {'n_gal': n_gal, 'n_star': n_star, 'mu_hat': mu_hat, 'mu_hat_sig': mu_hat_sig, \\\n 'm_hat': m_hat, 'm_hat_sig': m_hat_sig, 'm_sig_int': m_sig_int, 'lp_true': lp_true, \\\n 'abs_bar': abs_bar, 'abs_sig': abs_sig, 's_bar': s_bar, 's_sig': s_sig, \\\n 'mu_bar': mu_bar, 'mu_sig': mu_sig}\nfit = stan_model.sampling(data=stan_data, iter=n_samples_stan, chains=4)",
"_____no_output_____"
]
],
[
[
"... print out Stan's posterior summary (note this is for _all_ parameters)...",
"_____no_output_____"
]
],
[
[
"samples = fit.extract(permuted=True)\nprint(fit)",
"_____no_output_____"
]
],
[
[
"... and plot the marginalized posterior of the PL parameters, as with the Gibbs sampler.",
"_____no_output_____"
]
],
[
[
"c_samples = np.stack((samples['abs_true'], samples['s_true']))\nfig = corner.corner(c_samples.T, labels=[r\"$M^*$\", r\"$s$\"],\n show_titles=True, truths=[abs_true, s_true])",
"_____no_output_____"
]
],
[
[
"Our work here is done!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d04c8ff0abc3cf119063e338b134382bb4c9f04c | 6,757 | ipynb | Jupyter Notebook | 2/1-3/linked_lists/Detecting Loops.ipynb | ZacksAmber/Udacity-Data-Structure-Algorithms | b5e008ab111b6bc9765acd58d7e1771852eb1d30 | [
"MIT"
] | 1 | 2021-09-27T10:18:14.000Z | 2021-09-27T10:18:14.000Z | 2/1-3/linked_lists/Detecting Loops.ipynb | ZacksAmber/Udacity-Data-Structure-Algorithms | b5e008ab111b6bc9765acd58d7e1771852eb1d30 | [
"MIT"
] | null | null | null | 2/1-3/linked_lists/Detecting Loops.ipynb | ZacksAmber/Udacity-Data-Structure-Algorithms | b5e008ab111b6bc9765acd58d7e1771852eb1d30 | [
"MIT"
] | null | null | null | 27.921488 | 423 | 0.526565 | [
[
[
"# Detecting Loops in Linked Lists\n\nIn this notebook, you'll implement a function that detects if a loop exists in a linked list. The way we'll do this is by having two pointers, called \"runners\", moving through the list at different rates. Typically we have a \"slow\" runner which moves at one node per step and a \"fast\" runner that moves at two nodes per step.\n\nIf a loop exists in the list, the fast runner will eventually move behind the slow runner as it moves to the beginning of the loop. Eventually it will catch up to the slow runner and both runners will be pointing to the same node at the same time. If this happens then you know there is a loop in the linked list. Below is an example where we have a slow runner (the green arrow) and a fast runner (the red arrow).\n\n<center><img src='assets/two_runners_circular.png' alt=\"Visual walk through of the steps described above to determine if a loop exists in a linked list.\" width=300px></center>",
"_____no_output_____"
]
],
[
[
"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n \nclass LinkedList:\n def __init__(self, init_list=None):\n self.head = None\n if init_list:\n for value in init_list:\n self.append(value)\n \n def append(self, value):\n if self.head is None:\n self.head = Node(value)\n return\n \n # Move to the tail (the last node)\n node = self.head\n while node.next:\n node = node.next\n \n node.next = Node(value)\n return\n \n def __iter__(self):\n node = self.head\n while node:\n yield node.value\n node = node.next\n \n def __repr__(self):\n return str([i for i in self])",
"_____no_output_____"
],
[
"list_with_loop = LinkedList([2, -1, 3, 0, 5])\n\n# Creating a loop where the last node points back to the second node\nloop_start = list_with_loop.head.next\n\nnode = list_with_loop.head\nwhile node.next: \n node = node.next \nnode.next = loop_start",
"_____no_output_____"
],
[
"# You will encouter the unlimited loop\n# Click on stop\n# Then right click on `clear outpit`\nfor i in list_with_loop:\n print(i)",
"_____no_output_____"
]
],
[
[
"### Write the function definition here\n**Exercise:** Given a linked list, implement a function `iscircular` that returns `True` if a loop exists in the list and `False` otherwise.",
"_____no_output_____"
]
],
[
[
"def iscircular(linked_list):\n \"\"\"\n Determine whether the Linked List is circular or not\n\n Args:\n linked_list(obj): Linked List to be checked\n Returns:\n bool: Return True if the linked list is circular, return False otherwise\n \"\"\"\n \n # TODO: Write function to check if linked list is circular\n if linked_list is None:\n return False\n \n slow, fast = linked_list.head, linked_list.head\n \n while fast and fast.next:\n slow, fast = slow.next, fast.next.next\n if slow == fast:\n return True\n return False",
"_____no_output_____"
]
],
[
[
"### Let's test your function",
"_____no_output_____"
]
],
[
[
"iscircular(list_with_loop)",
"_____no_output_____"
],
[
"# Test Cases\n\n# Create another circular linked list\nsmall_loop = LinkedList([0])\nsmall_loop.head.next = small_loop.head\n\nprint (\"Pass\" if iscircular(list_with_loop) else \"Fail\") # Pass\nprint (\"Pass\" if iscircular(LinkedList([-4, 7, 2, 5, -1])) else \"Fail\") # Fail\nprint (\"Pass\" if iscircular(LinkedList([1])) else \"Fail\") # Fail\nprint (\"Pass\" if iscircular(small_loop) else \"Fail\") # Pass\nprint (\"Pass\" if iscircular(LinkedList([])) else \"Fail\") # Fail\n",
"Pass\nFail\nFail\nPass\nFail\n"
]
],
[
[
"<span class=\"graffiti-highlight graffiti-id_tuhz4y1-id_fy0906u\"><i></i><button>Show Solution</button></span>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
d04c95e8e999826790914744e92cbbd21e3e2c24 | 1,972 | ipynb | Jupyter Notebook | nbs/utils/utils.normalize.ipynb | sparsh-ai/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | null | null | null | nbs/utils/utils.normalize.ipynb | sparsh-ai/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | 1 | 2022-01-12T05:40:57.000Z | 2022-01-12T05:40:57.000Z | nbs/utils/utils.normalize.ipynb | RecoHut-Projects/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | null | null | null | 20.757895 | 77 | 0.476166 | [
[
[
"# default_exp utils.normalize",
"_____no_output_____"
]
],
[
[
"# Normalize\n> Data normalization methods.",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbdev.showdoc import *",
"_____no_output_____"
],
[
"#export\ndef simple_normalize(data, method='minmax', target_column='RATING'):\n\n zscore = lambda x: (x - x.mean()) / x.std()\n minmax = lambda x: (x - x.min()) / (x.max() - x.min())\n\n if method=='minmax':\n norm = data.groupby('USERID')[target_column].transform(minmax)\n elif method=='zscore':\n norm = data.groupby('USERID')[target_column].transform(zscore)\n \n data.loc[:,target_column] = norm\n \n return data",
"_____no_output_____"
],
[
"#hide\n!pip install -q watermark\n%reload_ext watermark\n%watermark -a \"Sparsh A.\" -m -iv -u -t -d",
"Author: Sparsh A.\n\nLast updated: 2021-12-18 08:35:26\n\nCompiler : GCC 7.5.0\nOS : Linux\nRelease : 5.4.104+\nMachine : x86_64\nProcessor : x86_64\nCPU cores : 2\nArchitecture: 64bit\n\nIPython: 5.5.0\n\n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d04ca0f19c84c6d688e349a0618b761862fc4cc8 | 58,248 | ipynb | Jupyter Notebook | Quiz/m4_multifactor_models/PCA_3D copy.ipynb | jcrangel/AI-for-Trading | c3b865e992f8eb8deda91e7641428eef1d343636 | [
"Apache-2.0"
] | null | null | null | Quiz/m4_multifactor_models/PCA_3D copy.ipynb | jcrangel/AI-for-Trading | c3b865e992f8eb8deda91e7641428eef1d343636 | [
"Apache-2.0"
] | null | null | null | Quiz/m4_multifactor_models/PCA_3D copy.ipynb | jcrangel/AI-for-Trading | c3b865e992f8eb8deda91e7641428eef1d343636 | [
"Apache-2.0"
] | null | null | null | 93.646302 | 32,050 | 0.774104 | [
[
[
"import numpy as np\nfrom scipy.stats import norm\nimport matplotlib.pylab as plt\nimport pandas as pd\nfrom bokeh.layouts import row, widgetbox, layout, gridplot\nfrom bokeh.models import CustomJS, Slider\nfrom bokeh.plotting import figure, output_file, show, ColumnDataSource\nfrom bokeh.models.glyphs import MultiLine\nfrom bokeh.io import output_notebook\nfrom bokeh.models.widgets import Div\n%matplotlib inline\noutput_notebook()",
"_____no_output_____"
],
[
"num_data = 10\nX = norm.rvs(size=(num_data,3), random_state=42)\n#X = np.dot(Y, np.linalg.cholesky([[1, 0.6], [0.6, 0.6]]))\nm = X.mean(axis=0)\nX = X - m",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nax.scatter(X[:,0], X[:,1], X[:,2])\n\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\n\nplt.show()",
"_____no_output_____"
],
[
"a, b = np.linalg.eig(np.cov(X.T));",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"from sklearn.decomposition import PCA\npca = PCA(n_components=3)\npca.fit(X)\nprint(pca.components_) \nprint(pca.explained_variance_) ",
"[[-0.57561026 -0.48303245 0.65981246]\n [-0.48111412 -0.45240071 -0.75090798]\n [ 0.66121255 -0.74967543 0.02801275]]\n[1.139166 0.78744362 0.34832021]\n"
],
[
"X_star = pca.transform(X)",
"_____no_output_____"
],
[
"X_star",
"_____no_output_____"
],
[
"# keep projections onto first two pcs\nF_2 = np.dot(pca.components_[0:2,:], X.T)",
"_____no_output_____"
],
[
"np.dot(F_2, F_2.T)",
"_____no_output_____"
],
[
"# keep projection onto first pc\nF_1 = np.dot(pca.components_[0,:], X.T)",
"_____no_output_____"
],
[
"F_1",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"XF = np.outer(pca.components_[0,:].T, F_1)\nXF",
"_____no_output_____"
],
[
"resid = X.T - XF",
"_____no_output_____"
],
[
"resid",
"_____no_output_____"
],
[
"np.dot(resid, resid.T)",
"_____no_output_____"
],
[
"from sklearn.decomposition import PCA\n\nclass RiskModelPCA():\n \n ANN_FACTOR = 252\n \n def __init__(self, num_factors):\n self._num_factors = num_factors\n self.num_stocks_ = None\n self.factor_betas_ = None\n self.factor_returns_ = None\n self.common_returns_ = None\n self.residuals_ = None\n self.factor_cov_matrix_ = None\n self.idio_var_matrix_ = None\n self.explained_variance_ratio_ = None\n\n def fit(self, returns):\n self.num_stocks_ = len(returns.columns)\n mod = PCA(n_components=self._num_factors, svd_solver='full')\n mod.fit(returns)\n \n self.factor_betas_ = pd.DataFrame(\n data=mod.components_.T,\n index=returns.columns\n )\n \n self.factor_returns_ = pd.DataFrame(\n data=mod.transform(returns),\n index=returns.index\n )\n \n self.explained_variance_ratio_ = mod.explained_variance_ratio_\n \n self.common_returns_ = pd.DataFrame(\n data=np.dot(self.factor_returns_, self.factor_betas_.T),\n index=returns.index\n )\n self.common_returns_.columns = returns.columns\n \n self.residuals_ = (returns - self.common_returns_)\n \n self.factor_cov_matrix_ = np.diag(\n self.factor_returns_.var(axis=0, ddof=1)*RiskModelPCA.ANN_FACTOR\n )\n \n self.idio_var_matrix_ = pd.DataFrame(\n data=np.diag(np.var(self.residuals_))*RiskModelPCA.ANN_FACTOR,\n index=returns.columns\n )\n \n self.idio_var_vector_ = pd.DataFrame(\n data=np.diag(self.idio_var_matrix_.values),\n index=returns.columns\n )\n \n self.idio_var_matrix_.columns = index=returns.columns\n\n def get_factor_exposures(self, weights):\n F = self.factor_betas_.loc[weights.index]\n return F.T.dot(weights)\n\n def predict(self, weights):\n \"\"\" Calculates expected portfolio risk as sqrt(h'XFX'h + h'Sh).\n This will fail if your portfolio has asset weights not in the risk model\"\"\"\n all_assets = pd.DataFrame(\n data=np.repeat(0, self.num_stocks_),\n index=self.factor_betas_.index)\n all_assets.loc[weights.index] = weights\n \n \n h = all_assets\n X = self.factor_betas_\n F = self.factor_cov_matrix_\n S = self.idio_var_matrix_\n \n return np.sqrt(h.T.dot(X).dot(F).dot(X.T).dot(h) + h.T.dot(S).dot(h))[0].values[0]\n\n",
"_____no_output_____"
],
[
"rm = RiskModelPCA(1)\nrm.fit(pd.DataFrame(X))",
"_____no_output_____"
],
[
"rm.idio_var_matrix_/252",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04ca4208005b1f4d4dd16adc0edd8d0ea9f973d | 71,892 | ipynb | Jupyter Notebook | notebooks/sqa_predictions.ipynb | aniket371/tapas | f2ec648f5ea8112d1c5dd50040484f36b0719bb1 | [
"Apache-2.0"
] | null | null | null | notebooks/sqa_predictions.ipynb | aniket371/tapas | f2ec648f5ea8112d1c5dd50040484f36b0719bb1 | [
"Apache-2.0"
] | null | null | null | notebooks/sqa_predictions.ipynb | aniket371/tapas | f2ec648f5ea8112d1c5dd50040484f36b0719bb1 | [
"Apache-2.0"
] | null | null | null | 62.897638 | 1,552 | 0.514202 | [
[
[
"<a href=\"https://colab.research.google.com/github/google-research/tapas/blob/master/notebooks/sqa_predictions.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"##### Copyright 2020 The Google AI Language Team Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");",
"_____no_output_____"
]
],
[
[
"# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"Running a Tapas fine-tuned checkpoint\n---\nThis notebook shows how to load and make predictions with TAPAS model, which was introduced in the paper: [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349)",
"_____no_output_____"
],
[
"# Clone and install the repository\n",
"_____no_output_____"
],
[
"First, let's install the code.",
"_____no_output_____"
]
],
[
[
"! pip install tapas-table-parsing",
"Collecting tapas-table-parsing\n Downloading tapas_table_parsing-0.0.1.dev0-py3-none-any.whl (195 kB)\n\u001b[?25l\r\u001b[K |█▊ | 10 kB 22.3 MB/s eta 0:00:01\r\u001b[K |███▍ | 20 kB 28.7 MB/s eta 0:00:01\r\u001b[K |█████ | 30 kB 16.4 MB/s eta 0:00:01\r\u001b[K |██████▊ | 40 kB 11.4 MB/s eta 0:00:01\r\u001b[K |████████▍ | 51 kB 5.7 MB/s eta 0:00:01\r\u001b[K |██████████ | 61 kB 6.7 MB/s eta 0:00:01\r\u001b[K |███████████▊ | 71 kB 7.3 MB/s eta 0:00:01\r\u001b[K |█████████████▍ | 81 kB 5.6 MB/s eta 0:00:01\r\u001b[K |███████████████ | 92 kB 6.2 MB/s eta 0:00:01\r\u001b[K |████████████████▊ | 102 kB 6.8 MB/s eta 0:00:01\r\u001b[K |██████████████████▍ | 112 kB 6.8 MB/s eta 0:00:01\r\u001b[K |████████████████████ | 122 kB 6.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████▉ | 133 kB 6.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████▌ | 143 kB 6.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████████▏ | 153 kB 6.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▉ | 163 kB 6.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▌ | 174 kB 6.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▏ | 184 kB 6.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▉| 194 kB 6.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 195 kB 6.8 MB/s \n\u001b[?25hCollecting frozendict==1.2\n Downloading frozendict-1.2.tar.gz (2.6 kB)\nCollecting pandas~=1.0.0\n Downloading pandas-1.0.5-cp37-cp37m-manylinux1_x86_64.whl (10.1 MB)\n\u001b[K |████████████████████████████████| 10.1 MB 51.1 MB/s \n\u001b[?25hCollecting tensorflow-probability==0.10.1\n Downloading tensorflow_probability-0.10.1-py2.py3-none-any.whl (3.5 MB)\n\u001b[K |████████████████████████████████| 3.5 MB 43.8 MB/s \n\u001b[?25hCollecting nltk~=3.5\n Downloading nltk-3.7-py3-none-any.whl (1.5 MB)\n\u001b[K |████████████████████████████████| 1.5 MB 47.9 MB/s \n\u001b[?25hCollecting scikit-learn~=0.22.1\n Downloading scikit_learn-0.22.2.post1-cp37-cp37m-manylinux1_x86_64.whl (7.1 MB)\n\u001b[K |████████████████████████████████| 7.1 MB 36.9 MB/s \n\u001b[?25hCollecting kaggle<1.5.8\n Downloading kaggle-1.5.6.tar.gz (58 kB)\n\u001b[K |████████████████████████████████| 58 kB 5.4 MB/s \n\u001b[?25hCollecting tf-models-official~=2.2.0\n Downloading tf_models_official-2.2.2-py2.py3-none-any.whl (711 kB)\n\u001b[K |████████████████████████████████| 711 kB 53.0 MB/s \n\u001b[?25hCollecting tensorflow~=2.2.0\n Downloading tensorflow-2.2.3-cp37-cp37m-manylinux2010_x86_64.whl (516.4 MB)\n\u001b[K |████████████████████████████████| 516.4 MB 17 kB/s \n\u001b[?25hCollecting apache-beam[gcp]==2.20.0\n Downloading apache_beam-2.20.0-cp37-cp37m-manylinux1_x86_64.whl (3.5 MB)\n\u001b[K |████████████████████████████████| 3.5 MB 45.4 MB/s \n\u001b[?25hCollecting tf-slim~=1.1.0\n Downloading tf_slim-1.1.0-py2.py3-none-any.whl (352 kB)\n\u001b[K |████████████████████████████████| 352 kB 56.0 MB/s \n\u001b[?25hRequirement already satisfied: future<1.0.0,>=0.16.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (0.16.0)\nRequirement already satisfied: pytz>=2018.3 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (2018.9)\nRequirement already satisfied: numpy<2,>=1.14.3 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.21.5)\nCollecting fastavro<0.22,>=0.21.4\n Downloading fastavro-0.21.24-cp37-cp37m-manylinux1_x86_64.whl (1.2 MB)\n\u001b[K |████████████████████████████████| 1.2 MB 36.6 MB/s 
\n\u001b[?25hRequirement already satisfied: protobuf<4,>=3.5.0.post1 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (3.17.3)\nCollecting dill<0.3.2,>=0.3.1.1\n Downloading dill-0.3.1.1.tar.gz (151 kB)\n\u001b[K |████████████████████████████████| 151 kB 42.9 MB/s \n\u001b[?25hCollecting httplib2<=0.12.0,>=0.8\n Downloading httplib2-0.12.0.tar.gz (218 kB)\n\u001b[K |████████████████████████████████| 218 kB 50.0 MB/s \n\u001b[?25hCollecting oauth2client<4,>=2.0.1\n Downloading oauth2client-3.0.0.tar.gz (77 kB)\n\u001b[K |████████████████████████████████| 77 kB 5.8 MB/s \n\u001b[?25hCollecting mock<3.0.0,>=1.0.1\n Downloading mock-2.0.0-py2.py3-none-any.whl (56 kB)\n\u001b[K |████████████████████████████████| 56 kB 4.7 MB/s \n\u001b[?25hCollecting pymongo<4.0.0,>=3.8.0\n Downloading pymongo-3.12.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (508 kB)\n\u001b[K |████████████████████████████████| 508 kB 31.5 MB/s \n\u001b[?25hCollecting hdfs<3.0.0,>=2.1.0\n Downloading hdfs-2.7.0-py3-none-any.whl (34 kB)\nCollecting typing-extensions<3.8.0,>=3.7.0\n Downloading typing_extensions-3.7.4.3-py3-none-any.whl (22 kB)\nCollecting avro-python3!=1.9.2,<1.10.0,>=1.8.1\n Downloading avro-python3-1.9.2.1.tar.gz (37 kB)\nRequirement already satisfied: crcmod<2.0,>=1.7 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.7)\nRequirement already satisfied: python-dateutil<3,>=2.8.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (2.8.2)\nCollecting pyarrow<0.17.0,>=0.15.1\n Downloading pyarrow-0.16.0-cp37-cp37m-manylinux2014_x86_64.whl (63.1 MB)\n\u001b[K |████████████████████████████████| 63.1 MB 35 kB/s \n\u001b[?25hRequirement already satisfied: grpcio<2,>=1.12.1 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.44.0)\nRequirement already satisfied: pydot<2,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.3.0)\nCollecting google-cloud-dlp<=0.13.0,>=0.12.0\n Downloading google_cloud_dlp-0.13.0-py2.py3-none-any.whl (151 kB)\n\u001b[K |████████████████████████████████| 151 kB 51.1 MB/s \n\u001b[?25hRequirement already satisfied: google-cloud-core<2,>=0.28.1 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.0.3)\nCollecting google-cloud-bigtable<1.1.0,>=0.31.1\n Downloading google_cloud_bigtable-1.0.0-py2.py3-none-any.whl (232 kB)\n\u001b[K |████████████████████████████████| 232 kB 55.5 MB/s \n\u001b[?25hCollecting google-cloud-language<2,>=1.3.0\n Downloading google_cloud_language-1.3.0-py2.py3-none-any.whl (83 kB)\n\u001b[K |████████████████████████████████| 83 kB 1.7 MB/s \n\u001b[?25hCollecting google-cloud-vision<0.43.0,>=0.38.0\n Downloading google_cloud_vision-0.42.0-py2.py3-none-any.whl (435 kB)\n\u001b[K |████████████████████████████████| 435 kB 52.1 MB/s \n\u001b[?25hRequirement already satisfied: google-cloud-bigquery<=1.24.0,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.21.0)\nCollecting grpcio-gcp<1,>=0.2.2\n Downloading grpcio_gcp-0.2.2-py2.py3-none-any.whl (9.4 kB)\nCollecting google-cloud-spanner<1.14.0,>=1.13.0\n Downloading google_cloud_spanner-1.13.0-py2.py3-none-any.whl (212 kB)\n\u001b[K |████████████████████████████████| 212 kB 68.0 MB/s \n\u001b[?25hCollecting google-cloud-datastore<1.8.0,>=1.7.1\n Downloading 
google_cloud_datastore-1.7.4-py2.py3-none-any.whl (82 kB)\n\u001b[K |████████████████████████████████| 82 kB 1.1 MB/s \n\u001b[?25hCollecting cachetools<4,>=3.1.0\n Downloading cachetools-3.1.1-py2.py3-none-any.whl (11 kB)\nCollecting google-cloud-videointelligence<1.14.0,>=1.8.0\n Downloading google_cloud_videointelligence-1.13.0-py2.py3-none-any.whl (177 kB)\n\u001b[K |████████████████████████████████| 177 kB 64.9 MB/s \n\u001b[?25hCollecting google-cloud-pubsub<1.1.0,>=0.39.0\n Downloading google_cloud_pubsub-1.0.2-py2.py3-none-any.whl (118 kB)\n\u001b[K |████████████████████████████████| 118 kB 68.4 MB/s \n\u001b[?25hCollecting google-apitools<0.5.29,>=0.5.28\n Downloading google-apitools-0.5.28.tar.gz (172 kB)\n\u001b[K |████████████████████████████████| 172 kB 65.3 MB/s \n\u001b[?25hRequirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from tensorflow-probability==0.10.1->tapas-table-parsing) (4.4.2)\nRequirement already satisfied: gast>=0.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow-probability==0.10.1->tapas-table-parsing) (0.5.3)\nRequirement already satisfied: cloudpickle==1.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow-probability==0.10.1->tapas-table-parsing) (1.3.0)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-probability==0.10.1->tapas-table-parsing) (1.15.0)\nCollecting fasteners>=0.14\n Downloading fasteners-0.17.3-py3-none-any.whl (18 kB)\nRequirement already satisfied: google-resumable-media!=0.4.0,<0.5.0dev,>=0.3.1 in /usr/local/lib/python3.7/dist-packages (from google-cloud-bigquery<=1.24.0,>=1.6.0->apache-beam[gcp]==2.20.0->tapas-table-parsing) (0.4.1)\nRequirement already satisfied: google-api-core[grpc]<2.0.0dev,>=1.14.0 in /usr/local/lib/python3.7/dist-packages (from google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.26.3)\nCollecting grpc-google-iam-v1<0.13dev,>=0.12.3\n Downloading grpc-google-iam-v1-0.12.3.tar.gz (13 kB)\nRequirement already satisfied: google-auth<2.0dev,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.35.0)\nRequirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (57.4.0)\nRequirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (21.3)\nRequirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (2.23.0)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (1.56.0)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (4.8)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in 
/usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (0.2.8)\nRequirement already satisfied: docopt in /usr/local/lib/python3.7/dist-packages (from hdfs<3.0.0,>=2.1.0->apache-beam[gcp]==2.20.0->tapas-table-parsing) (0.6.2)\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from kaggle<1.5.8->tapas-table-parsing) (1.24.3)\nRequirement already satisfied: certifi in /usr/local/lib/python3.7/dist-packages (from kaggle<1.5.8->tapas-table-parsing) (2021.10.8)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from kaggle<1.5.8->tapas-table-parsing) (4.63.0)\nRequirement already satisfied: python-slugify in /usr/local/lib/python3.7/dist-packages (from kaggle<1.5.8->tapas-table-parsing) (6.1.1)\nCollecting pbr>=0.11\n Downloading pbr-5.8.1-py2.py3-none-any.whl (113 kB)\n\u001b[K |████████████████████████████████| 113 kB 63.8 MB/s \n\u001b[?25hRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from nltk~=3.5->tapas-table-parsing) (7.1.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from nltk~=3.5->tapas-table-parsing) (1.1.0)\nCollecting regex>=2021.8.3\n Downloading regex-2022.3.15-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (749 kB)\n\u001b[K |████████████████████████████████| 749 kB 45.4 MB/s \n\u001b[?25hRequirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.7/dist-packages (from oauth2client<4,>=2.0.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (0.4.8)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=14.3->google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (3.0.7)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.14.0->google-cloud-bigtable<1.1.0,>=0.31.1->apache-beam[gcp]==2.20.0->tapas-table-parsing) (3.0.4)\nRequirement already satisfied: scipy>=0.17.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn~=0.22.1->tapas-table-parsing) (1.4.1)\nRequirement already satisfied: astunparse==1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.2.0->tapas-table-parsing) (1.6.3)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.2.0->tapas-table-parsing) (1.1.0)\nRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.2.0->tapas-table-parsing) (1.14.0)\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.2.0->tapas-table-parsing) (3.3.0)\nRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.2.0->tapas-table-parsing) (0.37.1)\nCollecting tensorboard<2.3.0,>=2.2.0\n Downloading tensorboard-2.2.2-py3-none-any.whl (3.0 MB)\n\u001b[K |████████████████████████████████| 3.0 MB 39.4 MB/s \n\u001b[?25hCollecting h5py<2.11.0,>=2.10.0\n Downloading 
h5py-2.10.0-cp37-cp37m-manylinux1_x86_64.whl (2.9 MB)\n\u001b[K |████████████████████████████████| 2.9 MB 44.0 MB/s \n\u001b[?25hRequirement already satisfied: google-pasta>=0.1.8 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.2.0->tapas-table-parsing) (0.2.0)\nCollecting numpy<2,>=1.14.3\n Downloading numpy-1.18.5-cp37-cp37m-manylinux1_x86_64.whl (20.1 MB)\n\u001b[K |████████████████████████████████| 20.1 MB 82.3 MB/s \n\u001b[?25hCollecting tensorflow-estimator<2.3.0,>=2.2.0\n Downloading tensorflow_estimator-2.2.0-py2.py3-none-any.whl (454 kB)\n\u001b[K |████████████████████████████████| 454 kB 65.6 MB/s \n\u001b[?25hCollecting gast>=0.3.2\n Downloading gast-0.3.3-py2.py3-none-any.whl (9.7 kB)\nRequirement already satisfied: keras-preprocessing>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.2.0->tapas-table-parsing) (1.1.2)\nRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.2.0->tapas-table-parsing) (1.0.0)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow~=2.2.0->tapas-table-parsing) (0.4.6)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow~=2.2.0->tapas-table-parsing) (1.8.1)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow~=2.2.0->tapas-table-parsing) (1.0.1)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow~=2.2.0->tapas-table-parsing) (3.3.6)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow~=2.2.0->tapas-table-parsing) (1.3.1)\nRequirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow~=2.2.0->tapas-table-parsing) (4.11.3)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow~=2.2.0->tapas-table-parsing) (3.7.0)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow~=2.2.0->tapas-table-parsing) (3.2.0)\nCollecting mlperf-compliance==0.0.10\n Downloading mlperf_compliance-0.0.10-py3-none-any.whl (24 kB)\nRequirement already satisfied: tensorflow-hub>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (0.12.0)\nCollecting sentencepiece\n Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n\u001b[K |████████████████████████████████| 1.2 MB 37.7 MB/s \n\u001b[?25hCollecting typing==3.7.4.1\n Downloading typing-3.7.4.1-py3-none-any.whl (25 kB)\nRequirement already satisfied: gin-config in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (0.5.0)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (3.13)\nCollecting opencv-python-headless\n Downloading 
opencv_python_headless-4.5.5.64-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (47.8 MB)\n\u001b[K |████████████████████████████████| 47.8 MB 49 kB/s \n\u001b[?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (3.2.2)\nRequirement already satisfied: Cython in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (0.29.28)\nRequirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (4.0.1)\nRequirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (5.4.8)\nCollecting tensorflow-model-optimization>=0.2.1\n Downloading tensorflow_model_optimization-0.7.2-py2.py3-none-any.whl (237 kB)\n\u001b[K |████████████████████████████████| 237 kB 64.3 MB/s \n\u001b[?25hCollecting py-cpuinfo>=3.3.0\n Downloading py-cpuinfo-8.0.0.tar.gz (99 kB)\n\u001b[K |████████████████████████████████| 99 kB 9.3 MB/s \n\u001b[?25hRequirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (7.1.2)\nRequirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.7/dist-packages (from tf-models-official~=2.2.0->tapas-table-parsing) (1.12.11)\nCollecting dataclasses\n Downloading dataclasses-0.6-py3-none-any.whl (14 kB)\nCollecting tensorflow-addons\n Downloading tensorflow_addons-0.16.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)\n\u001b[K |████████████████████████████████| 1.1 MB 37.8 MB/s \n\u001b[?25hCollecting google-api-python-client>=1.6.7\n Downloading google_api_python_client-2.42.0-py2.py3-none-any.whl (8.3 MB)\n\u001b[K |████████████████████████████████| 8.3 MB 64.3 MB/s \n\u001b[?25h Downloading google_api_python_client-2.41.0-py2.py3-none-any.whl (8.3 MB)\n\u001b[K |████████████████████████████████| 8.3 MB 21.1 MB/s \n\u001b[?25h Downloading google_api_python_client-2.40.0-py2.py3-none-any.whl (8.2 MB)\n\u001b[K |████████████████████████████████| 8.2 MB 44.2 MB/s \n\u001b[?25h Downloading google_api_python_client-2.39.0-py2.py3-none-any.whl (8.2 MB)\n\u001b[K |████████████████████████████████| 8.2 MB 38.0 MB/s \n\u001b[?25h Downloading google_api_python_client-2.38.0-py2.py3-none-any.whl (8.2 MB)\n\u001b[K |████████████████████████████████| 8.2 MB 41.1 MB/s \n\u001b[?25h Downloading google_api_python_client-2.37.0-py2.py3-none-any.whl (8.1 MB)\n\u001b[K |████████████████████████████████| 8.1 MB 30.0 MB/s \n\u001b[?25h Downloading google_api_python_client-2.36.0-py2.py3-none-any.whl (8.0 MB)\n\u001b[K |████████████████████████████████| 8.0 MB 50.5 MB/s \n\u001b[?25h Downloading google_api_python_client-2.35.0-py2.py3-none-any.whl (8.0 MB)\n\u001b[K |████████████████████████████████| 8.0 MB 35.1 MB/s \n\u001b[?25h Downloading google_api_python_client-2.34.0-py2.py3-none-any.whl (7.9 MB)\n\u001b[K |████████████████████████████████| 7.9 MB 50.6 MB/s \n\u001b[?25h Downloading google_api_python_client-2.33.0-py2.py3-none-any.whl (7.9 MB)\n\u001b[K |████████████████████████████████| 7.9 MB 26.7 MB/s \n\u001b[?25h Downloading google_api_python_client-2.32.0-py2.py3-none-any.whl (7.8 MB)\n\u001b[K |████████████████████████████████| 7.8 MB 50.7 MB/s \n\u001b[?25h Downloading google_api_python_client-2.31.0-py2.py3-none-any.whl (7.8 MB)\n\u001b[K |████████████████████████████████| 7.8 MB 31.8 MB/s 
\n\u001b[?25h Downloading google_api_python_client-2.30.0-py2.py3-none-any.whl (7.8 MB)\n\u001b[K |████████████████████████████████| 7.8 MB 25.6 MB/s \n\u001b[?25h Downloading google_api_python_client-2.29.0-py2.py3-none-any.whl (7.7 MB)\n\u001b[K |████████████████████████████████| 7.7 MB 45.6 MB/s \n\u001b[?25h Downloading google_api_python_client-2.28.0-py2.py3-none-any.whl (7.7 MB)\n\u001b[K |████████████████████████████████| 7.7 MB 32.2 MB/s \n\u001b[?25h Downloading google_api_python_client-2.27.0-py2.py3-none-any.whl (7.7 MB)\n\u001b[K |████████████████████████████████| 7.7 MB 37.2 MB/s \n\u001b[?25h Downloading google_api_python_client-2.26.1-py2.py3-none-any.whl (7.6 MB)\n\u001b[K |████████████████████████████████| 7.6 MB 33.4 MB/s \n\u001b[?25h Downloading google_api_python_client-2.26.0-py2.py3-none-any.whl (7.6 MB)\n\u001b[K |████████████████████████████████| 7.6 MB 19.4 MB/s \n\u001b[?25h Downloading google_api_python_client-2.25.0-py2.py3-none-any.whl (7.5 MB)\n\u001b[K |████████████████████████████████| 7.5 MB 41.9 MB/s \n\u001b[?25h Downloading google_api_python_client-2.24.0-py2.py3-none-any.whl (7.5 MB)\n\u001b[K |████████████████████████████████| 7.5 MB 9.5 MB/s \n\u001b[?25h Downloading google_api_python_client-2.23.0-py2.py3-none-any.whl (7.5 MB)\n\u001b[K |████████████████████████████████| 7.5 MB 11.0 MB/s \n\u001b[?25h Downloading google_api_python_client-2.22.0-py2.py3-none-any.whl (7.5 MB)\n\u001b[K |████████████████████████████████| 7.5 MB 8.1 MB/s \n\u001b[?25h Downloading google_api_python_client-2.21.0-py2.py3-none-any.whl (7.5 MB)\n\u001b[K |████████████████████████████████| 7.5 MB 37.5 MB/s \n\u001b[?25h Downloading google_api_python_client-2.20.0-py2.py3-none-any.whl (7.4 MB)\n\u001b[K |████████████████████████████████| 7.4 MB 29.5 MB/s \n\u001b[?25h Downloading google_api_python_client-2.19.1-py2.py3-none-any.whl (7.4 MB)\n\u001b[K |████████████████████████████████| 7.4 MB 6.7 MB/s \n\u001b[?25h Downloading google_api_python_client-2.19.0-py2.py3-none-any.whl (7.4 MB)\n\u001b[K |████████████████████████████████| 7.4 MB 31.1 MB/s \n\u001b[?25h Downloading google_api_python_client-2.18.0-py2.py3-none-any.whl (7.4 MB)\n\u001b[K |████████████████████████████████| 7.4 MB 30.6 MB/s \n\u001b[?25h Downloading google_api_python_client-2.17.0-py2.py3-none-any.whl (7.3 MB)\n\u001b[K |████████████████████████████████| 7.3 MB 21.9 MB/s \n\u001b[?25h Downloading google_api_python_client-2.16.0-py2.py3-none-any.whl (7.3 MB)\n\u001b[K |████████████████████████████████| 7.3 MB 27.5 MB/s \n\u001b[?25h Downloading google_api_python_client-2.15.0-py2.py3-none-any.whl (7.2 MB)\n\u001b[K |████████████████████████████████| 7.2 MB 19.3 MB/s \n\u001b[?25h Downloading google_api_python_client-2.14.1-py2.py3-none-any.whl (7.1 MB)\n\u001b[K |████████████████████████████████| 7.1 MB 25.4 MB/s \n\u001b[?25h Downloading google_api_python_client-2.14.0-py2.py3-none-any.whl (7.1 MB)\n\u001b[K |████████████████████████████████| 7.1 MB 22.6 MB/s \n\u001b[?25h Downloading google_api_python_client-2.13.0-py2.py3-none-any.whl (7.1 MB)\n\u001b[K |████████████████████████████████| 7.1 MB 16.9 MB/s \n\u001b[?25h Downloading google_api_python_client-2.12.0-py2.py3-none-any.whl (7.1 MB)\n\u001b[K |████████████████████████████████| 7.1 MB 22.3 MB/s \n\u001b[?25h Downloading google_api_python_client-2.11.0-py2.py3-none-any.whl (7.0 MB)\n\u001b[K |████████████████████████████████| 7.0 MB 5.1 MB/s \n\u001b[?25h Downloading google_api_python_client-2.10.0-py2.py3-none-any.whl (7.0 MB)\n\u001b[K 
|████████████████████████████████| 7.0 MB 26.8 MB/s \n\u001b[?25h Downloading google_api_python_client-2.9.0-py2.py3-none-any.whl (7.0 MB)\n\u001b[K |████████████████████████████████| 7.0 MB 19.7 MB/s \n\u001b[?25h Downloading google_api_python_client-2.8.0-py2.py3-none-any.whl (7.0 MB)\n\u001b[K |████████████████████████████████| 7.0 MB 24.8 MB/s \n\u001b[?25h Downloading google_api_python_client-2.7.0-py2.py3-none-any.whl (7.3 MB)\n\u001b[K |████████████████████████████████| 7.3 MB 19.0 MB/s \n\u001b[?25h Downloading google_api_python_client-2.6.0-py2.py3-none-any.whl (7.2 MB)\n\u001b[K |████████████████████████████████| 7.2 MB 34.9 MB/s \n\u001b[?25h Downloading google_api_python_client-2.5.0-py2.py3-none-any.whl (7.1 MB)\n\u001b[K |████████████████████████████████| 7.1 MB 17.7 MB/s \n\u001b[?25h Downloading google_api_python_client-2.4.0-py2.py3-none-any.whl (7.1 MB)\n\u001b[K |████████████████████████████████| 7.1 MB 31.2 MB/s \n\u001b[?25h Downloading google_api_python_client-2.3.0-py2.py3-none-any.whl (7.1 MB)\n\u001b[K |████████████████████████████████| 7.1 MB 37.4 MB/s \n\u001b[?25h Downloading google_api_python_client-2.2.0-py2.py3-none-any.whl (7.0 MB)\n\u001b[K |████████████████████████████████| 7.0 MB 6.2 MB/s \n\u001b[?25h Downloading google_api_python_client-2.1.0-py2.py3-none-any.whl (6.6 MB)\n\u001b[K |████████████████████████████████| 6.6 MB 14.4 MB/s \n\u001b[?25h Downloading google_api_python_client-2.0.2-py2.py3-none-any.whl (6.5 MB)\n\u001b[K |████████████████████████████████| 6.5 MB 37.7 MB/s \n\u001b[?25h Downloading google_api_python_client-1.12.10-py2.py3-none-any.whl (61 kB)\n\u001b[K |████████████████████████████████| 61 kB 119 kB/s \n\u001b[?25h Downloading google_api_python_client-1.12.8-py2.py3-none-any.whl (61 kB)\n\u001b[K |████████████████████████████████| 61 kB 785 bytes/s \n\u001b[?25h Downloading google_api_python_client-1.12.7-py2.py3-none-any.whl (61 kB)\n\u001b[K |████████████████████████████████| 61 kB 29 kB/s \n\u001b[?25h Downloading google_api_python_client-1.12.6-py2.py3-none-any.whl (61 kB)\n\u001b[K |████████████████████████████████| 61 kB 27 kB/s \n\u001b[?25h Downloading google_api_python_client-1.12.5-py2.py3-none-any.whl (61 kB)\n\u001b[K |████████████████████████████████| 61 kB 7.5 MB/s \n\u001b[?25h Downloading google_api_python_client-1.12.4-py2.py3-none-any.whl (61 kB)\n\u001b[K |████████████████████████████████| 61 kB 7.1 MB/s \n\u001b[?25h Downloading google_api_python_client-1.12.3-py2.py3-none-any.whl (61 kB)\n\u001b[K |████████████████████████████████| 61 kB 5.8 MB/s \n\u001b[?25h Downloading google_api_python_client-1.12.2-py2.py3-none-any.whl (61 kB)\n\u001b[K |████████████████████████████████| 61 kB 8.4 MB/s \n\u001b[?25hRequirement already satisfied: google-auth-httplib2>=0.0.3 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official~=2.2.0->tapas-table-parsing) (0.0.4)\nRequirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official~=2.2.0->tapas-table-parsing) (3.0.1)\nRequirement already satisfied: dm-tree~=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow-model-optimization>=0.2.1->tf-models-official~=2.2.0->tapas-table-parsing) (0.1.6)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->tf-models-official~=2.2.0->tapas-table-parsing) (1.4.0)\nRequirement already satisfied: cycler>=0.10 in 
/usr/local/lib/python3.7/dist-packages (from matplotlib->tf-models-official~=2.2.0->tapas-table-parsing) (0.11.0)\nRequirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.7/dist-packages (from python-slugify->kaggle<1.5.8->tapas-table-parsing) (1.3)\nRequirement already satisfied: typeguard>=2.7 in /usr/local/lib/python3.7/dist-packages (from tensorflow-addons->tf-models-official~=2.2.0->tapas-table-parsing) (2.7.1)\nRequirement already satisfied: importlib-resources in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official~=2.2.0->tapas-table-parsing) (5.4.0)\nRequirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official~=2.2.0->tapas-table-parsing) (1.7.0)\nRequirement already satisfied: promise in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official~=2.2.0->tapas-table-parsing) (2.3)\nRequirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official~=2.2.0->tapas-table-parsing) (21.4.0)\nBuilding wheels for collected packages: frozendict, avro-python3, dill, google-apitools, grpc-google-iam-v1, httplib2, kaggle, oauth2client, py-cpuinfo\n Building wheel for frozendict (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for frozendict: filename=frozendict-1.2-py3-none-any.whl size=3166 sha256=bcbf7ecdf36cf16604986862f798d3cfd039a27a02d608e86236c97dac08c3ae\n Stored in directory: /root/.cache/pip/wheels/68/17/69/ac196dd181e620bba5fae5488e4fd6366a7316dce13cf88776\n Building wheel for avro-python3 (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for avro-python3: filename=avro_python3-1.9.2.1-py3-none-any.whl size=43513 sha256=8d5079abbdcb60a53a8929f07491be91b469e9ca1e0b266eb0f868e065147dae\n Stored in directory: /root/.cache/pip/wheels/bc/49/5f/fdb5b9d85055c478213e0158ac122b596816149a02d82e0ab1\n Building wheel for dill (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for dill: filename=dill-0.3.1.1-py3-none-any.whl size=78544 sha256=5847f08d96cd5f1809473da81ce0e7cea3badef87bbcc5e8f88f7136bb233a9c\n Stored in directory: /root/.cache/pip/wheels/a4/61/fd/c57e374e580aa78a45ed78d5859b3a44436af17e22ca53284f\n Building wheel for google-apitools (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for google-apitools: filename=google_apitools-0.5.28-py3-none-any.whl size=130109 sha256=3a5edaac514084485549d713c595af9a20d638189d5ab3dfa0107675ce6c2937\n Stored in directory: /root/.cache/pip/wheels/34/3b/69/ecd8e6ae89d9d71102a58962c29faa7a9467ba45f99f205920\n Building wheel for grpc-google-iam-v1 (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for grpc-google-iam-v1: filename=grpc_google_iam_v1-0.12.3-py3-none-any.whl size=18515 sha256=c6a155cf0d184085c4d718e1e3c19356ba32b9f7c29d3884f6de71f8d14a6387\n Stored in directory: /root/.cache/pip/wheels/b9/ee/67/2e444183030cb8d31ce8b34cee34a7afdbd3ba5959ea846380\n Building wheel for httplib2 (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for httplib2: filename=httplib2-0.12.0-py3-none-any.whl size=93465 sha256=e1e718e4ceca2290ca872bd2434defee84953402a8baad2e2b183a115bb6b901\n Stored in directory: /root/.cache/pip/wheels/0d/e7/b6/0dd30343ceca921cfbd91f355041bd9c69e0f40b49f25b7b8a\n Building wheel for kaggle (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for kaggle: filename=kaggle-1.5.6-py3-none-any.whl size=72858 sha256=f911a59bdadc590e7f089c41c23f24c49e3d2d586bbefe73925d026b7989d7fc\n Stored in directory: /root/.cache/pip/wheels/aa/e7/e7/eb3c3d514c33294d77ddd5a856bdd58dc9c1fabbed59a02a2b\n Building wheel for oauth2client (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for oauth2client: filename=oauth2client-3.0.0-py3-none-any.whl size=106375 sha256=a0b226e54e128315e6205fc9380270bca443fc3e1bac6e135e51a4cd24bb3622\n Stored in directory: /root/.cache/pip/wheels/86/73/7a/3b3f76a2142176605ff38fbca574327962c71e25a43197a4c1\n Building wheel for py-cpuinfo (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for py-cpuinfo: filename=py_cpuinfo-8.0.0-py3-none-any.whl size=22257 sha256=113156ecdb59f6181b20ec42ed7d3373f3b3be32ec31144ee5cb7efd4caca5f3\n Stored in directory: /root/.cache/pip/wheels/d2/f1/1f/041add21dc9c4220157f1bd2bd6afe1f1a49524c3396b94401\nSuccessfully built frozendict avro-python3 dill google-apitools grpc-google-iam-v1 httplib2 kaggle oauth2client py-cpuinfo\nInstalling collected packages: typing-extensions, cachetools, pbr, numpy, httplib2, grpcio-gcp, tensorflow-estimator, tensorboard, pymongo, pyarrow, oauth2client, mock, hdfs, h5py, grpc-google-iam-v1, gast, fasteners, fastavro, dill, avro-python3, typing, tensorflow-model-optimization, tensorflow-addons, tensorflow, sentencepiece, regex, py-cpuinfo, pandas, opencv-python-headless, mlperf-compliance, kaggle, google-cloud-vision, google-cloud-videointelligence, google-cloud-spanner, google-cloud-pubsub, google-cloud-language, google-cloud-dlp, google-cloud-datastore, google-cloud-bigtable, google-apitools, google-api-python-client, dataclasses, apache-beam, tf-slim, tf-models-official, tensorflow-probability, scikit-learn, nltk, frozendict, tapas-table-parsing\n Attempting uninstall: typing-extensions\n Found existing installation: typing-extensions 3.10.0.2\n Uninstalling typing-extensions-3.10.0.2:\n Successfully uninstalled typing-extensions-3.10.0.2\n Attempting uninstall: cachetools\n Found existing installation: cachetools 4.2.4\n Uninstalling cachetools-4.2.4:\n Successfully uninstalled cachetools-4.2.4\n Attempting uninstall: numpy\n Found existing installation: numpy 1.21.5\n Uninstalling numpy-1.21.5:\n Successfully uninstalled numpy-1.21.5\n Attempting uninstall: httplib2\n Found existing installation: httplib2 0.17.4\n Uninstalling httplib2-0.17.4:\n Successfully uninstalled httplib2-0.17.4\n Attempting uninstall: tensorflow-estimator\n Found existing installation: tensorflow-estimator 2.8.0\n Uninstalling tensorflow-estimator-2.8.0:\n Successfully uninstalled tensorflow-estimator-2.8.0\n Attempting uninstall: tensorboard\n Found existing installation: tensorboard 2.8.0\n Uninstalling tensorboard-2.8.0:\n Successfully uninstalled tensorboard-2.8.0\n Attempting uninstall: pymongo\n Found existing installation: pymongo 4.0.2\n Uninstalling pymongo-4.0.2:\n Successfully uninstalled pymongo-4.0.2\n Attempting uninstall: pyarrow\n Found existing installation: pyarrow 6.0.1\n Uninstalling pyarrow-6.0.1:\n Successfully uninstalled pyarrow-6.0.1\n Attempting uninstall: oauth2client\n Found existing installation: oauth2client 4.1.3\n Uninstalling oauth2client-4.1.3:\n Successfully uninstalled oauth2client-4.1.3\n Attempting uninstall: h5py\n Found existing installation: h5py 3.1.0\n Uninstalling h5py-3.1.0:\n Successfully uninstalled h5py-3.1.0\n Attempting uninstall: gast\n Found existing installation: gast 0.5.3\n 
Uninstalling gast-0.5.3:\n Successfully uninstalled gast-0.5.3\n Attempting uninstall: dill\n Found existing installation: dill 0.3.4\n Uninstalling dill-0.3.4:\n Successfully uninstalled dill-0.3.4\n Attempting uninstall: tensorflow\n Found existing installation: tensorflow 2.8.0\n Uninstalling tensorflow-2.8.0:\n Successfully uninstalled tensorflow-2.8.0\n Attempting uninstall: regex\n Found existing installation: regex 2019.12.20\n Uninstalling regex-2019.12.20:\n Successfully uninstalled regex-2019.12.20\n Attempting uninstall: pandas\n Found existing installation: pandas 1.3.5\n Uninstalling pandas-1.3.5:\n Successfully uninstalled pandas-1.3.5\n Attempting uninstall: kaggle\n Found existing installation: kaggle 1.5.12\n Uninstalling kaggle-1.5.12:\n Successfully uninstalled kaggle-1.5.12\n Attempting uninstall: google-cloud-language\n Found existing installation: google-cloud-language 1.2.0\n Uninstalling google-cloud-language-1.2.0:\n Successfully uninstalled google-cloud-language-1.2.0\n Attempting uninstall: google-cloud-datastore\n Found existing installation: google-cloud-datastore 1.8.0\n Uninstalling google-cloud-datastore-1.8.0:\n Successfully uninstalled google-cloud-datastore-1.8.0\n Attempting uninstall: google-api-python-client\n Found existing installation: google-api-python-client 1.12.11\n Uninstalling google-api-python-client-1.12.11:\n Successfully uninstalled google-api-python-client-1.12.11\n Attempting uninstall: tensorflow-probability\n Found existing installation: tensorflow-probability 0.16.0\n Uninstalling tensorflow-probability-0.16.0:\n Successfully uninstalled tensorflow-probability-0.16.0\n Attempting uninstall: scikit-learn\n Found existing installation: scikit-learn 1.0.2\n Uninstalling scikit-learn-1.0.2:\n Successfully uninstalled scikit-learn-1.0.2\n Attempting uninstall: nltk\n Found existing installation: nltk 3.2.5\n Uninstalling nltk-3.2.5:\n Successfully uninstalled nltk-3.2.5\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\nyellowbrick 1.4 requires scikit-learn>=1.0.0, but you have scikit-learn 0.22.2.post1 which is incompatible.\ntables 3.7.0 requires numpy>=1.19.0, but you have numpy 1.18.5 which is incompatible.\npymc3 3.11.4 requires cachetools>=4.2.1, but you have cachetools 3.1.1 which is incompatible.\npydrive 1.3.1 requires oauth2client>=4.0.0, but you have oauth2client 3.0.0 which is incompatible.\nmultiprocess 0.70.12.2 requires dill>=0.3.4, but you have dill 0.3.1.1 which is incompatible.\njaxlib 0.3.2+cuda11.cudnn805 requires numpy>=1.19, but you have numpy 1.18.5 which is incompatible.\njax 0.3.4 requires numpy>=1.19, but you have numpy 1.18.5 which is incompatible.\nimbalanced-learn 0.8.1 requires scikit-learn>=0.24, but you have scikit-learn 0.22.2.post1 which is incompatible.\ngoogle-colab 1.0.0 requires pandas>=1.1.0; python_version >= \"3.0\", but you have pandas 1.0.5 which is incompatible.\ndatascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\nalbumentations 0.1.12 requires imgaug<0.2.7,>=0.2.5, but you have imgaug 0.2.9 which is incompatible.\u001b[0m\nSuccessfully installed apache-beam-2.20.0 avro-python3-1.9.2.1 cachetools-3.1.1 dataclasses-0.6 dill-0.3.1.1 fastavro-0.21.24 fasteners-0.17.3 frozendict-1.2 gast-0.3.3 google-api-python-client-1.12.2 google-apitools-0.5.28 google-cloud-bigtable-1.0.0 google-cloud-datastore-1.7.4 google-cloud-dlp-0.13.0 google-cloud-language-1.3.0 google-cloud-pubsub-1.0.2 google-cloud-spanner-1.13.0 google-cloud-videointelligence-1.13.0 google-cloud-vision-0.42.0 grpc-google-iam-v1-0.12.3 grpcio-gcp-0.2.2 h5py-2.10.0 hdfs-2.7.0 httplib2-0.12.0 kaggle-1.5.6 mlperf-compliance-0.0.10 mock-2.0.0 nltk-3.7 numpy-1.18.5 oauth2client-3.0.0 opencv-python-headless-4.5.5.64 pandas-1.0.5 pbr-5.8.1 py-cpuinfo-8.0.0 pyarrow-0.16.0 pymongo-3.12.3 regex-2022.3.15 scikit-learn-0.22.2.post1 sentencepiece-0.1.96 tapas-table-parsing-0.0.1.dev0 tensorboard-2.2.2 tensorflow-2.2.3 tensorflow-addons-0.16.1 tensorflow-estimator-2.2.0 tensorflow-model-optimization-0.7.2 tensorflow-probability-0.10.1 tf-models-official-2.2.2 tf-slim-1.1.0 typing-3.7.4.1 typing-extensions-3.7.4.3\n"
]
],
[
[
"# Fetch models fom Google Storage",
"_____no_output_____"
],
[
"Next we can get pretrained checkpoint from Google Storage. For the sake of speed, this is base sized model trained on [SQA](https://www.microsoft.com/en-us/download/details.aspx?id=54253). Note that best results in the paper were obtained with a large model, with 24 layers instead of 12.",
"_____no_output_____"
]
],
[
[
"! gsutil cp gs://tapas_models/2020_04_21/tapas_sqa_base.zip . && unzip tapas_sqa_base.zip",
"Copying gs://tapas_models/2020_04_21/tapas_sqa_base.zip...\n| [1 files][ 1.0 GiB/ 1.0 GiB] 51.4 MiB/s \nOperation completed over 1 objects/1.0 GiB. \nArchive: tapas_sqa_base.zip\nreplace tapas_sqa_base/model.ckpt.data-00000-of-00001? [y]es, [n]o, [A]ll, [N]one, [r]ename: y\n inflating: tapas_sqa_base/model.ckpt.data-00000-of-00001 y\ny\n\n inflating: tapas_sqa_base/model.ckpt.index \n inflating: tapas_sqa_base/README.txt \n inflating: tapas_sqa_base/vocab.txt \n inflating: tapas_sqa_base/bert_config.json \n inflating: tapas_sqa_base/model.ckpt.meta \n"
]
],
[
[
"# Imports",
"_____no_output_____"
]
],
[
[
"import tensorflow.compat.v1 as tf\nimport os \nimport shutil\nimport csv\nimport pandas as pd\nimport IPython\n\ntf.get_logger().setLevel('ERROR')",
"_____no_output_____"
],
[
"from tapas.utils import tf_example_utils\nfrom tapas.protos import interaction_pb2\nfrom tapas.utils import number_annotation_utils\nfrom tapas.scripts import prediction_utils",
"_____no_output_____"
]
],
[
[
"# Load checkpoint for prediction",
"_____no_output_____"
],
[
"Here's the prediction code, which will create and `interaction_pb2.Interaction` protobuf object, which is the datastructure we use to store examples, and then call the prediction script.",
"_____no_output_____"
]
],
[
[
"os.makedirs('results/sqa/tf_examples', exist_ok=True)\nos.makedirs('results/sqa/model', exist_ok=True)\nwith open('results/sqa/model/checkpoint', 'w') as f:\n f.write('model_checkpoint_path: \"model.ckpt-0\"')\nfor suffix in ['.data-00000-of-00001', '.index', '.meta']:\n shutil.copyfile(f'tapas_sqa_base/model.ckpt{suffix}', f'results/sqa/model/model.ckpt-0{suffix}')",
"_____no_output_____"
],
[
"max_seq_length = 512\nvocab_file = \"tapas_sqa_base/vocab.txt\"\nconfig = tf_example_utils.ClassifierConversionConfig(\n vocab_file=vocab_file,\n max_seq_length=max_seq_length,\n max_column_id=max_seq_length,\n max_row_id=max_seq_length,\n strip_column_names=False,\n add_aggregation_candidates=False,\n)\nconverter = tf_example_utils.ToClassifierTensorflowExample(config)\n\ndef convert_interactions_to_examples(tables_and_queries):\n \"\"\"Calls Tapas converter to convert interaction to example.\"\"\"\n for idx, (table, queries) in enumerate(tables_and_queries):\n interaction = interaction_pb2.Interaction()\n for position, query in enumerate(queries):\n question = interaction.questions.add()\n question.original_text = query\n question.id = f\"{idx}-0_{position}\"\n for header in table[0]:\n interaction.table.columns.add().text = header\n for line in table[1:]:\n row = interaction.table.rows.add()\n for cell in line:\n row.cells.add().text = cell\n number_annotation_utils.add_numeric_values(interaction)\n for i in range(len(interaction.questions)):\n try:\n yield converter.convert(interaction, i)\n except ValueError as e:\n print(f\"Can't convert interaction: {interaction.id} error: {e}\")\n \ndef write_tf_example(filename, examples):\n with tf.io.TFRecordWriter(filename) as writer:\n for example in examples:\n writer.write(example.SerializeToString())\n\ndef predict(table_data, queries):\n table = [list(map(lambda s: s.strip(), row.split(\"|\"))) \n for row in table_data.split(\"\\n\") if row.strip()]\n examples = convert_interactions_to_examples([(table, queries)])\n write_tf_example(\"results/sqa/tf_examples/test.tfrecord\", examples)\n write_tf_example(\"results/sqa/tf_examples/random-split-1-dev.tfrecord\", [])\n \n ! python -m tapas.run_task_main \\\n --task=\"SQA\" \\\n --output_dir=\"results\" \\\n --noloop_predict \\\n --test_batch_size={len(queries)} \\\n --tapas_verbosity=\"ERROR\" \\\n --compression_type= \\\n --init_checkpoint=\"tapas_sqa_base/model.ckpt\" \\\n --bert_config_file=\"tapas_sqa_base/bert_config.json\" \\\n --mode=\"predict\" 2> error\n\n\n results_path = \"results/sqa/model/test_sequence.tsv\"\n all_coordinates = []\n df = pd.DataFrame(table[1:], columns=table[0])\n display(IPython.display.HTML(df.to_html(index=False)))\n print()\n with open(results_path) as csvfile:\n reader = csv.DictReader(csvfile, delimiter='\\t')\n for row in reader:\n coordinates = prediction_utils.parse_coordinates(row[\"answer_coordinates\"])\n all_coordinates.append(coordinates)\n answers = ', '.join([table[row + 1][col] for row, col in coordinates])\n position = int(row['position'])\n print(\">\", queries[position])\n print(answers)\n return all_coordinates",
"_____no_output_____"
]
],
[
[
"# Predict",
"_____no_output_____"
]
],
[
[
"# Example nu-1000-0\nresult = predict(\"\"\"\nDoctor_ID|Doctor_Name|Department|opd_day|Morning_time|Evening_time\n1|ABCD|Nephrology|Monday|9|5\n2|ABC|Opthomology|Tuesday|9|6\n3|DEF|Nephrology|Wednesday|9|6\n4|GHI|Gynaecology|Thursday|9|6\n5|JKL|Orthopeadics|Friday|9|6\n6|MNO|Cardiology|Saturday|9|6\n7|PQR|Dentistry|Sunday|9|5\n8|STU|Epidemology|Monday|9|6\n9|WVX|ENT|Tuesday|9|5\n10|GILOY|Genetics|Wednesday|9|6\n11|Rajeev|Neurology|Wednesday|10|4:30\n12|Makan|Immunology|Tuesday|9|4:30\n13|Arora|Paediatrics|Sunday|11|4:30\n14|Piyush|Radiology|Monday|11:20|2\n15|Roha|Gynaecology|Wednesday|9:20|2\n16|Bohra|Dentistry|Thursday|11|2\n17|Rajeev Khan|Virology|Tuesday|10|2\n18|Arnab|Pharmocology|Sunday|10|2\n19|Muskan|ENT|Friday|10|2\n20|pamela|Epidemology|Monday|10|2\n21|Rohit|Radiology|Tuesday|10|2\n22|Aniket|Cardiology|Saturday|10|2\n23|Darbar|Genetics|Saturday|10|2\n24|Suyash|Neurology|Friday|10|2\n25|Abhishek|Immunology|Wednesday|10|2\n26|Yogesh|Immunology|Saturday|10|2\n27|Kunal|Paediatrics|Monday|10|2\n28|Vimal|Pharmocology|Friday|10|2\n29|Kalyan|Virology|Tuesday|10|2\n30|DSS|Nephrology|Thursday|10|2\n\n\"\"\", [\"How many doctors are there in Immunology department?\", \"of these, which doctor is available on Saturday?\"])",
"is_built_with_cuda: True\nis_gpu_available: False\nGPUs: []\nTraining or predicting ...\nEvaluation finished after training step 0.\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d04cb1896164f74b7504b352aa1e004c7724f698 | 90,005 | ipynb | Jupyter Notebook | examples/tutorials/06_Going_Deeper_on_Molecular_Featurizations.ipynb | patrickphatnguyen/deepchem | f310f0a8d9eeb804f5e04974edff10ba62efab63 | [
"MIT"
] | null | null | null | examples/tutorials/06_Going_Deeper_on_Molecular_Featurizations.ipynb | patrickphatnguyen/deepchem | f310f0a8d9eeb804f5e04974edff10ba62efab63 | [
"MIT"
] | null | null | null | examples/tutorials/06_Going_Deeper_on_Molecular_Featurizations.ipynb | patrickphatnguyen/deepchem | f310f0a8d9eeb804f5e04974edff10ba62efab63 | [
"MIT"
] | null | null | null | 58.030303 | 2,868 | 0.469096 | [
[
[
"# Tutorial Part 6: Going Deeper On Molecular Featurizations\n\nOne of the most important steps of doing machine learning on molecular data is transforming this data into a form amenable to the application of learning algorithms. This process is broadly called \"featurization\" and involves tutrning a molecule into a vector or tensor of some sort. There are a number of different ways of doing such transformations, and the choice of featurization is often dependent on the problem at hand.\n\nIn this tutorial, we explore the different featurization methods available for molecules. These featurization methods include:\n\n1. `ConvMolFeaturizer`, \n2. `WeaveFeaturizer`, \n3. `CircularFingerprints`\n4. `RDKitDescriptors`\n5. `BPSymmetryFunction`\n6. `CoulombMatrix`\n7. `CoulombMatrixEig`\n8. `AdjacencyFingerprints`\n\n## Colab\n\nThis tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.\n\n[](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/06_Going_Deeper_on_Molecular_Featurizations.ipynb)\n\n## Setup\n\nTo run DeepChem within Colab, you'll need to run the following cell of installation commands. This will take about 5 minutes to run to completion and install your environment.",
"_____no_output_____"
]
],
[
[
"!wget -c https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh\n!chmod +x Anaconda3-2019.10-Linux-x86_64.sh\n!bash ./Anaconda3-2019.10-Linux-x86_64.sh -b -f -p /usr/local\n!conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0\nimport sys\nsys.path.append('/usr/local/lib/python3.7/site-packages/')",
"--2020-03-07 01:06:34-- https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh\nResolving repo.anaconda.com (repo.anaconda.com)... 104.16.130.3, 104.16.131.3, 2606:4700::6810:8303, ...\nConnecting to repo.anaconda.com (repo.anaconda.com)|104.16.130.3|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 530308481 (506M) [application/x-sh]\nSaving to: ‘Anaconda3-2019.10-Linux-x86_64.sh’\n\nAnaconda3-2019.10-L 100%[===================>] 505.74M 105MB/s in 5.1s \n\n2020-03-07 01:06:39 (99.5 MB/s) - ‘Anaconda3-2019.10-Linux-x86_64.sh’ saved [530308481/530308481]\n\nPREFIX=/usr/local\nUnpacking payload ...\nCollecting package metadata (current_repodata.json): - \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\bdone\nSolving environment: - \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\bdone\n\n## Package Plan ##\n\n environment location: /usr/local\n\n added / updated specs:\n - _ipyw_jlab_nb_ext_conf==0.1.0=py37_0\n - _libgcc_mutex==0.1=main\n - alabaster==0.7.12=py37_0\n - anaconda-client==1.7.2=py37_0\n - anaconda-navigator==1.9.7=py37_0\n - anaconda-project==0.8.3=py_0\n - anaconda==2019.10=py37_0\n - asn1crypto==1.0.1=py37_0\n - astroid==2.3.1=py37_0\n - astropy==3.2.2=py37h7b6447c_0\n - atomicwrites==1.3.0=py37_1\n - attrs==19.2.0=py_0\n - babel==2.7.0=py_0\n - backcall==0.1.0=py37_0\n - backports.functools_lru_cache==1.5=py_2\n - backports.os==0.1.1=py37_0\n - backports.shutil_get_terminal_size==1.0.0=py37_2\n - backports.tempfile==1.0=py_1\n - backports.weakref==1.0.post1=py_1\n - backports==1.0=py_2\n - beautifulsoup4==4.8.0=py37_0\n - bitarray==1.0.1=py37h7b6447c_0\n - bkcharts==0.2=py37_0\n - blas==1.0=mkl\n - bleach==3.1.0=py37_0\n - blosc==1.16.3=hd408876_0\n - bokeh==1.3.4=py37_0\n - boto==2.49.0=py37_0\n - bottleneck==1.2.1=py37h035aef0_1\n - bzip2==1.0.8=h7b6447c_0\n - ca-certificates==2019.8.28=0\n - cairo==1.14.12=h8948797_3\n - certifi==2019.9.11=py37_0\n - cffi==1.12.3=py37h2e261b9_0\n - chardet==3.0.4=py37_1003\n - click==7.0=py37_0\n - cloudpickle==1.2.2=py_0\n - clyent==1.2.2=py37_1\n - colorama==0.4.1=py37_0\n - conda-build==3.18.9=py37_3\n - conda-env==2.6.0=1\n - conda-package-handling==1.6.0=py37h7b6447c_0\n - conda-verify==3.4.2=py_1\n - conda==4.7.12=py37_0\n - contextlib2==0.6.0=py_0\n - cryptography==2.7=py37h1ba5d50_0\n - curl==7.65.3=hbc83047_0\n - cycler==0.10.0=py37_0\n - cython==0.29.13=py37he6710b0_0\n - cytoolz==0.10.0=py37h7b6447c_0\n - dask-core==2.5.2=py_0\n - dask==2.5.2=py_0\n - dbus==1.13.6=h746ee38_0\n - decorator==4.4.0=py37_1\n - defusedxml==0.6.0=py_0\n - distributed==2.5.2=py_0\n - docutils==0.15.2=py37_0\n - entrypoints==0.3=py37_0\n - et_xmlfile==1.0.1=py37_0\n - expat==2.2.6=he6710b0_0\n - fastcache==1.1.0=py37h7b6447c_0\n - filelock==3.0.12=py_0\n - flask==1.1.1=py_0\n - fontconfig==2.13.0=h9420a91_0\n - freetype==2.9.1=h8a8886c_1\n - fribidi==1.0.5=h7b6447c_0\n - fsspec==0.5.2=py_0\n - future==0.17.1=py37_0\n - get_terminal_size==1.0.0=haa9412d_0\n - gevent==1.4.0=py37h7b6447c_0\n - glib==2.56.2=hd408876_0\n - glob2==0.7=py_0\n - gmp==6.1.2=h6c8ec71_1\n - gmpy2==2.0.8=py37h10f8cd9_2\n - graphite2==1.3.13=h23475e2_0\n - greenlet==0.4.15=py37h7b6447c_0\n - gst-plugins-base==1.14.0=hbbd80ab_1\n - gstreamer==1.14.0=hb453b48_1\n - h5py==2.9.0=py37h7918eee_0\n - harfbuzz==1.8.8=hffaf4a1_0\n - hdf5==1.10.4=hb1b8bf9_0\n - heapdict==1.0.1=py_0\n - html5lib==1.0.1=py37_0\n - icu==58.2=h9c2bf20_1\n - idna==2.8=py37_0\n - imageio==2.6.0=py37_0\n - 
imagesize==1.1.0=py37_0\n - importlib_metadata==0.23=py37_0\n - intel-openmp==2019.4=243\n - ipykernel==5.1.2=py37h39e3cac_0\n - ipython==7.8.0=py37h39e3cac_0\n - ipython_genutils==0.2.0=py37_0\n - ipywidgets==7.5.1=py_0\n - isort==4.3.21=py37_0\n - itsdangerous==1.1.0=py37_0\n - jbig==2.1=hdba287a_0\n - jdcal==1.4.1=py_0\n - jedi==0.15.1=py37_0\n - jeepney==0.4.1=py_0\n - jinja2==2.10.3=py_0\n - joblib==0.13.2=py37_0\n - jpeg==9b=h024ee3a_2\n - json5==0.8.5=py_0\n - jsonschema==3.0.2=py37_0\n - jupyter==1.0.0=py37_7\n - jupyter_client==5.3.3=py37_1\n - jupyter_console==6.0.0=py37_0\n - jupyter_core==4.5.0=py_0\n - jupyterlab==1.1.4=pyhf63ae98_0\n - jupyterlab_server==1.0.6=py_0\n - keyring==18.0.0=py37_0\n - kiwisolver==1.1.0=py37he6710b0_0\n - krb5==1.16.1=h173b8e3_7\n - lazy-object-proxy==1.4.2=py37h7b6447c_0\n - libarchive==3.3.3=h5d8350f_5\n - libcurl==7.65.3=h20c2e04_0\n - libedit==3.1.20181209=hc058e9b_0\n - libffi==3.2.1=hd88cf55_4\n - libgcc-ng==9.1.0=hdf63c60_0\n - libgfortran-ng==7.3.0=hdf63c60_0\n - liblief==0.9.0=h7725739_2\n - libpng==1.6.37=hbc83047_0\n - libsodium==1.0.16=h1bed415_0\n - libssh2==1.8.2=h1ba5d50_0\n - libstdcxx-ng==9.1.0=hdf63c60_0\n - libtiff==4.0.10=h2733197_2\n - libtool==2.4.6=h7b6447c_5\n - libuuid==1.0.3=h1bed415_2\n - libxcb==1.13=h1bed415_1\n - libxml2==2.9.9=hea5a465_1\n - libxslt==1.1.33=h7d1a2b0_0\n - llvmlite==0.29.0=py37hd408876_0\n - locket==0.2.0=py37_1\n - lxml==4.4.1=py37hefd8a0e_0\n - lz4-c==1.8.1.2=h14c3975_0\n - lzo==2.10=h49e0be7_2\n - markupsafe==1.1.1=py37h7b6447c_0\n - matplotlib==3.1.1=py37h5429711_0\n - mccabe==0.6.1=py37_1\n - mistune==0.8.4=py37h7b6447c_0\n - mkl-service==2.3.0=py37he904b0f_0\n - mkl==2019.4=243\n - mkl_fft==1.0.14=py37ha843d7b_0\n - mkl_random==1.1.0=py37hd6b4f25_0\n - mock==3.0.5=py37_0\n - more-itertools==7.2.0=py37_0\n - mpc==1.1.0=h10f8cd9_1\n - mpfr==4.0.1=hdf1c602_3\n - mpmath==1.1.0=py37_0\n - msgpack-python==0.6.1=py37hfd86e86_1\n - multipledispatch==0.6.0=py37_0\n - navigator-updater==0.2.1=py37_0\n - nbconvert==5.6.0=py37_1\n - nbformat==4.4.0=py37_0\n - ncurses==6.1=he6710b0_1\n - networkx==2.3=py_0\n - nltk==3.4.5=py37_0\n - nose==1.3.7=py37_2\n - notebook==6.0.1=py37_0\n - numba==0.45.1=py37h962f231_0\n - numexpr==2.7.0=py37h9e4a6bb_0\n - numpy-base==1.17.2=py37hde5b4d6_0\n - numpy==1.17.2=py37haad9e8e_0\n - numpydoc==0.9.1=py_0\n - olefile==0.46=py37_0\n - openpyxl==3.0.0=py_0\n - openssl==1.1.1d=h7b6447c_2\n - packaging==19.2=py_0\n - pandas==0.25.1=py37he6710b0_0\n - pandoc==2.2.3.2=0\n - pandocfilters==1.4.2=py37_1\n - pango==1.42.4=h049681c_0\n - parso==0.5.1=py_0\n - partd==1.0.0=py_0\n - patchelf==0.9=he6710b0_3\n - path.py==12.0.1=py_0\n - pathlib2==2.3.5=py37_0\n - patsy==0.5.1=py37_0\n - pcre==8.43=he6710b0_0\n - pep8==1.7.1=py37_0\n - pexpect==4.7.0=py37_0\n - pickleshare==0.7.5=py37_0\n - pillow==6.2.0=py37h34e0f95_0\n - pip==19.2.3=py37_0\n - pixman==0.38.0=h7b6447c_0\n - pkginfo==1.5.0.1=py37_0\n - pluggy==0.13.0=py37_0\n - ply==3.11=py37_0\n - prometheus_client==0.7.1=py_0\n - prompt_toolkit==2.0.10=py_0\n - psutil==5.6.3=py37h7b6447c_0\n - ptyprocess==0.6.0=py37_0\n - py-lief==0.9.0=py37h7725739_2\n - py==1.8.0=py37_0\n - pycodestyle==2.5.0=py37_0\n - pycosat==0.6.3=py37h14c3975_0\n - pycparser==2.19=py37_0\n - pycrypto==2.6.1=py37h14c3975_9\n - pycurl==7.43.0.3=py37h1ba5d50_0\n - pyflakes==2.1.1=py37_0\n - pygments==2.4.2=py_0\n - pylint==2.4.2=py37_0\n - pyodbc==4.0.27=py37he6710b0_0\n - pyopenssl==19.0.0=py37_0\n - pyparsing==2.4.2=py_0\n - pyqt==5.9.2=py37h05f1152_2\n - 
pyrsistent==0.15.4=py37h7b6447c_0\n - pysocks==1.7.1=py37_0\n - pytables==3.5.2=py37h71ec239_1\n - pytest-arraydiff==0.3=py37h39e3cac_0\n - pytest-astropy==0.5.0=py37_0\n - pytest-doctestplus==0.4.0=py_0\n - pytest-openfiles==0.4.0=py_0\n - pytest-remotedata==0.3.2=py37_0\n - pytest==5.2.1=py37_0\n - python-dateutil==2.8.0=py37_0\n - python-libarchive-c==2.8=py37_13\n - python==3.7.4=h265db76_1\n - pytz==2019.3=py_0\n - pywavelets==1.0.3=py37hdd07704_1\n - pyyaml==5.1.2=py37h7b6447c_0\n - pyzmq==18.1.0=py37he6710b0_0\n - qt==5.9.7=h5867ecd_1\n - qtawesome==0.6.0=py_0\n - qtconsole==4.5.5=py_0\n - qtpy==1.9.0=py_0\n - readline==7.0=h7b6447c_5\n - requests==2.22.0=py37_0\n - ripgrep==0.10.0=hc07d326_0\n - rope==0.14.0=py_0\n - ruamel_yaml==0.15.46=py37h14c3975_0\n - scikit-image==0.15.0=py37he6710b0_0\n - scikit-learn==0.21.3=py37hd81dba3_0\n - scipy==1.3.1=py37h7c811a0_0\n - seaborn==0.9.0=py37_0\n - secretstorage==3.1.1=py37_0\n - send2trash==1.5.0=py37_0\n - setuptools==41.4.0=py37_0\n - simplegeneric==0.8.1=py37_2\n - singledispatch==3.4.0.3=py37_0\n - sip==4.19.8=py37hf484d3e_0\n - six==1.12.0=py37_0\n - snappy==1.1.7=hbae5bb6_3\n - snowballstemmer==2.0.0=py_0\n - sortedcollections==1.1.2=py37_0\n - sortedcontainers==2.1.0=py37_0\n - soupsieve==1.9.3=py37_0\n - sphinx==2.2.0=py_0\n - sphinxcontrib-applehelp==1.0.1=py_0\n - sphinxcontrib-devhelp==1.0.1=py_0\n - sphinxcontrib-htmlhelp==1.0.2=py_0\n - sphinxcontrib-jsmath==1.0.1=py_0\n - sphinxcontrib-qthelp==1.0.2=py_0\n - sphinxcontrib-serializinghtml==1.1.3=py_0\n - sphinxcontrib-websupport==1.1.2=py_0\n - sphinxcontrib==1.0=py37_1\n - spyder-kernels==0.5.2=py37_0\n - spyder==3.3.6=py37_0\n - sqlalchemy==1.3.9=py37h7b6447c_0\n - sqlite==3.30.0=h7b6447c_0\n - statsmodels==0.10.1=py37hdd07704_0\n - sympy==1.4=py37_0\n - tbb==2019.4=hfd86e86_0\n - tblib==1.4.0=py_0\n - terminado==0.8.2=py37_0\n - testpath==0.4.2=py37_0\n - tk==8.6.8=hbc83047_0\n - toolz==0.10.0=py_0\n - tornado==6.0.3=py37h7b6447c_0\n - tqdm==4.36.1=py_0\n - traitlets==4.3.3=py37_0\n - unicodecsv==0.14.1=py37_0\n - unixodbc==2.3.7=h14c3975_0\n - urllib3==1.24.2=py37_0\n - wcwidth==0.1.7=py37_0\n - webencodings==0.5.1=py37_1\n - werkzeug==0.16.0=py_0\n - wheel==0.33.6=py37_0\n - widgetsnbextension==3.5.1=py37_0\n - wrapt==1.11.2=py37h7b6447c_0\n - wurlitzer==1.0.3=py37_0\n - xlrd==1.2.0=py37_0\n - xlsxwriter==1.2.1=py_0\n - xlwt==1.3.0=py37_0\n - xz==5.2.4=h14c3975_4\n - yaml==0.1.7=had09818_2\n - zeromq==4.3.1=he6710b0_3\n - zict==1.0.0=py_0\n - zipp==0.6.0=py_0\n - zlib==1.2.11=h7b6447c_3\n - zstd==1.3.7=h0b5b093_0\n\n\nThe following NEW packages will be INSTALLED:\n\n _ipyw_jlab_nb_ext~ pkgs/main/linux-64::_ipyw_jlab_nb_ext_conf-0.1.0-py37_0\n _libgcc_mutex pkgs/main/linux-64::_libgcc_mutex-0.1-main\n alabaster pkgs/main/linux-64::alabaster-0.7.12-py37_0\n anaconda pkgs/main/linux-64::anaconda-2019.10-py37_0\n anaconda-client pkgs/main/linux-64::anaconda-client-1.7.2-py37_0\n anaconda-navigator pkgs/main/linux-64::anaconda-navigator-1.9.7-py37_0\n anaconda-project pkgs/main/noarch::anaconda-project-0.8.3-py_0\n asn1crypto pkgs/main/linux-64::asn1crypto-1.0.1-py37_0\n astroid pkgs/main/linux-64::astroid-2.3.1-py37_0\n astropy pkgs/main/linux-64::astropy-3.2.2-py37h7b6447c_0\n atomicwrites pkgs/main/linux-64::atomicwrites-1.3.0-py37_1\n attrs pkgs/main/noarch::attrs-19.2.0-py_0\n babel pkgs/main/noarch::babel-2.7.0-py_0\n backcall pkgs/main/linux-64::backcall-0.1.0-py37_0\n backports pkgs/main/noarch::backports-1.0-py_2\n backports.functoo~ 
pkgs/main/noarch::backports.functools_lru_cache-1.5-py_2\n backports.os pkgs/main/linux-64::backports.os-0.1.1-py37_0\n backports.shutil_~ pkgs/main/linux-64::backports.shutil_get_terminal_size-1.0.0-py37_2\n backports.tempfile pkgs/main/noarch::backports.tempfile-1.0-py_1\n backports.weakref pkgs/main/noarch::backports.weakref-1.0.post1-py_1\n beautifulsoup4 pkgs/main/linux-64::beautifulsoup4-4.8.0-py37_0\n bitarray pkgs/main/linux-64::bitarray-1.0.1-py37h7b6447c_0\n bkcharts pkgs/main/linux-64::bkcharts-0.2-py37_0\n blas pkgs/main/linux-64::blas-1.0-mkl\n bleach pkgs/main/linux-64::bleach-3.1.0-py37_0\n blosc pkgs/main/linux-64::blosc-1.16.3-hd408876_0\n bokeh pkgs/main/linux-64::bokeh-1.3.4-py37_0\n boto pkgs/main/linux-64::boto-2.49.0-py37_0\n bottleneck pkgs/main/linux-64::bottleneck-1.2.1-py37h035aef0_1\n bzip2 pkgs/main/linux-64::bzip2-1.0.8-h7b6447c_0\n ca-certificates pkgs/main/linux-64::ca-certificates-2019.8.28-0\n cairo pkgs/main/linux-64::cairo-1.14.12-h8948797_3\n certifi pkgs/main/linux-64::certifi-2019.9.11-py37_0\n cffi pkgs/main/linux-64::cffi-1.12.3-py37h2e261b9_0\n chardet pkgs/main/linux-64::chardet-3.0.4-py37_1003\n click pkgs/main/linux-64::click-7.0-py37_0\n cloudpickle pkgs/main/noarch::cloudpickle-1.2.2-py_0\n clyent pkgs/main/linux-64::clyent-1.2.2-py37_1\n colorama pkgs/main/linux-64::colorama-0.4.1-py37_0\n conda pkgs/main/linux-64::conda-4.7.12-py37_0\n conda-build pkgs/main/linux-64::conda-build-3.18.9-py37_3\n conda-env pkgs/main/linux-64::conda-env-2.6.0-1\n conda-package-han~ pkgs/main/linux-64::conda-package-handling-1.6.0-py37h7b6447c_0\n conda-verify pkgs/main/noarch::conda-verify-3.4.2-py_1\n contextlib2 pkgs/main/noarch::contextlib2-0.6.0-py_0\n cryptography pkgs/main/linux-64::cryptography-2.7-py37h1ba5d50_0\n curl pkgs/main/linux-64::curl-7.65.3-hbc83047_0\n cycler pkgs/main/linux-64::cycler-0.10.0-py37_0\n cython pkgs/main/linux-64::cython-0.29.13-py37he6710b0_0\n cytoolz pkgs/main/linux-64::cytoolz-0.10.0-py37h7b6447c_0\n dask pkgs/main/noarch::dask-2.5.2-py_0\n dask-core pkgs/main/noarch::dask-core-2.5.2-py_0\n dbus pkgs/main/linux-64::dbus-1.13.6-h746ee38_0\n decorator pkgs/main/linux-64::decorator-4.4.0-py37_1\n defusedxml pkgs/main/noarch::defusedxml-0.6.0-py_0\n distributed pkgs/main/noarch::distributed-2.5.2-py_0\n docutils pkgs/main/linux-64::docutils-0.15.2-py37_0\n entrypoints pkgs/main/linux-64::entrypoints-0.3-py37_0\n et_xmlfile pkgs/main/linux-64::et_xmlfile-1.0.1-py37_0\n expat pkgs/main/linux-64::expat-2.2.6-he6710b0_0\n fastcache pkgs/main/linux-64::fastcache-1.1.0-py37h7b6447c_0\n filelock pkgs/main/noarch::filelock-3.0.12-py_0\n flask pkgs/main/noarch::flask-1.1.1-py_0\n fontconfig pkgs/main/linux-64::fontconfig-2.13.0-h9420a91_0\n freetype pkgs/main/linux-64::freetype-2.9.1-h8a8886c_1\n fribidi pkgs/main/linux-64::fribidi-1.0.5-h7b6447c_0\n fsspec pkgs/main/noarch::fsspec-0.5.2-py_0\n future pkgs/main/linux-64::future-0.17.1-py37_0\n get_terminal_size pkgs/main/linux-64::get_terminal_size-1.0.0-haa9412d_0\n gevent pkgs/main/linux-64::gevent-1.4.0-py37h7b6447c_0\n glib pkgs/main/linux-64::glib-2.56.2-hd408876_0\n glob2 pkgs/main/noarch::glob2-0.7-py_0\n gmp pkgs/main/linux-64::gmp-6.1.2-h6c8ec71_1\n gmpy2 pkgs/main/linux-64::gmpy2-2.0.8-py37h10f8cd9_2\n graphite2 pkgs/main/linux-64::graphite2-1.3.13-h23475e2_0\n greenlet pkgs/main/linux-64::greenlet-0.4.15-py37h7b6447c_0\n gst-plugins-base pkgs/main/linux-64::gst-plugins-base-1.14.0-hbbd80ab_1\n gstreamer pkgs/main/linux-64::gstreamer-1.14.0-hb453b48_1\n h5py 
pkgs/main/linux-64::h5py-2.9.0-py37h7918eee_0\n harfbuzz pkgs/main/linux-64::harfbuzz-1.8.8-hffaf4a1_0\n hdf5 pkgs/main/linux-64::hdf5-1.10.4-hb1b8bf9_0\n heapdict pkgs/main/noarch::heapdict-1.0.1-py_0\n html5lib pkgs/main/linux-64::html5lib-1.0.1-py37_0\n icu pkgs/main/linux-64::icu-58.2-h9c2bf20_1\n idna pkgs/main/linux-64::idna-2.8-py37_0\n imageio pkgs/main/linux-64::imageio-2.6.0-py37_0\n imagesize pkgs/main/linux-64::imagesize-1.1.0-py37_0\n importlib_metadata pkgs/main/linux-64::importlib_metadata-0.23-py37_0\n intel-openmp pkgs/main/linux-64::intel-openmp-2019.4-243\n ipykernel pkgs/main/linux-64::ipykernel-5.1.2-py37h39e3cac_0\n ipython pkgs/main/linux-64::ipython-7.8.0-py37h39e3cac_0\n ipython_genutils pkgs/main/linux-64::ipython_genutils-0.2.0-py37_0\n ipywidgets pkgs/main/noarch::ipywidgets-7.5.1-py_0\n isort pkgs/main/linux-64::isort-4.3.21-py37_0\n itsdangerous pkgs/main/linux-64::itsdangerous-1.1.0-py37_0\n jbig pkgs/main/linux-64::jbig-2.1-hdba287a_0\n jdcal pkgs/main/noarch::jdcal-1.4.1-py_0\n jedi pkgs/main/linux-64::jedi-0.15.1-py37_0\n jeepney pkgs/main/noarch::jeepney-0.4.1-py_0\n jinja2 pkgs/main/noarch::jinja2-2.10.3-py_0\n joblib pkgs/main/linux-64::joblib-0.13.2-py37_0\n jpeg pkgs/main/linux-64::jpeg-9b-h024ee3a_2\n json5 pkgs/main/noarch::json5-0.8.5-py_0\n jsonschema pkgs/main/linux-64::jsonschema-3.0.2-py37_0\n jupyter pkgs/main/linux-64::jupyter-1.0.0-py37_7\n jupyter_client pkgs/main/linux-64::jupyter_client-5.3.3-py37_1\n jupyter_console pkgs/main/linux-64::jupyter_console-6.0.0-py37_0\n jupyter_core pkgs/main/noarch::jupyter_core-4.5.0-py_0\n jupyterlab pkgs/main/noarch::jupyterlab-1.1.4-pyhf63ae98_0\n jupyterlab_server pkgs/main/noarch::jupyterlab_server-1.0.6-py_0\n keyring pkgs/main/linux-64::keyring-18.0.0-py37_0\n kiwisolver pkgs/main/linux-64::kiwisolver-1.1.0-py37he6710b0_0\n krb5 pkgs/main/linux-64::krb5-1.16.1-h173b8e3_7\n lazy-object-proxy pkgs/main/linux-64::lazy-object-proxy-1.4.2-py37h7b6447c_0\n libarchive pkgs/main/linux-64::libarchive-3.3.3-h5d8350f_5\n libcurl pkgs/main/linux-64::libcurl-7.65.3-h20c2e04_0\n libedit pkgs/main/linux-64::libedit-3.1.20181209-hc058e9b_0\n libffi pkgs/main/linux-64::libffi-3.2.1-hd88cf55_4\n libgcc-ng pkgs/main/linux-64::libgcc-ng-9.1.0-hdf63c60_0\n libgfortran-ng pkgs/main/linux-64::libgfortran-ng-7.3.0-hdf63c60_0\n liblief pkgs/main/linux-64::liblief-0.9.0-h7725739_2\n libpng pkgs/main/linux-64::libpng-1.6.37-hbc83047_0\n libsodium pkgs/main/linux-64::libsodium-1.0.16-h1bed415_0\n libssh2 pkgs/main/linux-64::libssh2-1.8.2-h1ba5d50_0\n libstdcxx-ng pkgs/main/linux-64::libstdcxx-ng-9.1.0-hdf63c60_0\n libtiff pkgs/main/linux-64::libtiff-4.0.10-h2733197_2\n libtool pkgs/main/linux-64::libtool-2.4.6-h7b6447c_5\n libuuid pkgs/main/linux-64::libuuid-1.0.3-h1bed415_2\n libxcb pkgs/main/linux-64::libxcb-1.13-h1bed415_1\n libxml2 pkgs/main/linux-64::libxml2-2.9.9-hea5a465_1\n libxslt pkgs/main/linux-64::libxslt-1.1.33-h7d1a2b0_0\n llvmlite pkgs/main/linux-64::llvmlite-0.29.0-py37hd408876_0\n locket pkgs/main/linux-64::locket-0.2.0-py37_1\n lxml pkgs/main/linux-64::lxml-4.4.1-py37hefd8a0e_0\n lz4-c pkgs/main/linux-64::lz4-c-1.8.1.2-h14c3975_0\n lzo pkgs/main/linux-64::lzo-2.10-h49e0be7_2\n markupsafe pkgs/main/linux-64::markupsafe-1.1.1-py37h7b6447c_0\n matplotlib pkgs/main/linux-64::matplotlib-3.1.1-py37h5429711_0\n mccabe pkgs/main/linux-64::mccabe-0.6.1-py37_1\n mistune pkgs/main/linux-64::mistune-0.8.4-py37h7b6447c_0\n mkl pkgs/main/linux-64::mkl-2019.4-243\n mkl-service 
pkgs/main/linux-64::mkl-service-2.3.0-py37he904b0f_0\n mkl_fft pkgs/main/linux-64::mkl_fft-1.0.14-py37ha843d7b_0\n mkl_random pkgs/main/linux-64::mkl_random-1.1.0-py37hd6b4f25_0\n mock pkgs/main/linux-64::mock-3.0.5-py37_0\n more-itertools pkgs/main/linux-64::more-itertools-7.2.0-py37_0\n mpc pkgs/main/linux-64::mpc-1.1.0-h10f8cd9_1\n mpfr pkgs/main/linux-64::mpfr-4.0.1-hdf1c602_3\n mpmath pkgs/main/linux-64::mpmath-1.1.0-py37_0\n msgpack-python pkgs/main/linux-64::msgpack-python-0.6.1-py37hfd86e86_1\n multipledispatch pkgs/main/linux-64::multipledispatch-0.6.0-py37_0\n navigator-updater pkgs/main/linux-64::navigator-updater-0.2.1-py37_0\n nbconvert pkgs/main/linux-64::nbconvert-5.6.0-py37_1\n nbformat pkgs/main/linux-64::nbformat-4.4.0-py37_0\n ncurses pkgs/main/linux-64::ncurses-6.1-he6710b0_1\n networkx pkgs/main/noarch::networkx-2.3-py_0\n nltk pkgs/main/linux-64::nltk-3.4.5-py37_0\n nose pkgs/main/linux-64::nose-1.3.7-py37_2\n notebook pkgs/main/linux-64::notebook-6.0.1-py37_0\n numba pkgs/main/linux-64::numba-0.45.1-py37h962f231_0\n numexpr pkgs/main/linux-64::numexpr-2.7.0-py37h9e4a6bb_0\n numpy pkgs/main/linux-64::numpy-1.17.2-py37haad9e8e_0\n numpy-base pkgs/main/linux-64::numpy-base-1.17.2-py37hde5b4d6_0\n numpydoc pkgs/main/noarch::numpydoc-0.9.1-py_0\n olefile pkgs/main/linux-64::olefile-0.46-py37_0\n openpyxl pkgs/main/noarch::openpyxl-3.0.0-py_0\n openssl pkgs/main/linux-64::openssl-1.1.1d-h7b6447c_2\n packaging pkgs/main/noarch::packaging-19.2-py_0\n pandas pkgs/main/linux-64::pandas-0.25.1-py37he6710b0_0\n pandoc pkgs/main/linux-64::pandoc-2.2.3.2-0\n pandocfilters pkgs/main/linux-64::pandocfilters-1.4.2-py37_1\n pango pkgs/main/linux-64::pango-1.42.4-h049681c_0\n parso pkgs/main/noarch::parso-0.5.1-py_0\n partd pkgs/main/noarch::partd-1.0.0-py_0\n patchelf pkgs/main/linux-64::patchelf-0.9-he6710b0_3\n path.py pkgs/main/noarch::path.py-12.0.1-py_0\n pathlib2 pkgs/main/linux-64::pathlib2-2.3.5-py37_0\n patsy pkgs/main/linux-64::patsy-0.5.1-py37_0\n pcre pkgs/main/linux-64::pcre-8.43-he6710b0_0\n pep8 pkgs/main/linux-64::pep8-1.7.1-py37_0\n pexpect pkgs/main/linux-64::pexpect-4.7.0-py37_0\n pickleshare pkgs/main/linux-64::pickleshare-0.7.5-py37_0\n pillow pkgs/main/linux-64::pillow-6.2.0-py37h34e0f95_0\n pip pkgs/main/linux-64::pip-19.2.3-py37_0\n pixman pkgs/main/linux-64::pixman-0.38.0-h7b6447c_0\n pkginfo pkgs/main/linux-64::pkginfo-1.5.0.1-py37_0\n pluggy pkgs/main/linux-64::pluggy-0.13.0-py37_0\n ply pkgs/main/linux-64::ply-3.11-py37_0\n prometheus_client pkgs/main/noarch::prometheus_client-0.7.1-py_0\n prompt_toolkit pkgs/main/noarch::prompt_toolkit-2.0.10-py_0\n psutil pkgs/main/linux-64::psutil-5.6.3-py37h7b6447c_0\n ptyprocess pkgs/main/linux-64::ptyprocess-0.6.0-py37_0\n py pkgs/main/linux-64::py-1.8.0-py37_0\n py-lief pkgs/main/linux-64::py-lief-0.9.0-py37h7725739_2\n pycodestyle pkgs/main/linux-64::pycodestyle-2.5.0-py37_0\n pycosat pkgs/main/linux-64::pycosat-0.6.3-py37h14c3975_0\n pycparser pkgs/main/linux-64::pycparser-2.19-py37_0\n pycrypto pkgs/main/linux-64::pycrypto-2.6.1-py37h14c3975_9\n pycurl pkgs/main/linux-64::pycurl-7.43.0.3-py37h1ba5d50_0\n pyflakes pkgs/main/linux-64::pyflakes-2.1.1-py37_0\n pygments pkgs/main/noarch::pygments-2.4.2-py_0\n pylint pkgs/main/linux-64::pylint-2.4.2-py37_0\n pyodbc pkgs/main/linux-64::pyodbc-4.0.27-py37he6710b0_0\n pyopenssl pkgs/main/linux-64::pyopenssl-19.0.0-py37_0\n pyparsing pkgs/main/noarch::pyparsing-2.4.2-py_0\n pyqt pkgs/main/linux-64::pyqt-5.9.2-py37h05f1152_2\n pyrsistent 
pkgs/main/linux-64::pyrsistent-0.15.4-py37h7b6447c_0\n pysocks pkgs/main/linux-64::pysocks-1.7.1-py37_0\n pytables pkgs/main/linux-64::pytables-3.5.2-py37h71ec239_1\n pytest pkgs/main/linux-64::pytest-5.2.1-py37_0\n pytest-arraydiff pkgs/main/linux-64::pytest-arraydiff-0.3-py37h39e3cac_0\n pytest-astropy pkgs/main/linux-64::pytest-astropy-0.5.0-py37_0\n pytest-doctestplus pkgs/main/noarch::pytest-doctestplus-0.4.0-py_0\n pytest-openfiles pkgs/main/noarch::pytest-openfiles-0.4.0-py_0\n pytest-remotedata pkgs/main/linux-64::pytest-remotedata-0.3.2-py37_0\n python pkgs/main/linux-64::python-3.7.4-h265db76_1\n python-dateutil pkgs/main/linux-64::python-dateutil-2.8.0-py37_0\n python-libarchive~ pkgs/main/linux-64::python-libarchive-c-2.8-py37_13\n pytz pkgs/main/noarch::pytz-2019.3-py_0\n pywavelets pkgs/main/linux-64::pywavelets-1.0.3-py37hdd07704_1\n pyyaml pkgs/main/linux-64::pyyaml-5.1.2-py37h7b6447c_0\n pyzmq pkgs/main/linux-64::pyzmq-18.1.0-py37he6710b0_0\n qt pkgs/main/linux-64::qt-5.9.7-h5867ecd_1\n qtawesome pkgs/main/noarch::qtawesome-0.6.0-py_0\n qtconsole pkgs/main/noarch::qtconsole-4.5.5-py_0\n qtpy pkgs/main/noarch::qtpy-1.9.0-py_0\n readline pkgs/main/linux-64::readline-7.0-h7b6447c_5\n requests pkgs/main/linux-64::requests-2.22.0-py37_0\n ripgrep pkgs/main/linux-64::ripgrep-0.10.0-hc07d326_0\n rope pkgs/main/noarch::rope-0.14.0-py_0\n ruamel_yaml pkgs/main/linux-64::ruamel_yaml-0.15.46-py37h14c3975_0\n scikit-image pkgs/main/linux-64::scikit-image-0.15.0-py37he6710b0_0\n scikit-learn pkgs/main/linux-64::scikit-learn-0.21.3-py37hd81dba3_0\n scipy pkgs/main/linux-64::scipy-1.3.1-py37h7c811a0_0\n seaborn pkgs/main/linux-64::seaborn-0.9.0-py37_0\n secretstorage pkgs/main/linux-64::secretstorage-3.1.1-py37_0\n send2trash pkgs/main/linux-64::send2trash-1.5.0-py37_0\n setuptools pkgs/main/linux-64::setuptools-41.4.0-py37_0\n simplegeneric pkgs/main/linux-64::simplegeneric-0.8.1-py37_2\n singledispatch pkgs/main/linux-64::singledispatch-3.4.0.3-py37_0\n sip pkgs/main/linux-64::sip-4.19.8-py37hf484d3e_0\n six pkgs/main/linux-64::six-1.12.0-py37_0\n snappy pkgs/main/linux-64::snappy-1.1.7-hbae5bb6_3\n snowballstemmer pkgs/main/noarch::snowballstemmer-2.0.0-py_0\n sortedcollections pkgs/main/linux-64::sortedcollections-1.1.2-py37_0\n sortedcontainers pkgs/main/linux-64::sortedcontainers-2.1.0-py37_0\n soupsieve pkgs/main/linux-64::soupsieve-1.9.3-py37_0\n sphinx pkgs/main/noarch::sphinx-2.2.0-py_0\n sphinxcontrib pkgs/main/linux-64::sphinxcontrib-1.0-py37_1\n sphinxcontrib-app~ pkgs/main/noarch::sphinxcontrib-applehelp-1.0.1-py_0\n sphinxcontrib-dev~ pkgs/main/noarch::sphinxcontrib-devhelp-1.0.1-py_0\n sphinxcontrib-htm~ pkgs/main/noarch::sphinxcontrib-htmlhelp-1.0.2-py_0\n sphinxcontrib-jsm~ pkgs/main/noarch::sphinxcontrib-jsmath-1.0.1-py_0\n sphinxcontrib-qth~ pkgs/main/noarch::sphinxcontrib-qthelp-1.0.2-py_0\n sphinxcontrib-ser~ pkgs/main/noarch::sphinxcontrib-serializinghtml-1.1.3-py_0\n sphinxcontrib-web~ pkgs/main/noarch::sphinxcontrib-websupport-1.1.2-py_0\n spyder pkgs/main/linux-64::spyder-3.3.6-py37_0\n spyder-kernels pkgs/main/linux-64::spyder-kernels-0.5.2-py37_0\n sqlalchemy pkgs/main/linux-64::sqlalchemy-1.3.9-py37h7b6447c_0\n sqlite pkgs/main/linux-64::sqlite-3.30.0-h7b6447c_0\n statsmodels pkgs/main/linux-64::statsmodels-0.10.1-py37hdd07704_0\n sympy pkgs/main/linux-64::sympy-1.4-py37_0\n tbb pkgs/main/linux-64::tbb-2019.4-hfd86e86_0\n tblib pkgs/main/noarch::tblib-1.4.0-py_0\n terminado pkgs/main/linux-64::terminado-0.8.2-py37_0\n testpath 
pkgs/main/linux-64::testpath-0.4.2-py37_0\n tk pkgs/main/linux-64::tk-8.6.8-hbc83047_0\n toolz pkgs/main/noarch::toolz-0.10.0-py_0\n tornado pkgs/main/linux-64::tornado-6.0.3-py37h7b6447c_0\n tqdm pkgs/main/noarch::tqdm-4.36.1-py_0\n traitlets pkgs/main/linux-64::traitlets-4.3.3-py37_0\n unicodecsv pkgs/main/linux-64::unicodecsv-0.14.1-py37_0\n unixodbc pkgs/main/linux-64::unixodbc-2.3.7-h14c3975_0\n urllib3 pkgs/main/linux-64::urllib3-1.24.2-py37_0\n wcwidth pkgs/main/linux-64::wcwidth-0.1.7-py37_0\n webencodings pkgs/main/linux-64::webencodings-0.5.1-py37_1\n werkzeug pkgs/main/noarch::werkzeug-0.16.0-py_0\n wheel pkgs/main/linux-64::wheel-0.33.6-py37_0\n widgetsnbextension pkgs/main/linux-64::widgetsnbextension-3.5.1-py37_0\n wrapt pkgs/main/linux-64::wrapt-1.11.2-py37h7b6447c_0\n wurlitzer pkgs/main/linux-64::wurlitzer-1.0.3-py37_0\n xlrd pkgs/main/linux-64::xlrd-1.2.0-py37_0\n xlsxwriter pkgs/main/noarch::xlsxwriter-1.2.1-py_0\n xlwt pkgs/main/linux-64::xlwt-1.3.0-py37_0\n xz pkgs/main/linux-64::xz-5.2.4-h14c3975_4\n yaml pkgs/main/linux-64::yaml-0.1.7-had09818_2\n zeromq pkgs/main/linux-64::zeromq-4.3.1-he6710b0_3\n zict pkgs/main/noarch::zict-1.0.0-py_0\n zipp pkgs/main/noarch::zipp-0.6.0-py_0\n zlib pkgs/main/linux-64::zlib-1.2.11-h7b6447c_3\n zstd pkgs/main/linux-64::zstd-1.3.7-h0b5b093_0\n\n\nPreparing transaction: - \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\bdone\nExecuting transaction: - \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ 
\b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\bdone\ninstallation finished.\nWARNING:\n You currently have a PYTHONPATH environment variable set. This may cause\n unexpected behavior when running the Python interpreter in Anaconda3.\n For best results, please verify that your PYTHONPATH only points to\n directories of packages that are compatible with the Python interpreter\n in Anaconda3: /usr/local\nCollecting package metadata (current_repodata.json): - \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\bdone\nSolving environment: | \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- 
\b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\bfailed with initial frozen solve. Retrying with flexible solve.\nSolving environment: / \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- 
\b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\bfailed with repodata from current_repodata.json, will retry with next repodata source.\nCollecting package metadata (repodata.json): / \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\bdone\nSolving environment: / \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\bdone\n\n\n==> WARNING: A newer version of conda exists. 
<==\n current version: 4.7.12\n latest version: 4.8.2\n\nPlease update conda by running\n\n $ conda update -n base -c defaults conda\n\n\n\n## Package Plan ##\n\n environment location: /usr/local\n\n added / updated specs:\n - deepchem-gpu=2.3.0\n\n\nThe following packages will be downloaded:\n\n package | build\n ---------------------------|-----------------\n _py-xgboost-mutex-2.0 | cpu_0 8 KB conda-forge\n _tflow_select-2.1.0 | gpu 2 KB\n absl-py-0.9.0 | py37_0 162 KB conda-forge\n astor-0.7.1 | py_0 22 KB conda-forge\n c-ares-1.15.0 | h516909a_1001 100 KB conda-forge\n certifi-2019.9.11 | py37_0 147 KB conda-forge\n conda-4.8.2 | py37_0 3.0 MB conda-forge\n cudatoolkit-10.1.243 | h6bb024c_0 347.4 MB\n cudnn-7.6.5 | cuda10.1_0 179.9 MB\n cupti-10.1.168 | 0 1.4 MB\n deepchem-gpu-2.3.0 | py37_0 2.1 MB deepchem\n fftw3f-3.3.4 | 2 1.2 MB omnia\n gast-0.3.3 | py_0 12 KB conda-forge\n google-pasta-0.1.8 | py_0 42 KB conda-forge\n grpcio-1.23.0 | py37he9ae1f9_0 1.1 MB conda-forge\n keras-applications-1.0.8 | py_1 30 KB conda-forge\n keras-preprocessing-1.1.0 | py_0 33 KB conda-forge\n libboost-1.67.0 | h46d08c1_4 13.0 MB\n libprotobuf-3.11.4 | h8b12597_0 4.8 MB conda-forge\n libxgboost-0.90 | he1b5a44_4 2.4 MB conda-forge\n markdown-3.2.1 | py_0 61 KB conda-forge\n mdtraj-1.9.3 | py37h00575c5_0 1.9 MB conda-forge\n openmm-7.4.1 |py37_cuda101_rc_1 11.9 MB omnia\n pdbfixer-1.6 | py37_0 190 KB omnia\n protobuf-3.11.4 | py37he1b5a44_0 699 KB conda-forge\n py-boost-1.67.0 | py37h04863e7_4 278 KB\n py-xgboost-0.90 | py37_4 73 KB conda-forge\n rdkit-2019.09.3.0 | py37hc20afe1_1 23.7 MB rdkit\n simdna-0.4.2 | py_0 627 KB deepchem\n tensorboard-1.14.0 | py37_0 3.2 MB conda-forge\n tensorflow-1.14.0 |gpu_py37h74c33d7_0 4 KB\n tensorflow-base-1.14.0 |gpu_py37he45bfe2_0 146.3 MB\n tensorflow-estimator-1.14.0| py37h5ca1d4c_0 645 KB conda-forge\n tensorflow-gpu-1.14.0 | h0d30ee6_0 3 KB\n termcolor-1.1.0 | py_2 6 KB conda-forge\n xgboost-0.90 | py37he1b5a44_4 11 KB conda-forge\n ------------------------------------------------------------\n Total: 746.5 MB\n\nThe following NEW packages will be INSTALLED:\n\n _py-xgboost-mutex conda-forge/linux-64::_py-xgboost-mutex-2.0-cpu_0\n _tflow_select pkgs/main/linux-64::_tflow_select-2.1.0-gpu\n absl-py conda-forge/linux-64::absl-py-0.9.0-py37_0\n astor conda-forge/noarch::astor-0.7.1-py_0\n c-ares conda-forge/linux-64::c-ares-1.15.0-h516909a_1001\n cudatoolkit pkgs/main/linux-64::cudatoolkit-10.1.243-h6bb024c_0\n cudnn pkgs/main/linux-64::cudnn-7.6.5-cuda10.1_0\n cupti pkgs/main/linux-64::cupti-10.1.168-0\n deepchem-gpu deepchem/linux-64::deepchem-gpu-2.3.0-py37_0\n fftw3f omnia/linux-64::fftw3f-3.3.4-2\n gast conda-forge/noarch::gast-0.3.3-py_0\n google-pasta conda-forge/noarch::google-pasta-0.1.8-py_0\n grpcio conda-forge/linux-64::grpcio-1.23.0-py37he9ae1f9_0\n keras-applications conda-forge/noarch::keras-applications-1.0.8-py_1\n keras-preprocessi~ conda-forge/noarch::keras-preprocessing-1.1.0-py_0\n libboost pkgs/main/linux-64::libboost-1.67.0-h46d08c1_4\n libprotobuf conda-forge/linux-64::libprotobuf-3.11.4-h8b12597_0\n libxgboost conda-forge/linux-64::libxgboost-0.90-he1b5a44_4\n markdown conda-forge/noarch::markdown-3.2.1-py_0\n mdtraj conda-forge/linux-64::mdtraj-1.9.3-py37h00575c5_0\n openmm omnia/linux-64::openmm-7.4.1-py37_cuda101_rc_1\n pdbfixer omnia/linux-64::pdbfixer-1.6-py37_0\n protobuf conda-forge/linux-64::protobuf-3.11.4-py37he1b5a44_0\n py-boost pkgs/main/linux-64::py-boost-1.67.0-py37h04863e7_4\n py-xgboost 
conda-forge/linux-64::py-xgboost-0.90-py37_4\n rdkit rdkit/linux-64::rdkit-2019.09.3.0-py37hc20afe1_1\n simdna deepchem/noarch::simdna-0.4.2-py_0\n tensorboard conda-forge/linux-64::tensorboard-1.14.0-py37_0\n tensorflow pkgs/main/linux-64::tensorflow-1.14.0-gpu_py37h74c33d7_0\n tensorflow-base pkgs/main/linux-64::tensorflow-base-1.14.0-gpu_py37he45bfe2_0\n tensorflow-estima~ conda-forge/linux-64::tensorflow-estimator-1.14.0-py37h5ca1d4c_0\n tensorflow-gpu pkgs/main/linux-64::tensorflow-gpu-1.14.0-h0d30ee6_0\n termcolor conda-forge/noarch::termcolor-1.1.0-py_2\n xgboost conda-forge/linux-64::xgboost-0.90-py37he1b5a44_4\n\nThe following packages will be UPDATED:\n\n conda pkgs/main::conda-4.7.12-py37_0 --> conda-forge::conda-4.8.2-py37_0\n\nThe following packages will be SUPERSEDED by a higher-priority channel:\n\n certifi pkgs/main --> conda-forge\n\n\n\nDownloading and Extracting Packages\nkeras-applications-1 | 30 KB | : 100% 1.0/1 [00:00<00:00, 8.82it/s] \nlibboost-1.67.0 | 13.0 MB | : 100% 1.0/1 [00:01<00:00, 1.85s/it] \nabsl-py-0.9.0 | 162 KB | : 100% 1.0/1 [00:00<00:00, 11.13it/s]\nlibxgboost-0.90 | 2.4 MB | : 100% 1.0/1 [00:00<00:00, 2.16it/s]\ncupti-10.1.168 | 1.4 MB | : 100% 1.0/1 [00:00<00:00, 7.39it/s]\ntermcolor-1.1.0 | 6 KB | : 100% 1.0/1 [00:00<00:00, 22.33it/s]\ntensorflow-base-1.14 | 146.3 MB | : 100% 1.0/1 [00:14<00:00, 14.12s/it] \ntensorboard-1.14.0 | 3.2 MB | : 100% 1.0/1 [00:00<00:00, 1.87it/s]\ncudnn-7.6.5 | 179.9 MB | : 100% 1.0/1 [00:10<00:00, 10.91s/it] \nconda-4.8.2 | 3.0 MB | : 100% 1.0/1 [00:00<00:00, 1.22it/s]\npy-boost-1.67.0 | 278 KB | : 100% 1.0/1 [00:00<00:00, 8.26it/s]\npy-xgboost-0.90 | 73 KB | : 100% 1.0/1 [00:00<00:00, 18.94it/s]\ntensorflow-gpu-1.14. | 3 KB | : 100% 1.0/1 [00:00<00:00, 9.85it/s]\nmdtraj-1.9.3 | 1.9 MB | : 100% 1.0/1 [00:00<00:00, 2.17it/s]\nrdkit-2019.09.3.0 | 23.7 MB | : 100% 1.0/1 [00:05<00:00, 76.64s/it] \ndeepchem-gpu-2.3.0 | 2.1 MB | : 100% 1.0/1 [00:00<00:00, 50.91s/it] \ngrpcio-1.23.0 | 1.1 MB | : 100% 1.0/1 [00:00<00:00, 4.14it/s]\n_py-xgboost-mutex-2. 
| 8 KB | : 100% 1.0/1 [00:00<00:00, 27.43it/s]\nlibprotobuf-3.11.4 | 4.8 MB | : 100% 1.0/1 [00:01<00:00, 1.08s/it]\nkeras-preprocessing- | 33 KB | : 100% 1.0/1 [00:00<00:00, 22.50it/s]\nmarkdown-3.2.1 | 61 KB | : 100% 1.0/1 [00:00<00:00, 20.73it/s]\ngoogle-pasta-0.1.8 | 42 KB | : 100% 1.0/1 [00:00<00:00, 11.05it/s]\nprotobuf-3.11.4 | 699 KB | : 100% 1.0/1 [00:00<00:00, 4.10it/s]\n_tflow_select-2.1.0 | 2 KB | : 100% 1.0/1 [00:00<00:00, 10.36it/s]\nsimdna-0.4.2 | 627 KB | : 100% 1.0/1 [00:00<00:00, 2.80it/s] \nc-ares-1.15.0 | 100 KB | : 100% 1.0/1 [00:00<00:00, 13.50it/s]\ngast-0.3.3 | 12 KB | : 100% 1.0/1 [00:00<00:00, 20.80it/s]\ncertifi-2019.9.11 | 147 KB | : 100% 1.0/1 [00:00<00:00, 7.10it/s]\nfftw3f-3.3.4 | 1.2 MB | : 100% 1.0/1 [00:00<00:00, 12.56s/it] \nopenmm-7.4.1 | 11.9 MB | : 100% 1.0/1 [00:03<00:00, 108.64s/it] \ntensorflow-1.14.0 | 4 KB | : 100% 1.0/1 [00:00<00:00, 10.64it/s]\ntensorflow-estimator | 645 KB | : 100% 1.0/1 [00:00<00:00, 4.16it/s]\nastor-0.7.1 | 22 KB | : 100% 1.0/1 [00:00<00:00, 26.30it/s]\nxgboost-0.90 | 11 KB | : 100% 1.0/1 [00:00<00:00, 32.86it/s]\ncudatoolkit-10.1.243 | 347.4 MB | : 100% 1.0/1 [00:19<00:00, 19.76s/it] \npdbfixer-1.6 | 190 KB | : 100% 1.0/1 [00:00<00:00, 1.50it/s] \nPreparing transaction: \\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\bdone\nVerifying transaction: \\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\bdone\nExecuting transaction: | \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\b| \b\b/ \b\b- \b\b\\ \b\bdone\n"
]
],
[
[
"Let's start with some basic imports",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport numpy as np\nfrom rdkit import Chem\n\nfrom deepchem.feat import ConvMolFeaturizer, WeaveFeaturizer, CircularFingerprint\nfrom deepchem.feat import AdjacencyFingerprint, RDKitDescriptors\nfrom deepchem.feat import BPSymmetryFunctionInput, CoulombMatrix, CoulombMatrixEig\nfrom deepchem.utils import conformers",
"/usr/local/lib/python3.6/dist-packages/sklearn/externals/joblib/__init__.py:15: FutureWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.\n warnings.warn(msg, category=FutureWarning)\n"
]
],
[
[
"We use `propane`( $CH_3 CH_2 CH_3 $ ) as a running example throughout this tutorial. Many of the featurization methods use conformers or the molecules. A conformer can be generated using the `ConformerGenerator` class in `deepchem.utils.conformers`. ",
"_____no_output_____"
],
[
"### RDKitDescriptors",
"_____no_output_____"
],
[
"`RDKitDescriptors` featurizes a molecule by computing descriptors values for specified descriptors. Intrinsic to the featurizer is a set of allowed descriptors, which can be accessed using `RDKitDescriptors.allowedDescriptors`.\n\nThe featurizer uses the descriptors in `rdkit.Chem.Descriptors.descList`, checks if they are in the list of allowed descriptors and computes the descriptor value for the molecule.",
"_____no_output_____"
]
],
[
[
"example_smile = \"CCC\"\nexample_mol = Chem.MolFromSmiles(example_smile)",
"_____no_output_____"
]
],
[
[
"Let's check the allowed list of descriptors. As you will see shortly, there's a wide range of chemical properties that RDKit computes for us.",
"_____no_output_____"
]
],
[
[
"for descriptor in RDKitDescriptors.allowedDescriptors:\n print(descriptor)",
"MaxAbsPartialCharge\nEState_VSA6\nSMR_VSA10\nEState_VSA3\nSlogP_VSA2\nSlogP_VSA12\nPEOE_VSA8\nLabuteASA\nSMR_VSA2\nChi4n\nMaxPartialCharge\nEState_VSA9\nEState_VSA8\nSMR_VSA8\nEState_VSA2\nSMR_VSA4\nRingCount\nSlogP_VSA6\nMinAbsEStateIndex\nVSA_EState4\nPEOE_VSA7\nChi2v\nPEOE_VSA12\nNumAliphaticCarbocycles\nVSA_EState8\nNumHeteroatoms\nMolLogP\nPEOE_VSA10\nSlogP_VSA9\nEState_VSA10\nChi1v\nMolWt\nEState_VSA11\nHeavyAtomMolWt\nChi4v\nMinPartialCharge\nPEOE_VSA1\nSlogP_VSA4\nMaxAbsEStateIndex\nPEOE_VSA2\nNumValenceElectrons\nChi1\nTPSA\nNumAromaticHeterocycles\nSMR_VSA1\nSMR_VSA3\nChi1n\nFractionCSP3\nNOCount\nSMR_VSA9\nVSA_EState10\nEState_VSA7\nNumAromaticCarbocycles\nChi3n\nVSA_EState1\nNumSaturatedRings\nKappa1\nPEOE_VSA4\nNumSaturatedHeterocycles\nEState_VSA5\nMolMR\nSMR_VSA5\nNumSaturatedCarbocycles\nChi2n\nMinAbsPartialCharge\nMinEStateIndex\nPEOE_VSA14\nSlogP_VSA3\nSlogP_VSA11\nNumRotatableBonds\nVSA_EState3\nExactMolWt\nVSA_EState6\nKappa3\nVSA_EState9\nChi3v\nKappa2\nEState_VSA4\nSMR_VSA7\nNumHDonors\nPEOE_VSA3\nSMR_VSA6\nSlogP_VSA1\nNumAliphaticRings\nHallKierAlpha\nNumAromaticRings\nChi0n\nPEOE_VSA6\nSlogP_VSA8\nVSA_EState7\nVSA_EState2\nBalabanJ\nSlogP_VSA5\nEState_VSA1\nNHOHCount\nBertzCT\nChi0\nNumRadicalElectrons\nPEOE_VSA9\nSlogP_VSA10\nSlogP_VSA7\nHeavyAtomCount\nNumHAcceptors\nVSA_EState5\nPEOE_VSA13\nNumAliphaticHeterocycles\nIpc\nMaxEStateIndex\nPEOE_VSA5\nChi0v\nPEOE_VSA11\n"
],
[
"rdkit_desc = RDKitDescriptors()\nfeatures = rdkit_desc._featurize(example_mol)\n\nprint('The number of descriptors present are: ', len(features))",
"The number of descriptors present are: 111\n"
]
],
[
[
"### BPSymmetryFunction",
"_____no_output_____"
],
[
"`Behler-Parinello Symmetry function` or `BPSymmetryFunction` featurizes a molecule by computing the atomic number and coordinates for each atom in the molecule. The features can be used as input for symmetry functions, like `RadialSymmetry`, `DistanceMatrix` and `DistanceCutoff` . More details on these symmetry functions can be found in [this paper](https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.98.146401). These functions can be found in `deepchem.feat.coulomb_matrices`\n\nThe featurizer takes in `max_atoms` as an argument. As input, it takes in a conformer of the molecule and computes:\n\n1. coordinates of every atom in the molecule (in Bohr units)\n2. the atomic numbers for all atoms. \n\nThese features are concantenated and padded with zeros to account for different number of atoms, across molecules.",
"_____no_output_____"
]
],
[
[
"example_smile = \"CCC\"\nexample_mol = Chem.MolFromSmiles(example_smile)\nengine = conformers.ConformerGenerator(max_conformers=1)\nexample_mol = engine.generate_conformers(example_mol)",
"_____no_output_____"
]
],
[
[
"Let's now take a look at the actual featurized matrix that comes out.",
"_____no_output_____"
]
],
[
[
"bp_sym = BPSymmetryFunctionInput(max_atoms=20)\nfeatures = bp_sym._featurize(mol=example_mol)\nfeatures",
"_____no_output_____"
]
],
[
[
"A simple check for the featurization would be to count the different atomic numbers present in the features.",
"_____no_output_____"
]
],
[
[
"atomic_numbers = features[:, 0]\nfrom collections import Counter\n\nunique_numbers = Counter(atomic_numbers)\nprint(unique_numbers)",
"Counter({0.0: 9, 1.0: 8, 6.0: 3})\n"
]
],
[
[
"For propane, we have $3$ `C-atoms` and $8$ `H-atoms`, and these numbers are in agreement with the results shown above. There's also the additional padding of 9 atoms, to equalize with `max_atoms`.",
"_____no_output_____"
],
[
"### CoulombMatrix",
"_____no_output_____"
],
[
"`CoulombMatrix`, featurizes a molecule by computing the coulomb matrices for different conformers of the molecule, and returning it as a list.\n\nA Coulomb matrix tries to encode the energy structure of a molecule. The matrix is symmetric, with the off-diagonal elements capturing the Coulombic repulsion between pairs of atoms and the diagonal elements capturing atomic energies using the atomic numbers. More information on the functional forms used can be found [here](https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.108.058301).\n\nThe featurizer takes in `max_atoms` as an argument and also has options for removing hydrogens from the molecule (`remove_hydrogens`), generating additional random coulomb matrices(`randomize`), and getting only the upper triangular matrix (`upper_tri`).",
"_____no_output_____"
]
],
[
[
"example_smile = \"CCC\"\nexample_mol = Chem.MolFromSmiles(example_smile)\n\nengine = conformers.ConformerGenerator(max_conformers=1)\nexample_mol = engine.generate_conformers(example_mol)\n\nprint(\"Number of available conformers for propane: \", len(example_mol.GetConformers()))",
"Number of available conformers for propane: 1\n"
],
[
"coulomb_mat = CoulombMatrix(max_atoms=20, randomize=False, remove_hydrogens=False, upper_tri=False)\nfeatures = coulomb_mat._featurize(mol=example_mol)",
"_____no_output_____"
]
],
[
[
"A simple check for the featurization is to see if the feature list has the same length as the number of conformers",
"_____no_output_____"
]
],
[
[
"print(len(example_mol.GetConformers()) == len(features))",
"True\n"
]
],
[
[
"### CoulombMatrixEig",
"_____no_output_____"
],
[
"`CoulombMatrix` is invariant to molecular rotation and translation, since the interatomic distances or atomic numbers do not change. However the matrix is not invariant to random permutations of the atom's indices. To deal with this, the `CoulumbMatrixEig` featurizer was introduced, which uses the eigenvalue spectrum of the columb matrix, and is invariant to random permutations of the atom's indices.\n\n`CoulombMatrixEig` inherits from `CoulombMatrix` and featurizes a molecule by first computing the coulomb matrices for different conformers of the molecule and then computing the eigenvalues for each coulomb matrix. These eigenvalues are then padded to account for variation in number of atoms across molecules.\n\nThe featurizer takes in `max_atoms` as an argument and also has options for removing hydrogens from the molecule (`remove_hydrogens`), generating additional random coulomb matrices(`randomize`).",
"_____no_output_____"
]
],
[
[
"example_smile = \"CCC\"\nexample_mol = Chem.MolFromSmiles(example_smile)\n\nengine = conformers.ConformerGenerator(max_conformers=1)\nexample_mol = engine.generate_conformers(example_mol)\n\nprint(\"Number of available conformers for propane: \", len(example_mol.GetConformers()))",
"Number of available conformers for propane: 1\n"
],
[
"coulomb_mat_eig = CoulombMatrixEig(max_atoms=20, randomize=False, remove_hydrogens=False)\nfeatures = coulomb_mat_eig._featurize(mol=example_mol)",
"_____no_output_____"
],
[
"print(len(example_mol.GetConformers()) == len(features))",
"True\n"
]
],
[
[
"### Adjacency Fingerprints",
"_____no_output_____"
],
[
"TODO(rbharath): This tutorial still needs to be expanded out with the additional fingerprints.",
"_____no_output_____"
],
[
"# Congratulations! Time to join the Community!\n\nCongratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:\n\n## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem)\nThis helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.\n\n## Join the DeepChem Gitter\nThe DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
d04cc6716a6b00fa05e21763693e4f086045641c | 17,471 | ipynb | Jupyter Notebook | Week 4 - Multi-Class Classification and Neural Networks/Neural Networks.ipynb | Nikronic/Coursera-Machine-Learning | 4bc5de16446b664f9995eba95762b730547b1fce | [
"MIT"
] | 16 | 2018-09-11T10:55:57.000Z | 2022-01-31T21:08:18.000Z | Week 4 - Multi-Class Classification and Neural Networks/Neural Networks.ipynb | Nikronic/Coursera-Machine-Learning | 4bc5de16446b664f9995eba95762b730547b1fce | [
"MIT"
] | 1 | 2018-10-14T18:40:21.000Z | 2018-11-08T20:06:03.000Z | Week 4 - Multi-Class Classification and Neural Networks/Neural Networks.ipynb | Nikronic/Coursera-Machine-Learning | 4bc5de16446b664f9995eba95762b730547b1fce | [
"MIT"
] | 6 | 2019-11-21T02:59:22.000Z | 2022-01-31T21:08:26.000Z | 33.727799 | 623 | 0.560014 | [
[
[
"# Neural Networks\nIn the previous part of this exercise, you implemented multi-class logistic re gression to recognize handwritten digits. However, logistic regression cannot form more complex hypotheses as it is only a linear classifier.<br><br>\n\nIn this part of the exercise, you will implement a neural network to recognize handwritten digits using the same training set as before. The <strong>neural network</strong> will be able to represent complex models that form <strong>non-linear hypotheses</strong>. For this week, you will be using parameters from <strong>a neural network that we have already trained</strong>. Your goal is to implement the <strong>feedforward propagation algorithm to use our weights for prediction</strong>. In next week’s exercise, you will write the backpropagation algorithm for learning the neural network parameters.<br><br>\n\nThe file <strong><em>ex3data1</em></strong> contains a training set.<br>\nThe structure of the dataset described blow:<br>\n1. X array = <strong>400 columns describe the values of pixels of 20*20 images in flatten format for 5000 samples</strong>\n2. y array = <strong>Value of image (number between 0-9)</strong>\n\n\n<br><br>\n<strong>\nOur assignment has these sections:\n1. Visualizing the Data\n 1. Converting .mat to .csv\n 2. Loading Dataset and Trained Neural Network Weights\n 3. Ploting Data\n2. Model Representation\n3. Feedforward Propagation and Prediction\n</strong>\n\nIn each section full description provided.",
"_____no_output_____"
],
[
"## 1. Visualizing the Dataset\nBefore starting on any task, it is often useful to understand the data by visualizing it.<br>",
"_____no_output_____"
],
[
"### 1.A Converting .mat to .csv\nIn this specific assignment, the instructor added a .mat file as training set and weights of trained neural network. But we have to convert it to .csv to use in python.<br>\nAfter all we now ready to import our new csv files to pandas dataframes and do preprocessing on it and make it ready for next steps.",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport scipy.io\nimport numpy as np\n\ndata = scipy.io.loadmat(\"ex3data1\")\nweights = scipy.io.loadmat('ex3weights')",
"_____no_output_____"
]
],
[
[
"Now we extract X and y variables from the .mat file and save them into .csv file for further usage. After running the below code <strong>you should see X.csv and y.csv files</strong> in your directory.",
"_____no_output_____"
]
],
[
[
"for i in data:\n if '__' not in i and 'readme' not in i:\n np.savetxt((i+\".csv\"),data[i],delimiter=',')\n \nfor i in weights:\n if '__' not in i and 'readme' not in i:\n np.savetxt((i+\".csv\"),weights[i],delimiter=',')",
"_____no_output_____"
]
],
[
[
"### 1.B Loading Dataset and Trained Neural Network Weights\nFirst we import .csv files into pandas dataframes then save them into numpy arrays.<br><br>\nThere are <strong>5000 training examples</strong> in ex3data1.mat, where each training example is a <strong>20 pixel by 20 pixel <em>grayscale</em> image of the digit</strong>. Each pixel is represented by a floating point number indicating the <strong>grayscale intensity</strong> at that location. The 20 by 20 grid of pixels is <strong>\"flatten\" into a 400-dimensional vector</strong>. <strong>Each of these training examples becomes a single row in our data matrix X</strong>. This gives us a 5000 by 400 matrix X where every row is a training example for a handwritten digit image.<br><br>\nThe second part of the training set is a <strong>5000-dimensional vector y that contains labels</strong> for the training set.<br><br>\n<strong>Notice: In dataset, the digit zero mapped to the value ten. Therefore, a \"0\" digit is labeled as \"10\", while the digits \"1\" to \"9\" are labeled as \"1\" to \"9\" in their natural order.<br></strong>\nBut this make thing harder so we bring it back to natural order for 0!",
"_____no_output_____"
]
],
[
[
"# import library\nimport pandas as pd\n\n# saving .csv files to pandas dataframes\nx_df = pd.read_csv('X.csv',names= np.arange(0,400))\ny_df = pd.read_csv('y.csv',names=['label'])",
"_____no_output_____"
],
[
"# saving .csv files to pandas dataframes\nTheta1_df = pd.read_csv('Theta1.csv',names = np.arange(0,401))\nTheta2_df = pd.read_csv('Theta2.csv',names = np.arange(0,26))",
"_____no_output_____"
],
[
"# saving x_df and y_df into numpy arrays\nx = x_df.iloc[:,:].values\ny = y_df.iloc[:,:].values\n\nm, n = x.shape\n\n# bring back 0 to 0 !!!\ny = y.reshape(m,)\ny[y==10] = 0\ny = y.reshape(m,1)\n\nprint('#{} Number of training samples, #{} features per sample'.format(m,n))",
"#5000 Number of training samples, #400 features per sample\n"
],
[
"# saving Theta1_df and Theta2_df into numpy arrays\ntheta1 = Theta1_df.iloc[:,:].values\ntheta2 = Theta2_df.iloc[:,:].values",
"_____no_output_____"
]
],
[
[
"### 1.C Plotting Data\nYou will begin by visualizing a subset of the training set. In first part, the code <strong>randomly selects selects 100 rows from X</strong> and passes those rows to the <strong>display_data</strong> function. This function maps each row to a 20 pixel by 20 pixel grayscale image and displays the images together.<br>\nAfter plotting, you should see an image like this:<img src='img/plot.jpg'>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\namount = 100\nlines = 10\ncolumns = 10\nimage = np.zeros((amount, 20, 20))\nnumber = np.zeros(amount)\n\nfor i in range(amount):\n rnd = random.randint(0,4999)\n image[i] = x[rnd].reshape(20, 20)\n y_temp = y.reshape(m,)\n number[i] = y_temp[rnd]\nfig = plt.figure(figsize=(8,8))\n\nfor i in range(amount):\n ax = fig.add_subplot(lines, columns, 1 + i)\n \n # Turn off tick labels\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n plt.imshow(image[i], cmap='binary')\nplt.show()\nprint(number)",
"_____no_output_____"
]
],
[
[
"# 2. Model Representation\nOur neural network is shown in below figure. It has <strong>3 layers an input layer, a hidden layer and an output layer</strong>. Recall that our <strong>inputs are pixel</strong> values of digit images. Since the images are of <strong>size 20×20</strong>, this gives us <strong>400 input layer units</strong> (excluding the extra bias unit which always outputs +1).<br><br><img src='img/nn.jpg'><br>\nYou have been provided with a set of <strong>network parameters (Θ<sup>(1)</sup>; Θ<sup>(2)</sup>)</strong> already trained by instructor.<br><br>\n<strong>Theta1 and Theta2 The parameters have dimensions that are sized for a neural network with 25 units in the second layer and 10 output units (corresponding to the 10 digit classes).</strong>",
"_____no_output_____"
]
],
[
[
"print('theta1 shape = {}, theta2 shape = {}'.format(theta1.shape,theta2.shape))",
"theta1 shape = (25, 401), theta2 shape = (10, 26)\n"
]
],
[
[
"It seems our weights are transposed, so we transpose them to have them in a way our neural network is.",
"_____no_output_____"
]
],
[
[
"theta1 = theta1.transpose()\ntheta2 = theta2.transpose()\nprint('theta1 shape = {}, theta2 shape = {}'.format(theta1.shape,theta2.shape))",
"theta1 shape = (401, 25), theta2 shape = (26, 10)\n"
]
],
[
[
"# 3. Feedforward Propagation and Prediction\nNow you will implement feedforward propagation for the neural network.<br>\nYou should implement the <strong>feedforward computation</strong> that computes <strong>h<sub>θ</sub>(x<sup>(i)</sup>)</strong> for every example i and returns the associated predictions. Similar to the one-vs-all classification strategy, the prediction from the neural network will be the <strong>label</strong> that has the <strong>largest output <strong>h<sub>θ</sub>(x)<sub>k</sub></strong></strong>.",
"_____no_output_____"
],
[
"<strong>Implementation Note:</strong> The matrix X contains the examples in rows. When you complete the code, <strong>you will need to add the column of 1’s</strong> to the matrix. The matrices <strong>Theta1 and Theta2 contain the parameters for each unit in rows.</strong> Specifically, the first row of Theta1 corresponds to the first hidden unit in the second layer. <br>\nYou must get <strong>a<sup>(l)</sup></strong> as a column vector.<br><br>\nYou should see that the <strong>accuracy is about 97.5%</strong>.",
"_____no_output_____"
]
],
[
[
"# adding column of 1's to x\nx = np.append(np.ones(shape=(m,1)),x,axis = 1)",
"_____no_output_____"
]
],
[
[
"<strong>h = hypothesis(x,theta)</strong> will compute <strong>sigmoid</strong> function on <strong>θ<sup>T</sup>X</strong> and return a number which <strong>0<=h<=1</strong>.<br>\nYou can use <a href='https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.special.expit.html'>this</a> library for calculating sigmoid.",
"_____no_output_____"
]
],
[
[
"def sigmoid(z):\n return 1/(1+np.exp(-z))",
"_____no_output_____"
],
[
"def lr_hypothesis(x,theta):\n return np.dot(x,theta)",
"_____no_output_____"
]
],
[
[
"<strong>predict(theta1, theta2, x):</strong> outputs the predicted label of x given the trained weights of a neural network (theta1, theta2).",
"_____no_output_____"
]
],
[
[
"layers = 3\nnum_labels = 10",
"_____no_output_____"
]
],
[
[
"<strong>Becuase the initial dataset has changed and mapped 0 to \"10\", so the weights also are changed. So we just rotate columns one step to right, to predict correct values.<br>\nRecall we have changed mapping 0 to \"10\" to 0 to \"0\" but we cannot detect this mapping in weights of neural netwrok. So we have to this rotation on final output of probabilities.</strong>",
"_____no_output_____"
]
],
[
[
"def rotate_column(array):\n array_ = np.zeros(shape=(m,num_labels))\n temp = np.zeros(num_labels,)\n temp= array[:,9]\n array_[:,1:10] = array[:,0:9]\n array_[:,0] = temp\n return array_",
"_____no_output_____"
],
[
"def predict(theta1,theta2,x):\n z2 = np.dot(x,theta1) # hidden layer\n a2 = sigmoid(z2) # hidden layer\n\n # adding column of 1's to a2\n a2 = np.append(np.ones(shape=(m,1)),a2,axis = 1)\n z3 = np.dot(a2,theta2)\n a3 = sigmoid(z3)\n \n # mapping problem. Rotate left one step\n y_prob = rotate_column(a3)\n \n # prediction on activation a2\n y_pred = np.argmax(y_prob, axis=1).reshape(-1,1)\n return y_pred",
"_____no_output_____"
],
[
"y_pred = predict(theta1,theta2,x)\ny_pred.shape",
"_____no_output_____"
]
],
[
[
"Now we will compare our predicted result to the true one with <a href='http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html'>confusion_matrix</a> of numpy library.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix\n\n# Function for accuracy\ndef acc(confusion_matrix):\n t = 0\n for i in range(num_labels):\n t += confusion_matrix[i][i]\n f = m-t\n ac = t/(m)\n return (t,f,ac)",
"_____no_output_____"
],
[
"#import library\nfrom sklearn.metrics import confusion_matrix\ncm_train = confusion_matrix(y.reshape(m,),y_pred.reshape(m,))\nt,f,ac = acc(cm_train)\nprint('With #{} correct, #{} wrong ==========> accuracy = {}%'\n .format(t,f,ac*100))",
"With #4876 correct, #124 wrong ==========> accuracy = 97.52%\n"
],
[
"cm_train",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d04cc8e7a8d97e632a5d7efbd38c082e18ea232e | 241,931 | ipynb | Jupyter Notebook | Reinforcement Learning/EDA - Financial Modelling.ipynb | deepaksood619/DS_ML | 2f5b374cddd11c8d5f7d4c980e62fb34c2beb6b5 | [
"MIT"
] | null | null | null | Reinforcement Learning/EDA - Financial Modelling.ipynb | deepaksood619/DS_ML | 2f5b374cddd11c8d5f7d4c980e62fb34c2beb6b5 | [
"MIT"
] | null | null | null | Reinforcement Learning/EDA - Financial Modelling.ipynb | deepaksood619/DS_ML | 2f5b374cddd11c8d5f7d4c980e62fb34c2beb6b5 | [
"MIT"
] | null | null | null | 104.101119 | 141,032 | 0.742551 | [
[
[
"!ls",
"EDA - Financial Modelling.ipynb \u001b[1m\u001b[32mKaggle_demo_LIVE_TwoSigma.ipynb\u001b[m\u001b[m\r\nFrozenLake - Brute Force RL.ipynb Q-Learning in FronzenLake.ipynb\r\nFrozenLake - Policy Iteration.ipynb \u001b[1m\u001b[32mtrain.h5\u001b[m\u001b[m\r\nFrozenLake - Value Iteration.ipynb train.h5.zip\r\n"
],
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"with pd.HDFStore('train.h5', 'r') as train:\n df = train.get('train')",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df = pd.read_hdf('train.h5')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"len(df)",
"_____no_output_____"
],
[
"for i in df.columns:\n print(i, end='\\t')",
"id\ttimestamp\tderived_0\tderived_1\tderived_2\tderived_3\tderived_4\tfundamental_0\tfundamental_1\tfundamental_2\tfundamental_3\tfundamental_5\tfundamental_6\tfundamental_7\tfundamental_8\tfundamental_9\tfundamental_10\tfundamental_11\tfundamental_12\tfundamental_13\tfundamental_14\tfundamental_15\tfundamental_16\tfundamental_17\tfundamental_18\tfundamental_19\tfundamental_20\tfundamental_21\tfundamental_22\tfundamental_23\tfundamental_24\tfundamental_25\tfundamental_26\tfundamental_27\tfundamental_28\tfundamental_29\tfundamental_30\tfundamental_31\tfundamental_32\tfundamental_33\tfundamental_34\tfundamental_35\tfundamental_36\tfundamental_37\tfundamental_38\tfundamental_39\tfundamental_40\tfundamental_41\tfundamental_42\tfundamental_43\tfundamental_44\tfundamental_45\tfundamental_46\tfundamental_47\tfundamental_48\tfundamental_49\tfundamental_50\tfundamental_51\tfundamental_52\tfundamental_53\tfundamental_54\tfundamental_55\tfundamental_56\tfundamental_57\tfundamental_58\tfundamental_59\tfundamental_60\tfundamental_61\tfundamental_62\tfundamental_63\ttechnical_0\ttechnical_1\ttechnical_2\ttechnical_3\ttechnical_5\ttechnical_6\ttechnical_7\ttechnical_9\ttechnical_10\ttechnical_11\ttechnical_12\ttechnical_13\ttechnical_14\ttechnical_16\ttechnical_17\ttechnical_18\ttechnical_19\ttechnical_20\ttechnical_21\ttechnical_22\ttechnical_24\ttechnical_25\ttechnical_27\ttechnical_28\ttechnical_29\ttechnical_30\ttechnical_31\ttechnical_32\ttechnical_33\ttechnical_34\ttechnical_35\ttechnical_36\ttechnical_37\ttechnical_38\ttechnical_39\ttechnical_40\ttechnical_41\ttechnical_42\ttechnical_43\ttechnical_44\ty\t"
],
[
"labels = []\nvalues = []\n\nfor col in df.columns:\n labels.append(col)\n values.append(df[col].isnull().sum())\n print(col, values[-1])",
"id 0\ntimestamp 0\nderived_0 72959\nderived_1 81029\nderived_2 398651\nderived_3 149471\nderived_4 406458\nfundamental_0 23947\nfundamental_1 679070\nfundamental_2 368840\nfundamental_3 454380\nfundamental_5 962020\nfundamental_6 701625\nfundamental_7 26340\nfundamental_8 373166\nfundamental_9 565567\nfundamental_10 112977\nfundamental_11 368840\nfundamental_12 110871\nfundamental_13 355138\nfundamental_14 356084\nfundamental_15 354897\nfundamental_16 355138\nfundamental_17 97222\nfundamental_18 15833\nfundamental_19 54588\nfundamental_20 110871\nfundamental_21 54333\nfundamental_22 558488\nfundamental_23 356723\nfundamental_24 576655\nfundamental_25 121894\nfundamental_26 657184\nfundamental_27 281298\nfundamental_28 667331\nfundamental_29 354106\nfundamental_30 354907\nfundamental_31 454380\nfundamental_32 111672\nfundamental_33 13835\nfundamental_34 434862\nfundamental_35 424476\nfundamental_36 15843\nfundamental_37 355843\nfundamental_38 803489\nfundamental_39 377574\nfundamental_40 279702\nfundamental_41 30115\nfundamental_42 22913\nfundamental_43 354531\nfundamental_44 355805\nfundamental_45 16062\nfundamental_46 355843\nfundamental_47 446328\nfundamental_48 15843\nfundamental_49 558333\nfundamental_50 355231\nfundamental_51 447053\nfundamental_52 153927\nfundamental_53 23947\nfundamental_54 384213\nfundamental_55 368840\nfundamental_56 368840\nfundamental_57 657184\nfundamental_58 142125\nfundamental_59 16062\nfundamental_60 355126\nfundamental_61 671801\nfundamental_62 112977\nfundamental_63 375614\ntechnical_0 19165\ntechnical_1 126776\ntechnical_2 4279\ntechnical_3 98294\ntechnical_5 153604\ntechnical_6 4279\ntechnical_7 2236\ntechnical_9 19165\ntechnical_10 167483\ntechnical_11 4279\ntechnical_12 19165\ntechnical_13 4764\ntechnical_14 14184\ntechnical_16 19981\ntechnical_17 4279\ntechnical_18 20016\ntechnical_19 2320\ntechnical_20 4764\ntechnical_21 2236\ntechnical_22 0\ntechnical_24 71146\ntechnical_25 208056\ntechnical_27 2420\ntechnical_28 262916\ntechnical_29 61615\ntechnical_30 4764\ntechnical_31 182678\ntechnical_32 19165\ntechnical_33 14535\ntechnical_34 0\ntechnical_35 3155\ntechnical_36 2552\ntechnical_37 19165\ntechnical_38 19165\ntechnical_39 20016\ntechnical_40 2236\ntechnical_41 44189\ntechnical_42 20001\ntechnical_43 4686\ntechnical_44 236779\ny 0\n"
],
[
"import matplotlib.pyplot as plt\n\n%matplotlib inline\n\nind = np.arange(len(labels))\nwidth = 0.9\n\nfig, ax = plt.subplots(figsize=(12,50))\nrects = ax.barh(ind, np.array(values), color='b')\nax.set_yticks(ind+((width/2.)))\nax.set_yticklabels(labels, rotation='horizontal')\nax.set_ylabel('Count of missing values')\nax.set_title('# of missing values in each column')\nplt.show()\n",
"_____no_output_____"
],
[
"import seaborn as sns\n\nfig = plt.figure(figsize=(12,6))\nsns.countplot(x='timestamp', data=df)\nplt.show()",
"_____no_output_____"
],
[
"print(len(df.id.unique()))",
"1424\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04ccbc418081bbcdaf4f6d9a11f0a31b1a288f0 | 431,862 | ipynb | Jupyter Notebook | Movie_Analysis.ipynb | neoaksa/IMDB_Spider | 059bdffbd08edc07b07b6a231e5b0b465dae2035 | [
"Apache-2.0"
] | null | null | null | Movie_Analysis.ipynb | neoaksa/IMDB_Spider | 059bdffbd08edc07b07b6a231e5b0b465dae2035 | [
"Apache-2.0"
] | null | null | null | Movie_Analysis.ipynb | neoaksa/IMDB_Spider | 059bdffbd08edc07b07b6a231e5b0b465dae2035 | [
"Apache-2.0"
] | null | null | null | 555.092545 | 207,218 | 0.914524 | [
[
[
"[View in Colaboratory](https://colab.research.google.com/github/neoaksa/IMDB_Spider/blob/master/Movie_Analysis.ipynb)",
"_____no_output_____"
]
],
[
[
"# I've already uploaded three files onto googledrive, you can use uploaded function blew to upload the files.\n\n# # upload\n# uploaded = files.upload()\n\n# for fn in uploaded.keys():\n# print('User uploaded file \"{name}\" with length {length} bytes'.format(\n# name=fn, length=len(uploaded[fn])))\n",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nimport urllib.request\n! pip install pydrive\n# these classes allow you to request the Google drive API\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive \nfrom google.colab import auth \nfrom oauth2client.client import GoogleCredentials\nfrom googleapiclient.discovery import build\nfrom google.colab import auth\n\n# authenticate google drive\nauth.authenticate_user()\ndrive_service = build('drive', 'v3')\n# 1. Authenticate and create the PyDrive client.\ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)\n\ndef downloadFile(inputfilename,outputfilename):\n downloaded = drive.CreateFile({'id': inputfilename})\n # assume the file is called file.csv and it's located at the root of your drive\n downloaded.GetContentFile(outputfilename)\n \n# traning file download\nMovieItemFile = downloadFile(\"1w8Ce9An_6vJH_o5Ux7A8Zf0zc2E419xN\",\"MovieItem.csv\")\nMovieReview = downloadFile(\"1R7kAHF9X_YnPGwsclqMn2_XA1WgVgjlC\",\"MovieReview.csv\")\nMovieStar = downloadFile(\"15d3ZiHoqvxxdRhS9-5it979D0M60Ued0\",\"MovieStar.csv\")\n\ndf_movieItem = pd.read_csv('MovieItem.csv', delimiter=',',index_col=['id'])\ndf_movieReview = pd.read_csv('MovieReview.csv', delimiter=',',index_col=['id'])\ndf_movieStar = pd.read_csv('MovieStar.csv', delimiter=',',index_col=['id'])\n# sort by index id(also known by rating)\ndf_movieItem = df_movieItem.sort_index(axis=0)\n# rating overview\nimport seaborn as sns\nsns.stripplot(data=df_movieItem,y='rating',jitter= True,orient = 'v' ,size=6)\nplt.title('Movie Rating Overview')\nplt.show()",
"Requirement already satisfied: pydrive in /usr/local/lib/python3.6/dist-packages (1.3.1)\r\nRequirement already satisfied: PyYAML>=3.0 in /usr/local/lib/python3.6/dist-packages (from pydrive) (3.13)\r\nRequirement already satisfied: oauth2client>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from pydrive) (4.1.2)\r\nRequirement already satisfied: google-api-python-client>=1.2 in /usr/local/lib/python3.6/dist-packages (from pydrive) (1.6.7)\r\nRequirement already satisfied: httplib2>=0.9.1 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (0.11.3)\r\nRequirement already satisfied: six>=1.6.1 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (1.11.0)\r\nRequirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (0.2.2)\nRequirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (0.4.4)\nRequirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (3.4.2)\nRequirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.2->pydrive) (3.0.0)\n"
],
[
"# stars analysis\n# pre-process for movieItem and movieStar\nstar_list = []\nfor index,stars in df_movieItem[['stars','stars_id']].iterrows():\n star_list += [(x.lstrip().replace('\"',''),y.lstrip().replace('\"','')) \n for x,y in zip(stars['stars'][1:-1].replace('\\'','').split(','),stars['stars_id'][1:-1].replace('\\'','').split(','))]\n# star_id_list += [x.lstrip().replace('\"','') for x in stars['stars_id'][1:-1].replace('\\'','').split(',')]\n# reduce duplicate\nstar_list = list(set(star_list))\n# create a dataframe for output\ndf_star = pd.DataFrame(columns=['stars_id','stars','avg_rating','num_movie'])\ndf_star['stars_id'] = [x[1] for x in star_list]\ndf_star['stars'] = [x[0] for x in star_list]\nfor index,star_id in enumerate(df_star['stars_id']):\n filter = df_movieItem['stars_id'].str.contains(star_id)\n df_star['num_movie'][index] = len(df_movieItem[filter])\n df_star['avg_rating'][index] = pd.to_numeric(df_movieItem[filter]['rating'].str[2:-2]).sum(axis=0)/df_star['num_movie'][index]\n# left join star information\ndf_star\n# order by # of movies\ndf_star = df_star.sort_values(['num_movie'],ascending=False)\nprint(df_star.head(10))\n# order by avg rating\ndf_star = df_star.sort_values(['avg_rating'],ascending=False)\nprint(df_star.head(10))",
" stars_id stars avg_rating num_movie\n330 nm0000134 Robert De Niro 8.375 8\n352 nm0000148 Harrison Ford 8.34286 7\n172 nm0000138 Leonardo DiCaprio 8.3 6\n250 nm0000158 Tom Hanks 8.38333 6\n588 nm0000142 Clint Eastwood 8.28 5\n62 nm0451148 Aamir Khan 8.2 5\n539 nm0000122 Charles Chaplin 8.38 5\n26 nm0000199 Al Pacino 8.65 4\n208 nm0000197 Jack Nicholson 8.45 4\n327 nm0000228 Kevin Spacey 8.425 4\n stars_id stars avg_rating num_movie\n427 nm0001001 James Caan 9.2 1\n176 nm0348409 Bob Gunton 9.2 1\n39 nm0000209 Tim Robbins 9.2 1\n290 nm0005132 Heath Ledger 9 1\n338 nm0001173 Aaron Eckhart 9 1\n276 nm0000842 Martin Balsam 8.9 1\n343 nm0000168 Samuel L. Jackson 8.9 1\n303 nm0000237 John Travolta 8.9 1\n398 nm0000553 Liam Neeson 8.9 1\n177 nm0005212 Ian McKellen 8.8 3\n"
]
],
[
[
"Accordig this breif table, we can find **Robert De Niro** took the most movies in top 250 list. Followed by **Harrison**,**Tom** and **Leonardo** .",
"_____no_output_____"
]
],
[
[
"# visual stars\nimport matplotlib.pyplot as plt\n# figure = plt.figure()\nax1 = plt.subplot()\ndf_aggbyMovie = df_star[df_star['num_movie']>0].groupby(['num_movie']).agg({'stars_id':np.size})\ndf_aggbyMovie.columns.values[0] ='freq'\ndf_aggbyMovie = df_aggbyMovie.sort_values(['freq'])\nacc_numMovie = np.cumsum(df_aggbyMovie['freq'])\nax1.plot(acc_numMovie)\nax1.set_xlabel('# of movies')\nax1.set_ylabel('cumulated # of stars')\nax1.set_title('Cumulated chart for each segement')\nplt.gca().invert_xaxis()\nplt.show()\n\nax2 = plt.subplot()\nax2.pie(df_aggbyMovie,\n labels=df_aggbyMovie.index,\n startangle=90,\n autopct='%1.1f%%')\nax2.set_title('Percetage of segements')\nplt.show()\n\n# check out which moive the best stars perform. - best stars: who took more than one movies in the top250 list\ndf_star_2plus = df_star[df_star['num_movie']>1]['stars_id']\ni = 0\nmovie_list = []\nfor index,row in df_movieItem[['stars_id','title']].iterrows():\n for x in df_star_2plus.values:\n if x in row['stars_id']:\n i +=1\n movie_list.append(row['title'])\n break\ndf_movieItem[df_movieItem['title'].isin(movie_list)].head(10)",
"_____no_output_____"
]
],
[
[
"**165** movies in top 250 movies are performed by the **100** best stars who is defined that took more than one movies in the list. We picked up these 100 movie stars for future star research",
"_____no_output_____"
]
],
[
[
"# movie star relationship analysis\n\ndf_movie_star_plus = df_star[df_star['num_movie']>2][['stars_id','stars']]\n# transfer star list to relationship list\ndef starlist2network(list):\n bi_list = []\n i = 0\n while i<len(list):\n j = 1\n while j<len(list)-i:\n bi_list.append((list[i],list[i+j]))\n j += 1\n i += 1\n return tuple(bi_list)\n\nstar_map_list =set()\nfor index,stars in df_movieItem[['stars']].iterrows():\n star_list = []\n star_list += [x.lstrip().replace('\"','')\n for x in stars['stars'][1:-1].replace('\\'','').split(',')]\n for item in starlist2network(star_list):\n if item[0] in df_movie_star_plus['stars'].values and item[1] in df_movie_star_plus['stars'].values: \n star_map_list.add(tuple(sorted(item)))\n\n\n!pip install networkx\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n# Creating a Graph \nG = nx.Graph() # Right now G is empty\nG.add_edges_from(star_map_list)\n# k controls the distance between the nodes and varies between 0 and 1\n# iterations is the number of times simulated annealing is run\n# default k =0.1 and iterations=50\npos = nx.spring_layout(G,k=0.55,iterations=50)\nnx.draw(G,pos, with_labels=True, font_weight='bold',node_shape = 'o')",
"Requirement already satisfied: networkx in /usr/local/lib/python3.6/dist-packages (2.1)\r\nRequirement already satisfied: decorator>=4.1.0 in /usr/local/lib/python3.6/dist-packages (from networkx) (4.3.0)\r\n"
]
],
[
[
"I picked up a few stars who took more than 2 movies in the top 250 list, and create a relationship netwrok for them.We can find the major 5 blocks, if we loose the filter, maybe we can find more.",
"_____no_output_____"
]
],
[
[
"# pick 100 stars for age analysis\n# rebin the year by 10 years\n\ndf_movieStar_bin = df_movieStar.copy()\ndf_movieStar_bin['name'] = df_movieStar_bin['name'].str[2:-2]\ndf_movieStar_bin['born_year'] = df_movieStar_bin['born_year'].str[2:-2]\ndf_movieStar_bin['born_area'] = df_movieStar_bin['born_area'].str[2:-2]\ndf_movieStar_bin['born_year'] = pd.cut(pd.to_numeric(df_movieStar_bin['born_year'].str[0:4]),range(1900,2020,10),right=False)\ndf_movieStar_bin = df_movieStar_bin.dropna()\ndf_movieStar_bin['born_year'] = df_movieStar_bin['born_year'].astype(str).str[1:5] + 's'\ndf_movieStar_bin = df_movieStar_bin[df_movieStar_bin.index.isin(df_star_2plus.values)]\nfig = plt.figure(figsize=(12,6))\nplt.style.use('fivethirtyeight')\nax3 = plt.subplot()\nax3.hist(df_movieStar_bin['born_year'])\nax3.set_title('Histogram of Star born year')\nplt.xlabel('Star Born Year')\nplt.ylabel('# of Star')\nplt.show()\n\n# star city anlysis\ndf_movieStar_bin['born_state'] = [x.split(',')[1] for x in df_movieStar_bin['born_area']]\ndf_movieStar_by_state = df_movieStar_bin.groupby(['born_state']).size().sort_values(ascending=False)\ndf_movieStar_by_state = df_movieStar_by_state[df_movieStar_by_state>=2].append(\npd.Series(df_movieStar_by_state[df_movieStar_by_state<2].sum(),index=['Others']))\n# print(df_movieStar_by_state)\nfig = plt.figure(figsize=(20,6))\nplt.bar(range(len(df_movieStar_by_state)), df_movieStar_by_state, align='center', alpha=0.5)\nplt.xticks(range(len(df_movieStar_by_state)), df_movieStar_by_state.index)\nplt.ylabel('# of Stars')\nplt.title('Movie Star by States')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"From picked 100 movie stars, most of them are born between **1930s to 1970s**. **California, Illinois, New Jersey ** are the states with most movie stars. Even so, none of state or regions is predominant.",
"_____no_output_____"
]
],
[
[
"# review analysis\n!pip install wordcloud\n!pip install multidict\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nimport string\nimport multidict as multidict\n\n\nnltk.download('stopwords')\nnltk.download('punkt')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('wordnet')\n\nLemmatizer = WordNetLemmatizer()\n\n# remove punctuation\nlist_word = []\nfor text in df_movieReview['content'].values:\n nopunc = [char.lower() for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n list_word.append(nopunc)\n\n# setting words unuseful\ndel_words = ['movie','character','film','story','wa','ha']# excluded words\nword_type_list_In = (\"JJ\",\"NN\") # only picked adj and noun\n# word_list_Ex = (\"/\", \"br\", \"<\", \">\",\"be\",\"movie\",\"film\",\"have\",\"do\",\"none\",\"none none\") \n\nwords = {}\nfor sent in list_word:\n text = nltk.word_tokenize(sent) # tokenize sentence to words\n text = [Lemmatizer.lemmatize(word) for word in text] # get stem of words\n text_tag = nltk.pos_tag(text) # get words type\n for item in [x[0] for x in text_tag if x[1][:2] in word_type_list_In and x[0] not in del_words and x[0] not in stopwords.words('english')]:\n if item not in words:\n words[item] = 1\n else:\n words[item] += 1\n\n#sort by value\nsorted_words = sorted(words.items(), key=lambda x: x[1],reverse=True)\n# filtered_words = ' '.join([x[0] for x in sorted_words if x[1]>=1000])\nprint(sorted_words[0:20])\n\nfullTermsDict = multidict.MultiDict()\nfor key in words:\n fullTermsDict.add(key, words[key])\n# Create the wordcloud object\nwordcloud = WordCloud(width=1600, height=800, margin=0,max_font_size=100).generate_from_frequencies(fullTermsDict)\n\n# Display the generated image:\nplt.imshow(wordcloud, interpolation='bilinear')\nplt.axis(\"off\")\nplt.margins(x=0, y=0)\nplt.show()",
"[('time', 6689), ('scene', 4773), ('great', 4118), ('life', 4079), ('best', 3854), ('good', 3790), ('way', 3752), ('people', 3487), ('many', 3194), ('year', 2941), ('first', 2832), ('man', 2769), ('thing', 2649), ('performance', 2588), ('world', 2253), ('actor', 2154), ('director', 1919), ('war', 1915), ('action', 1865), ('plot', 1851)]\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d04cdc176d431a80bb85ebcfd539d148621324c2 | 46,694 | ipynb | Jupyter Notebook | p12-logistic-regression.ipynb | Avanderheyde/cs451-practicals | efd2560d8d40ddb2ec97a05234e9d808731c3feb | [
"BSD-3-Clause"
] | null | null | null | p12-logistic-regression.ipynb | Avanderheyde/cs451-practicals | efd2560d8d40ddb2ec97a05234e9d808731c3feb | [
"BSD-3-Clause"
] | null | null | null | p12-logistic-regression.ipynb | Avanderheyde/cs451-practicals | efd2560d8d40ddb2ec97a05234e9d808731c3feb | [
"BSD-3-Clause"
] | null | null | null | 111.17619 | 17,804 | 0.852872 | [
[
[
"#%%\nfrom dataclasses import dataclass, field\nimport numpy as np\nfrom sklearn import metrics\nimport numpy as np\nfrom tqdm import tqdm\nimport random\nfrom typing import List, Dict\nfrom sklearn.utils import resample\nfrom scipy.special import expit\nfrom shared import bootstrap_auc\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"# start off by seeding random number generators:\nRANDOM_SEED = 12345\nrandom.seed(RANDOM_SEED)\nnp.random.seed(RANDOM_SEED)\n\n# import data; choose feature space\nfrom dataset_poetry import y_train, Xd_train, y_vali, Xd_vali\n\nX_train = Xd_train[\"numeric\"]\nX_vali = Xd_vali[\"numeric\"]",
"_____no_output_____"
],
[
"#%%\nfrom sklearn.linear_model import LogisticRegression\n\nm = LogisticRegression(random_state=RANDOM_SEED, penalty=\"none\", max_iter=2000)\nm.fit(X_train, y_train)\n\nprint(\"skLearn-LR AUC: {:.3}\".format(np.mean(bootstrap_auc(m, X_vali, y_vali))))\nprint(\"skLearn-LR Acc: {:.3}\".format(m.score(X_vali, y_vali)))",
"skLearn-LR AUC: 0.973\nskLearn-LR Acc: 0.929\n"
],
[
"@dataclass\nclass LogisticRegressionModel:\n # Managed to squeeze bias into this weights array by adding some +1s.\n weights: np.ndarray\n\n @staticmethod\n def random(D: int) -> \"LogisticRegressionModel\":\n weights = np.random.randn(D + 1, 1)\n return LogisticRegressionModel(weights)\n\n def decision_function(self, X: np.ndarray) -> np.ndarray:\n \"\"\" Compute the expit of the signed distance from the self.weights hyperplane. \"\"\"\n (N, D) = X.shape\n assert self.weights[:D].shape == (D, 1)\n # Matrix multiplication; sprinkle transpose and assert to get the shapes you want (or remember Linear Algebra)... or both!\n output = np.dot(self.weights[:D].transpose(), X.transpose())\n assert output.shape == (1, N)\n # now add bias and put it through the 'S'/sigmoid/'expit' function.\n return np.array(expit(output + self.weights[-1])).ravel()\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n return np.array(self.decision_function(X) > 0.5, dtype=\"int32\").ravel()\n\n def score(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\" Take predictions and compute accuracy. \"\"\"\n y_hat = self.predict(X)\n return metrics.accuracy_score(np.asarray(y), y_hat) # type:ignore\n\n\n@dataclass\nclass ModelTrainingCurve:\n train: List[float] = field(default_factory=list)\n validation: List[float] = field(default_factory=list)\n\n def add_sample(\n self,\n m: LogisticRegressionModel,\n X: np.ndarray,\n y: np.ndarray,\n X_vali: np.ndarray,\n y_vali: np.ndarray,\n ) -> None:\n self.train.append(m.score(X, y))\n self.validation.append(m.score(X_vali, y_vali))\n\n\n(N, D) = X_train.shape\n\nlearning_curves: Dict[str, ModelTrainingCurve] = {}\n\n\ndef compute_gradient_update(m, X, y) -> np.ndarray:\n \"\"\" Predict using m over X; compare to y, calculate the gradient update.\"\"\"\n (N, D) = X.shape\n y_hat = m.decision_function(X)\n y_diffs = np.array(y_hat - y)\n # look at all the predictions to compute our derivative:\n gradient = np.zeros((D + 1, 1))\n\n # Literally a bajillion times faster if we ditch the for loops!\n # 1. scale X matrix by the y_diffs; then sum columns:\n x_scaled_by_y = X.T * y_diffs\n non_bias_gradient = np.sum(x_scaled_by_y, axis=1)\n gradient[:D] = non_bias_gradient.reshape((D, 1))\n # 2. the bias term is always 1 in X rows; so we just sum up the y_diffs for this.\n gradient[D] += np.sum(y_diffs)\n\n # take an gradient step in the negative direction ('down')\n return -(gradient / N)",
"_____no_output_____"
],
[
"def train_logistic_regression_gd(a, name=\"LR-GD\", num_iter=100):\n plot = ModelTrainingCurve()\n learning_curves[name] = plot\n\n m = LogisticRegressionModel.random(D)\n # Alpha is the 'learning rate'.\n alpha = a\n\n for _ in tqdm(range(num_iter), total=num_iter, desc=name):\n # Each step is scaled by alpha, to control how fast we move, overall:\n m.weights += alpha * compute_gradient_update(m, X_train, y_train)\n # record performance:\n plot.add_sample(m, X_train, y_train, X_vali, y_vali)\n return m\n\nm = train_logistic_regression_gd(a=1, num_iter=2000)\nprint(\"LR-GD AUC: {:.3}\".format(np.mean(bootstrap_auc(m, X_vali, y_vali))))\nprint(\"LR-GD Acc: {:.3}\".format(m.score(X_vali, y_vali)))",
"LR-GD: 100%|██████████| 2000/2000 [00:01<00:00, 1245.11it/s]\n"
],
[
"def train_logistic_regression_sgd_opt(a, name=\"LR-SGD\", num_iter=100, minibatch_size=512):\n \"\"\" This is bootstrap-sampling minibatch SGD \"\"\"\n plot = ModelTrainingCurve()\n learning_curves[name] = plot\n\n m = LogisticRegressionModel.random(D)\n alpha = a\n n_samples = max(1, N // minibatch_size)\n\n for _ in tqdm(range(num_iter), total=num_iter, desc=name):\n for _ in range(n_samples):\n X_mb, y_mb = resample(X_train, y_train, n_samples=minibatch_size)\n m.weights += alpha * compute_gradient_update(m, X_mb, y_mb)\n # record performance:\n plot.add_sample(m, X_train, y_train, X_vali, y_vali)\n return m\n\nm = train_logistic_regression_sgd_opt(a=1, num_iter=2000)\nprint(\"LR-SGD AUC: {:.3}\".format(np.mean(bootstrap_auc(m, X_vali, y_vali))))\nprint(\"LR-SGD Acc: {:.3}\".format(m.score(X_vali, y_vali)))",
"LR-SGD: 100%|██████████| 2000/2000 [00:03<00:00, 659.73it/s]\n"
],
[
"## Create training curve plots:\nimport matplotlib.pyplot as plt\n\nfor key, dataset in learning_curves.items():\n xs = np.array(list(range(len(dataset.train))))\n plt.plot(xs, dataset.train, label=\"{}\".format(key), alpha=0.7)\nplt.title(\"Training Curves\")\nplt.xlabel(\"Iteration\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.tight_layout()\nplt.savefig(\"graphs/p12-training-curves.png\")\nplt.show()\n\n## Create validation curve plots:\nfor key, dataset in learning_curves.items():\n xs = np.array(list(range(len(dataset.validation))))\n plt.plot(xs, dataset.validation, label=\"{}\".format(key), alpha=0.7)\nplt.title(\"Validation Curves\")\nplt.xlabel(\"Iteration\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.tight_layout()\nplt.savefig(\"graphs/p12-vali-curves.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# TODO:\n#\n### 1. pick SGD or GD (I recommend SGD)",
"_____no_output_____"
],
[
"i looked at both",
"_____no_output_____"
],
[
"### 2. pick a smaller max_iter that gets good performance.",
"_____no_output_____"
],
[
"max_iter = 1000",
"_____no_output_____"
],
[
"##### Do either A or B:\n\n##### (A) Explore Learning Rates:\n\n##### 3. make ``alpha``, the learning rate, a parameter of the train function.",
"_____no_output_____"
],
[
"done",
"_____no_output_____"
],
[
"##### 4. make a graph including some faster and slower alphas:\n##### .... alpha = [0.05, 0.1, 0.5, 1.0]\n##### .... what do you notice?",
"_____no_output_____"
],
[
"The alpha of 1 converges faster than 0.05",
"_____no_output_____"
],
[
"##### (B) Explore 'Automatic/Early Stopping'\n\n##### 3. split the 'training' data into **another** validation set.",
"_____no_output_____"
],
[
"##### 4. modify the SGD/GD loop to keep track of loss/accuarcy on this mini validation set at each iteration.\n##### 5. add a tolerance parameter, and stop looping when the loss/accuracy on the mini validation set stops going down.",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d04ce6e09176f4e1c07e7433c06889eafc212771 | 13,198 | ipynb | Jupyter Notebook | docs/notebooks/fitting/1D_fitting/plot_1_29Si_cuspidine.ipynb | pjgrandinetti/mrsimulator | e603395e52ad162d4a9051a0741651c9030c3459 | [
"BSD-3-Clause"
] | 14 | 2019-05-28T20:06:13.000Z | 2021-05-27T01:37:16.000Z | docs/notebooks/fitting/1D_fitting/plot_1_29Si_cuspidine.ipynb | pjgrandinetti/mrsimulator | e603395e52ad162d4a9051a0741651c9030c3459 | [
"BSD-3-Clause"
] | 74 | 2021-06-07T15:13:49.000Z | 2022-03-29T20:09:19.000Z | docs/notebooks/fitting/1D_fitting/plot_1_29Si_cuspidine.ipynb | pjgrandinetti/mrsimulator | e603395e52ad162d4a9051a0741651c9030c3459 | [
"BSD-3-Clause"
] | 7 | 2019-05-28T20:19:29.000Z | 2021-04-06T18:48:24.000Z | 53.217742 | 1,126 | 0.624867 | [
[
[
"# This cell is added by sphinx-gallery\n!pip install mrsimulator --quiet\n\n\n%matplotlib inline\n\nimport mrsimulator\nprint(f'You are using mrsimulator v{mrsimulator.__version__}')",
"_____no_output_____"
]
],
[
[
"\n# ²⁹Si 1D MAS spinning sideband (CSA)\n",
"_____no_output_____"
],
[
"After acquiring an NMR spectrum, we often require a least-squares analysis to\ndetermine site populations and nuclear spin interaction parameters. Generally, this\ncomprises of two steps:\n\n- create a fitting model, and\n- determine the model parameters that give the best fit to the spectrum.\n\nHere, we will use the mrsimulator objects to create a fitting model, and use the\n`LMFIT <https://lmfit.github.io/lmfit-py/>`_ library for performing the least-squares\nfitting optimization.\nIn this example, we use a synthetic $^{29}\\text{Si}$ NMR spectrum of cuspidine,\ngenerated from the tensor parameters reported by Hansen `et al.` [#f1]_, to\ndemonstrate a simple fitting procedure.\n\nWe will begin by importing relevant modules and establishing figure size.\n\n",
"_____no_output_____"
]
],
[
[
"import csdmpy as cp\nimport matplotlib.pyplot as plt\nfrom lmfit import Minimizer, Parameters\n\nfrom mrsimulator import Simulator, SpinSystem, Site\nfrom mrsimulator.methods import BlochDecaySpectrum\nfrom mrsimulator import signal_processing as sp\nfrom mrsimulator.utils import spectral_fitting as sf",
"_____no_output_____"
]
],
[
[
"## Import the dataset\nUse the `csdmpy <https://csdmpy.readthedocs.io/en/stable/index.html>`_\nmodule to load the synthetic dataset as a CSDM object.\n\n",
"_____no_output_____"
]
],
[
[
"file_ = \"https://sandbox.zenodo.org/record/835664/files/synthetic_cuspidine_test.csdf?\"\nsynthetic_experiment = cp.load(file_).real\n\n# standard deviation of noise from the dataset\nsigma = 0.03383338\n\n# convert the dimension coordinates from Hz to ppm\nsynthetic_experiment.x[0].to(\"ppm\", \"nmr_frequency_ratio\")\n\n# Plot of the synthetic dataset.\nplt.figure(figsize=(4.25, 3.0))\nax = plt.subplot(projection=\"csdm\")\nax.plot(synthetic_experiment, \"k\", alpha=0.5)\nax.set_xlim(50, -200)\nplt.grid()\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Create a fitting model\n\nBefore you can fit a simulation to an experiment, in this case, the synthetic dataset,\nyou will first need to create a fitting model. We will use the ``mrsimulator`` objects\nas tools in creating a model for the least-squares fitting.\n\n**Step 1:** Create initial guess sites and spin systems.\n\nThe initial guess is often based on some prior knowledge about the system under\ninvestigation. For the current example, we know that Cuspidine is a crystalline silica\npolymorph with one crystallographic Si site. Therefore, our initial guess model is a\nsingle $^{29}\\text{Si}$ site spin system. For non-linear fitting algorithms, as\na general recommendation, the initial guess model parameters should be a good starting\npoint for the algorithms to converge.\n\n",
"_____no_output_____"
]
],
[
[
"# the guess model comprising of a single site spin system\nsite = Site(\n isotope=\"29Si\",\n isotropic_chemical_shift=-82.0, # in ppm,\n shielding_symmetric={\"zeta\": -63, \"eta\": 0.4}, # zeta in ppm\n)\n\nspin_system = SpinSystem(\n name=\"Si Site\",\n description=\"A 29Si site in cuspidine\",\n sites=[site], # from the above code\n abundance=100,\n)",
"_____no_output_____"
]
],
[
[
"**Step 2:** Create the method object.\n\nThe method should be the same as the one used\nin the measurement. In this example, we use the `BlochDecaySpectrum` method. Note,\nwhen creating the method object, the value of the method parameters must match the\nrespective values used in the experiment.\n\n",
"_____no_output_____"
]
],
[
[
"MAS = BlochDecaySpectrum(\n channels=[\"29Si\"],\n magnetic_flux_density=7.1, # in T\n rotor_frequency=780, # in Hz\n spectral_dimensions=[\n {\n \"count\": 2048,\n \"spectral_width\": 25000, # in Hz\n \"reference_offset\": -5000, # in Hz\n }\n ],\n experiment=synthetic_experiment, # add the measurement to the method.\n)",
"_____no_output_____"
]
],
[
[
"**Step 3:** Create the Simulator object, add the method and spin system objects, and\nrun the simulation.\n\n",
"_____no_output_____"
]
],
[
[
"sim = Simulator(spin_systems=[spin_system], methods=[MAS])\nsim.run()",
"_____no_output_____"
]
],
[
[
"**Step 4:** Create a SignalProcessor class and apply post simulation processing.\n\n",
"_____no_output_____"
]
],
[
[
"processor = sp.SignalProcessor(\n operations=[\n sp.IFFT(), # inverse FFT to convert frequency based spectrum to time domain.\n sp.apodization.Exponential(FWHM=\"200 Hz\"), # apodization of time domain signal.\n sp.FFT(), # forward FFT to convert time domain signal to frequency spectrum.\n sp.Scale(factor=3), # scale the frequency spectrum.\n ]\n)\nprocessed_data = processor.apply_operations(data=sim.methods[0].simulation).real",
"_____no_output_____"
]
],
[
[
"**Step 5:** The plot the spectrum. We also plot the synthetic dataset for comparison.\n\n",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(4.25, 3.0))\nax = plt.subplot(projection=\"csdm\")\nax.plot(synthetic_experiment, \"k\", linewidth=1, label=\"Experiment\")\nax.plot(processed_data, \"r\", alpha=0.75, linewidth=1, label=\"guess spectrum\")\nax.set_xlim(50, -200)\nplt.legend()\nplt.grid()\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Setup a Least-squares minimization\n\nNow that our model is ready, the next step is to set up a least-squares minimization.\nYou may use any optimization package of choice, here we show an application using\nLMFIT. You may read more on the LMFIT\n`documentation page <https://lmfit.github.io/lmfit-py/index.html>`_.\n\n### Create fitting parameters\n\nNext, you will need a list of parameters that will be used in the fit. The *LMFIT*\nlibrary provides a `Parameters <https://lmfit.github.io/lmfit-py/parameters.html>`_\nclass to create a list of parameters.\n\n",
"_____no_output_____"
]
],
[
[
"site1 = spin_system.sites[0]\nparams = Parameters()\n\nparams.add(name=\"iso\", value=site1.isotropic_chemical_shift)\nparams.add(name=\"eta\", value=site1.shielding_symmetric.eta, min=0, max=1)\nparams.add(name=\"zeta\", value=site1.shielding_symmetric.zeta)\nparams.add(name=\"FWHM\", value=processor.operations[1].FWHM)\nparams.add(name=\"factor\", value=processor.operations[3].factor)",
"_____no_output_____"
]
],
[
[
"### Create a minimization function\n\nNote, the above set of parameters does not know about the model. You will need to\nset up a function that will\n\n- update the parameters of the `Simulator` and `SignalProcessor` object based on the\n LMFIT parameter updates,\n- re-simulate the spectrum based on the updated values, and\n- return the difference between the experiment and simulation.\n\n",
"_____no_output_____"
]
],
[
[
"def minimization_function(params, sim, processor, sigma=1):\n values = params.valuesdict()\n\n # the experiment data as a Numpy array\n intensity = sim.methods[0].experiment.y[0].components[0].real\n\n # Here, we update simulation parameters iso, eta, and zeta for the site object\n site = sim.spin_systems[0].sites[0]\n site.isotropic_chemical_shift = values[\"iso\"]\n site.shielding_symmetric.eta = values[\"eta\"]\n site.shielding_symmetric.zeta = values[\"zeta\"]\n\n # run the simulation\n sim.run()\n\n # update the SignalProcessor parameter and apply line broadening.\n # update the scaling factor parameter at index 3 of operations list.\n processor.operations[3].factor = values[\"factor\"]\n # update the exponential apodization FWHM parameter at index 1 of operations list.\n processor.operations[1].FWHM = values[\"FWHM\"]\n\n # apply signal processing\n processed_data = processor.apply_operations(sim.methods[0].simulation)\n\n # return the difference vector.\n diff = intensity - processed_data.y[0].components[0].real\n return diff / sigma",
"_____no_output_____"
]
],
[
[
"<div class=\"alert alert-info\"><h4>Note</h4><p>To automate the fitting process, we provide a function to parse the\n ``Simulator`` and ``SignalProcessor`` objects for parameters and construct an\n *LMFIT* ``Parameters`` object. Similarly, a minimization function, analogous to\n the above `minimization_function`, is also included in the *mrsimulator*\n library. See the next example for usage instructions.</p></div>\n\n### Perform the least-squares minimization\n\nWith the synthetic dataset, simulation, and the initial guess parameters, we are ready\nto perform the fit. To fit, we use the *LMFIT*\n`Minimizer <https://lmfit.github.io/lmfit-py/fitting.html>`_ class.\n\n",
"_____no_output_____"
]
],
[
[
"minner = Minimizer(minimization_function, params, fcn_args=(sim, processor, sigma))\nresult = minner.minimize()\nresult",
"_____no_output_____"
]
],
[
[
"The plot of the fit, measurement and the residuals is shown below.\n\n",
"_____no_output_____"
]
],
[
[
"best_fit = sf.bestfit(sim, processor)[0]\nresiduals = sf.residuals(sim, processor)[0]\n\nplt.figure(figsize=(4.25, 3.0))\nax = plt.subplot(projection=\"csdm\")\nax.plot(synthetic_experiment, \"k\", linewidth=1, label=\"Experiment\")\nax.plot(best_fit, \"r\", alpha=0.75, linewidth=1, label=\"Best Fit\")\nax.plot(residuals, alpha=0.75, linewidth=1, label=\"Residuals\")\nax.set_xlabel(\"Frequency / Hz\")\nax.set_xlim(50, -200)\nplt.legend()\nplt.grid()\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
".. [#f1] Hansen, M. R., Jakobsen, H. J., Skibsted, J., $^{29}\\text{Si}$\n Chemical Shift Anisotropies in Calcium Silicates from High-Field\n $^{29}\\text{Si}$ MAS NMR Spectroscopy, Inorg. Chem. 2003,\n **42**, *7*, 2368-2377.\n `DOI: 10.1021/ic020647f <https://doi.org/10.1021/ic020647f>`_\n\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d04d018e16ef4b816705b2c2b557c0be3321d64d | 366,662 | ipynb | Jupyter Notebook | pytorch.ipynb | lamahechag/pytorch_tensorflow | 77f6ce1fb351a459fe0b7a2e6d8ec0f5fd14abe5 | [
"MIT"
] | null | null | null | pytorch.ipynb | lamahechag/pytorch_tensorflow | 77f6ce1fb351a459fe0b7a2e6d8ec0f5fd14abe5 | [
"MIT"
] | null | null | null | pytorch.ipynb | lamahechag/pytorch_tensorflow | 77f6ce1fb351a459fe0b7a2e6d8ec0f5fd14abe5 | [
"MIT"
] | null | null | null | 544.817236 | 133,030 | 0.93122 | [
[
[
"<a href=\"https://colab.research.google.com/github/lamahechag/pytorch_tensorflow/blob/master/pytorch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Pytorch\n\nPytorch is a framework that challenge you to build a ANN almost from scratch.\n\nThis tutorial aims to explain how load non-iamges data in Pytorch, and create classification models.\n\n1. Learn how to generate synthetic data for classification. The more complex the bidimentional patern, the larger the high dimentional transformation to find a hiperplane that separes the prolem.\n\n1. Understand the basic components of a neural network using Pytorch: layers, foward pass, gradient calculation, update weights with any gradient desent method.\n\n1. Do a paralallel view of TensorFlow and Pytorch.\n1. Apply transformations to Loss function to trainning with imbalanced data: class weight, focal loss, etc.\n\n__References__\n\nhttps://towardsdatascience.com/pytorch-tabular-binary-classification-a0368da5bb89\n\nhttps://towardsdatascience.com/pytorch-basics-intro-to-dataloaders-and-loss-functions-868e86450047\n\nhttps://towardsdatascience.com/understanding-pytorch-with-an-example-a-step-by-step-tutorial-81fc5f8c4e8e\n\nhttps://cs230.stanford.edu/blog/pytorch/\n",
"_____no_output_____"
]
],
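The fourth goal above (training with imbalanced data) is not reached in the portion shown here, so a minimal sketch follows. It only illustrates the class-weight idea with `BCEWithLogitsLoss`; the 0.95/0.05 ratio is an assumption taken from the `weights` argument used to generate the imbalanced set below.

```python
# Hedged sketch: up-weight the rare positive class when training on the imbalanced set.
import torch
import torch.nn as nn

pos_weight = torch.tensor([0.95 / 0.05])                  # assumed negatives-to-positives ratio
criterion_weighted = nn.BCEWithLogitsLoss(pos_weight=pos_weight)

# Usage: pass criterion_weighted to the training loop in place of the plain BCEWithLogitsLoss.
```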
[
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\n# plotlib and sklearn modules\nimport numpy as np\nfrom sklearn.datasets import make_moons, make_circles, make_classification\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import ListedColormap",
"_____no_output_____"
],
[
"device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)",
"cuda:0\n"
],
[
"# binary imbalanced set\nX_imb, y_imb = make_classification(n_samples=10000, n_features=2, n_redundant=0, n_informative=2,\n random_state=1, n_clusters_per_class=1, weights=[0.95, 0.05], class_sep=1.5)\nrng = np.random.RandomState(2)\nX_imb += 2 * rng.uniform(size=X_imb.shape)\n\n# multiclass set\nX_multi, y_multi = make_classification(n_samples=10000, n_features=2, n_informative=2,\n n_redundant=0, n_repeated=0, n_classes=3,\n n_clusters_per_class=1, weights=[0.33, 0.33, 0.33],\n class_sep=0.8, random_state=7)\n# non-linear separable\nX_moon, y_moon = make_moons(n_samples=10000, noise=0.3, random_state=3)",
"_____no_output_____"
],
[
"plt.scatter(X_imb[:, 0], X_imb[:, 1], marker='o', c=y_imb,\n s=25, edgecolor='k')",
"_____no_output_____"
],
[
"plt.scatter(X_moon[:, 0], X_moon[:, 1], marker='o', c=y_moon,\n s=25, edgecolor='k')",
"_____no_output_____"
],
[
"plt.scatter(X_multi[:, 0], X_multi[:, 1], marker='o', c=y_multi,\n s=25, edgecolor='k')",
"_____no_output_____"
]
],
[
[
"# Data loader\n\nWe create a custom dataset class to iterate our data in the dataloader from Pytorch.\n\n`trainData(torch.FloatTensor(X_train), torch.FloatTensor(y_train))`\n\nThen we use `DataLoader` to allow auto batching. The function `loader_data()` gather all the pipeline to load tha data in a Pytorch tensor.",
"_____no_output_____"
]
],
[
[
"class trainData(Dataset): \n def __init__(self, X_data, y_data):\n self.X_data = X_data\n self.y_data = y_data \n def __getitem__(self, index):\n return self.X_data[index], self.y_data[index]\n def __len__ (self):\n return len(self.X_data)\n\n \nclass testData(Dataset):\n def __init__(self, X_data):\n self.X_data = X_data\n def __getitem__(self, index):\n return self.X_data[index]\n def __len__ (self):\n return len(self.X_data)\n\ndef loader_data(X, y, BATCH_SIZE=500): \n # create function that recive the X and y, batch and returns: train_loader and test_loader.\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33,\n random_state=69, stratify=y_imb)\n train_data = trainData(torch.FloatTensor(X_train), torch.FloatTensor(y_train))\n test_data = testData(torch.FloatTensor(X_test))\n train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)\n test_loader = DataLoader(dataset=test_data, batch_size=100)\n return train_loader, test_loader, y_test",
"_____no_output_____"
]
],
[
[
"# Pytorch Model\n\nTo build a model in Pytorch, you should define a `Class`. The class has two parts:\n\n1. `__init__` defines the different elements of calculation, like: hidden layers, activation functions, dropouts, etc.\n\n1. `foward` method where you define how the input going through each calculation element.\n\nYou will see that in the output layer for the binary classifiers there is not `sigmoid` function in the output layer, this is because in Pytorch it can be include in the loss function that will be defined later.",
"_____no_output_____"
]
],
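A short check of the claim above that the sigmoid can be folded into the loss: `BCEWithLogitsLoss` on raw logits matches `BCELoss` on sigmoid outputs. This is a standalone sketch, not part of the original notebook.

```python
# Hedged sketch: BCEWithLogitsLoss(logits) equals BCELoss(sigmoid(logits)) up to floating-point error.
import torch
import torch.nn as nn

logits = torch.tensor([[0.3], [-1.2]])
target = torch.tensor([[1.0], [0.0]])

loss_a = nn.BCEWithLogitsLoss()(logits, target)
loss_b = nn.BCELoss()(torch.sigmoid(logits), target)
print(torch.allclose(loss_a, loss_b))  # True
```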
[
[
"class LogisClassifier(nn.Module):\n def __init__(self, num_input=2):\n super(LogisClassifier, self).__init__()\n self.num_input = num_input\n # Number of input features\n self.layer_1 = nn.Linear(self.num_input, 1) \n def forward(self, inputs):\n x = self.layer_1(inputs) \n return x\n\nclass binaryClassification(nn.Module):\n def __init__(self, num_input=2):\n super(binaryClassification, self).__init__()\n self.num_input = num_input\n # Number of input features\n self.layer_1 = nn.Linear(self.num_input, 120) \n self.layer_2 = nn.Linear(120, 64)\n self.layer_out = nn.Linear(64, 1) \n \n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(p=0.2)\n self.batchnorm1 = nn.BatchNorm1d(120)\n self.batchnorm2 = nn.BatchNorm1d(64)\n \n def forward(self, inputs):\n x = self.relu(self.layer_1(inputs))\n x = self.batchnorm1(x)\n x = self.relu(self.layer_2(x))\n x = self.batchnorm2(x)\n x = self.dropout(x)\n x = self.layer_out(x) \n return x\n\nclass Multiclass(nn.Module): \n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(2, 50)\n self.relu1 = nn.ReLU()\n self.dout = nn.Dropout(0.2)\n self.fc2 = nn.Linear(50, 100)\n self.prelu = nn.PReLU(1)\n self.out = nn.Linear(100, 1)\n self.out_act = nn.Softmax(dim=1)\n\n def forward(self, input_):\n a1 = self.fc1(input_)\n h1 = self.relu1(a1)\n dout = self.dout(h1)\n a2 = self.fc2(dout)\n h2 = self.prelu(a2)\n a3 = self.out(h2)\n y = self.out_act(a3)\n return y",
"_____no_output_____"
]
],
[
[
"# Training loop\n\nIn a neural network the process of learning is as follow: calculate the output, calculate the gradient, do the backward pass and update the weights.\n\nWithin the training loop, you should do this in each iteration.\n1. reset gradient to zero.\n1. perform backward step.\n1. update parameters.\n\nAlso before to measure accuracy and evaluate should be define in Pytorch operations.",
"_____no_output_____"
]
],
[
[
"def binary_acc(y_pred, y_test):\n y_pred_tag = torch.round(torch.sigmoid(y_pred))\n correct_results_sum = (y_pred_tag == y_test).sum().float()\n acc = correct_results_sum/y_test.shape[0]\n acc = torch.round(acc * 100)\n return acc\n\ndef eval_testdata(model, test_loader):\n y_pred_list = []\n model.eval()\n # this 'with' is to evaluate without a gradient step.\n with torch.no_grad():\n for X_batch in test_loader:\n X_batch = X_batch.to(device)\n y_test_pred = model(X_batch)\n y_test_pred = torch.sigmoid(y_test_pred)\n y_pred_tag = torch.round(y_test_pred)\n y_pred_list += y_pred_tag.cpu().numpy().squeeze().tolist()\n return y_pred_list\n\ndef train_model(model, criterion, optimizer, train_loader, EPOCHS, test_loader, y_test):\n model.train()\n for e in range(1, EPOCHS+1):\n epoch_loss = 0\n epoch_acc = 0\n for X_batch, y_batch in train_loader:\n X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n y_pred = model(X_batch)\n loss = criterion(y_pred, y_batch.unsqueeze(1))\n acc = binary_acc(y_pred, y_batch.unsqueeze(1))\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() \n epoch_loss += loss.item()\n epoch_acc += acc.item()\n y_pred_test = eval_testdata(model, test_loader)\n eval_acc = round(accuracy_score(y_true=y_test, y_pred=y_pred_test), 2)\n eval_f1 = round(f1_score(y_true=y_test, y_pred=y_pred_test),2)\n print(f'Epoch {e+0:03}: | Loss: {epoch_loss/len(train_loader):.5f} | Acc: {epoch_acc/len(train_loader):.3f} | Acc_eval: {eval_acc} | f1_eval: {eval_f1}')",
"_____no_output_____"
]
],
[
[
"# Declare model and train\n\nWe have defined a training loop, but we need a loss function and an optimizer to perform gradient desent step.\n\nIn the first line the data are loaded, followed by the model declaration and send to the `GPU` device in this case.\n\n## First experiment: Logistic classifier.",
"_____no_output_____"
]
],
[
[
"train_loader, test_loader, y_test = loader_data(X_moon, y_moon, BATCH_SIZE=10)\nmodel = LogisClassifier()\nmodel.to(device)\n# define loss function\ncriterion = nn.BCEWithLogitsLoss()\nLEARNING_RATE = 0.001\n# define gradient decent optimizer\noptimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)\nprint(model)\n\n# now train(fit) the model\nEPOCHS = 100\ntrain_model(model, criterion, optimizer, train_loader, EPOCHS, test_loader, y_test)",
"LogisClassifier(\n (layer_1): Linear(in_features=2, out_features=1, bias=True)\n)\nEpoch 001: | Loss: 0.65585 | Acc: 53.806 | Acc_eval: 0.7 | f1_eval: 0.7\nEpoch 002: | Loss: 0.51862 | Acc: 73.881 | Acc_eval: 0.78 | f1_eval: 0.78\nEpoch 003: | Loss: 0.45027 | Acc: 78.746 | Acc_eval: 0.81 | f1_eval: 0.81\nEpoch 004: | Loss: 0.41158 | Acc: 80.761 | Acc_eval: 0.82 | f1_eval: 0.82\nEpoch 005: | Loss: 0.38718 | Acc: 82.030 | Acc_eval: 0.83 | f1_eval: 0.83\nEpoch 006: | Loss: 0.37097 | Acc: 82.985 | Acc_eval: 0.84 | f1_eval: 0.84\nEpoch 007: | Loss: 0.35982 | Acc: 83.582 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 008: | Loss: 0.35223 | Acc: 84.104 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 009: | Loss: 0.34708 | Acc: 84.433 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 010: | Loss: 0.34370 | Acc: 84.731 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 011: | Loss: 0.34149 | Acc: 84.806 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 012: | Loss: 0.34005 | Acc: 84.985 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 013: | Loss: 0.33915 | Acc: 85.224 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 014: | Loss: 0.33854 | Acc: 85.149 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 015: | Loss: 0.33819 | Acc: 85.134 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 016: | Loss: 0.33794 | Acc: 85.239 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 017: | Loss: 0.33776 | Acc: 85.313 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 018: | Loss: 0.33770 | Acc: 85.164 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 019: | Loss: 0.33761 | Acc: 85.164 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 020: | Loss: 0.33757 | Acc: 85.209 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 021: | Loss: 0.33754 | Acc: 85.164 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 022: | Loss: 0.33755 | Acc: 85.104 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 023: | Loss: 0.33750 | Acc: 85.194 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 024: | Loss: 0.33752 | Acc: 85.194 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 025: | Loss: 0.33751 | Acc: 85.194 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 026: | Loss: 0.33748 | Acc: 85.149 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 027: | Loss: 0.33748 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 028: | Loss: 0.33752 | Acc: 85.149 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 029: | Loss: 0.33749 | Acc: 85.090 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 030: | Loss: 0.33752 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 031: | Loss: 0.33749 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 032: | Loss: 0.33749 | Acc: 85.104 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 033: | Loss: 0.33748 | Acc: 85.119 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 034: | Loss: 0.33749 | Acc: 85.194 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 035: | Loss: 0.33749 | Acc: 85.194 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 036: | Loss: 0.33746 | Acc: 85.209 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 037: | Loss: 0.33750 | Acc: 85.179 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 038: | Loss: 0.33751 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 039: | Loss: 0.33745 | Acc: 85.209 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 040: | Loss: 0.33752 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 041: | Loss: 0.33747 | Acc: 85.149 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 042: | Loss: 0.33748 | Acc: 85.104 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 043: | Loss: 0.33750 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 044: | Loss: 0.33748 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 045: | Loss: 0.33749 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 046: | Loss: 0.33747 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 047: | Loss: 
0.33750 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 048: | Loss: 0.33748 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 049: | Loss: 0.33749 | Acc: 85.224 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 050: | Loss: 0.33751 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 051: | Loss: 0.33747 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 052: | Loss: 0.33746 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 053: | Loss: 0.33746 | Acc: 85.179 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 054: | Loss: 0.33748 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 055: | Loss: 0.33750 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 056: | Loss: 0.33748 | Acc: 85.179 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 057: | Loss: 0.33747 | Acc: 85.149 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 058: | Loss: 0.33748 | Acc: 85.179 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 059: | Loss: 0.33749 | Acc: 85.090 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 060: | Loss: 0.33748 | Acc: 85.149 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 061: | Loss: 0.33750 | Acc: 85.179 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 062: | Loss: 0.33748 | Acc: 85.104 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 063: | Loss: 0.33748 | Acc: 85.164 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 064: | Loss: 0.33749 | Acc: 85.209 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 065: | Loss: 0.33749 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 066: | Loss: 0.33750 | Acc: 85.119 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 067: | Loss: 0.33748 | Acc: 85.119 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 068: | Loss: 0.33751 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 069: | Loss: 0.33751 | Acc: 85.149 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 070: | Loss: 0.33748 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 071: | Loss: 0.33749 | Acc: 85.149 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 072: | Loss: 0.33748 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 073: | Loss: 0.33748 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 074: | Loss: 0.33747 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 075: | Loss: 0.33749 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 076: | Loss: 0.33749 | Acc: 85.119 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 077: | Loss: 0.33748 | Acc: 85.104 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 078: | Loss: 0.33749 | Acc: 85.104 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 079: | Loss: 0.33749 | Acc: 85.194 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 080: | Loss: 0.33747 | Acc: 85.104 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 081: | Loss: 0.33745 | Acc: 85.179 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 082: | Loss: 0.33748 | Acc: 85.209 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 083: | Loss: 0.33749 | Acc: 85.149 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 084: | Loss: 0.33751 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 085: | Loss: 0.33746 | Acc: 85.194 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 086: | Loss: 0.33750 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 087: | Loss: 0.33747 | Acc: 85.090 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 088: | Loss: 0.33746 | Acc: 85.209 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 089: | Loss: 0.33748 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 090: | Loss: 0.33747 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 091: | Loss: 0.33749 | Acc: 85.164 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 092: | Loss: 0.33750 | Acc: 85.149 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 093: | Loss: 0.33752 | Acc: 85.060 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 094: | Loss: 0.33747 | Acc: 85.194 | 
Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 095: | Loss: 0.33746 | Acc: 85.149 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 096: | Loss: 0.33751 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 097: | Loss: 0.33749 | Acc: 85.134 | Acc_eval: 0.85 | f1_eval: 0.85\nEpoch 098: | Loss: 0.33748 | Acc: 85.030 | Acc_eval: 0.85 | f1_eval: 0.86\nEpoch 099: | Loss: 0.33747 | Acc: 85.119 | Acc_eval: 0.86 | f1_eval: 0.86\nEpoch 100: | Loss: 0.33750 | Acc: 85.134 | Acc_eval: 0.86 | f1_eval: 0.86\n"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d04d12d127b658c5eb666feb255e0213084bc86e | 9,709 | ipynb | Jupyter Notebook | .ipynb_checkpoints/conclusions_groupby-checkpoint.ipynb | Siddharth1698/Data-Analyst-Nanodegree | ad09ee76788338e94b97c32a3e3beb9ec43d3c5f | [
"MIT"
] | 1 | 2021-08-03T20:57:56.000Z | 2021-08-03T20:57:56.000Z | .ipynb_checkpoints/conclusions_groupby-checkpoint.ipynb | Siddharth1698/Data-Analyst-Nanodegree | ad09ee76788338e94b97c32a3e3beb9ec43d3c5f | [
"MIT"
] | null | null | null | .ipynb_checkpoints/conclusions_groupby-checkpoint.ipynb | Siddharth1698/Data-Analyst-Nanodegree | ad09ee76788338e94b97c32a3e3beb9ec43d3c5f | [
"MIT"
] | null | null | null | 28.060694 | 100 | 0.376043 | [
[
[
"# Drawing Conclusions Using Groupby",
"_____no_output_____"
]
],
[
[
"# Load `winequality_edited.csv`\nimport pandas as pd\n\ndf = pd.read_csv('winequality_edited.csv')",
"_____no_output_____"
]
],
[
[
"### Is a certain type of wine associated with higher quality?",
"_____no_output_____"
]
],
[
[
"# Find the mean quality of each wine type (red and white) with groupby\ndf.groupby('color').mean().quality",
"_____no_output_____"
]
],
[
[
"### What level of acidity receives the highest average rating?",
"_____no_output_____"
]
],
[
[
"# View the min, 25%, 50%, 75%, max pH values with Pandas describe\ndf.describe().pH",
"_____no_output_____"
],
[
"# Bin edges that will be used to \"cut\" the data into groups\nbin_edges = [2.72, 3.11, 3.21, 3.32, 4.01] # Fill in this list with five values you just found",
"_____no_output_____"
],
[
"# Labels for the four acidity level groups\nbin_names = ['high', 'mod_high', 'medium', 'low'] # Name each acidity level category",
"_____no_output_____"
],
[
"# Creates acidity_levels column\ndf['acidity_levels'] = pd.cut(df['pH'], bin_edges, labels=bin_names)\n\n# Checks for successful creation of this column\ndf.head()",
"_____no_output_____"
],
[
"# Find the mean quality of each acidity level with groupby\ndf.groupby('acidity_levels').mean().quality",
"_____no_output_____"
],
[
"# Save changes for the next section\ndf.to_csv('winequality_edited.csv', index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04d1b5bc0e669ebd24705dae25ccfece20b9aa1 | 2,072 | ipynb | Jupyter Notebook | Challenges/LongestSubstringChallenge.ipynb | CVanchieri/LambdaSchool-DS-Challenges | e023bae45bd63979c8fb8b630858d61df122a9de | [
"MIT"
] | 1 | 2020-02-04T04:16:16.000Z | 2020-02-04T04:16:16.000Z | Challenges/LongestSubstringChallenge.ipynb | CVanchieri/LambdaSchool-DS-Challenges | e023bae45bd63979c8fb8b630858d61df122a9de | [
"MIT"
] | null | null | null | Challenges/LongestSubstringChallenge.ipynb | CVanchieri/LambdaSchool-DS-Challenges | e023bae45bd63979c8fb8b630858d61df122a9de | [
"MIT"
] | null | null | null | 2,072 | 2,072 | 0.689672 | [
[
[
"Write a function that takes in a string and returns its longest substring without duplicate characters. Assume that there will only be one longest substring without duplication.\n\nFor example, longest_substring(\"zaabcde\") == 'abcde'",
"_____no_output_____"
]
],
[
[
"# longest substring function.\ndef longest_substring(s):\n lastSeen = {} # dictionary of where the last of a character is.\n longest = [0, 1] # index of beginning and end of longest substring.\n startIdx = 0 # index of beginning of current substring\n for i, char in enumerate(s):\n if char in lastSeen:\n # start over if character is in current substring.\n startIdx = max(startIdx, lastSeen[char] + 1)\n if longest[1] - longest[0] < i + 1 - startIdx:\n # if current substring is longer than longest.\n longest = [startIdx, i + 1]\n lastSeen[char] = i # update dictionary.\n \n return s[longest[0] : longest[1]]\n",
"_____no_output_____"
],
[
"# driver program.\n\ns = \"zaabcde\"\n# results for longest substring.\nprint(longest_substring(s))",
"abcde\n"
]
]
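A few extra checks for the function above; the expected values were worked out by hand with the same sliding-window logic, so they only confirm the behavior already shown.

```python
# Additional test cases for longest_substring (assumes the function defined above).
assert longest_substring("zaabcde") == "abcde"
assert longest_substring("abcabcbb") == "abc"
assert longest_substring("pwwkew") == "wke"
assert longest_substring("bbbb") == "b"
print("all checks passed")
```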
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
d04d1ea95774f042d0cda8fe7ae301a10bd39817 | 41,636 | ipynb | Jupyter Notebook | DataScienceExam/Exam.ipynb | SvetozarMateev/Data-Science | fe662dc8ad7040e60a0811567e0cddc8de606c01 | [
"MIT"
] | null | null | null | DataScienceExam/Exam.ipynb | SvetozarMateev/Data-Science | fe662dc8ad7040e60a0811567e0cddc8de606c01 | [
"MIT"
] | null | null | null | DataScienceExam/Exam.ipynb | SvetozarMateev/Data-Science | fe662dc8ad7040e60a0811567e0cddc8de606c01 | [
"MIT"
] | null | null | null | 42.70359 | 1,504 | 0.626165 | [
[
[
"import pandas as pd\nimport scipy.stats as st\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport operator",
"_____no_output_____"
]
],
[
[
"# Crimes\n### Svetozar Mateev",
"_____no_output_____"
],
[
"## Putting Crime in the US in Context\n",
"_____no_output_____"
],
[
"First I am going to calculate the total crimes by dividing the population by 100 000 and then multiplying it by the crimes percapita.Then I am going to remove the NaN values.",
"_____no_output_____"
]
],
[
[
"crime_reports=pd.read_csv(\"report.csv\")\ncrime_reports=crime_reports.dropna()\ncrime_reports=crime_reports.reset_index()\ncrime_reports[\"total_crimes\"]=(crime_reports.population/100000*crime_reports.crimes_percapita)\n#crime_reports[[\"population\",'crimes_percapita','total_crimes']]\n",
"_____no_output_____"
]
],
[
[
"•\tHave a look at the “months_reported” column values. What do they mean? What percent of the rows have less than 12 months? How significant is that?",
"_____no_output_____"
]
],
[
[
"crime_reports[\"months_reported\"].unique()\nless_than_twelve=crime_reports[crime_reports.months_reported<12]\nprint(str(len(less_than_twelve)/len(crime_reports.months_reported)*100)+'%')",
"_____no_output_____"
]
],
[
[
"The months reported column indicates how much months from the year have been reported and only 1.9% of the rows have less than 12 months reported per year whichn on a 5% level isn't significant.",
"_____no_output_____"
],
[
"•\tOverall crime popularity: Create a bar chart of crime frequencies (total, not per capita). Display the type of crime and total occurrences (sum over all years). Sort largest to smallest. Are there any patterns? Which crime is most common?",
"_____no_output_____"
]
],
[
[
"homicides_total_sum=crime_reports.homicides.sum()\nrapes_total_sum=crime_reports.rapes.sum()\nassaults_total_sum=crime_reports.assaults.sum()\nrobberies_total_sum=crime_reports.robberies.sum()\ntotal_crimes_total_sum= crime_reports.total_crimes.sum()\nhomicides_frequency=homicides_total_sum/total_crimes_total_sum\nrapes_frequency=rapes_total_sum/total_crimes_total_sum\nassaults_frequency=assaults_total_sum/total_crimes_total_sum\nrobberies_frequency=robberies_total_sum/total_crimes_total_sum\n\nplt.bar(height=[assaults_frequency,robberies_frequency,rapes_frequency,homicides_frequency],left=[1,2,3,4], align = \"center\",width=0.2)\nplt.xticks([1,2,3,4,],['Assaults','Robberies','Rapes','Homicides'])\nplt.ylabel(\"Frequency of a crime\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"The most frequent crimes are the assaults and i can see from the diagram that crimes which are less serious are committed more often.",
"_____no_output_____"
],
[
"•\tCrime popularity by year: Break down the analysis of the previous graph by year. What is the most common crime (total, not per capita) for each year? What is the least common one?",
"_____no_output_____"
]
],
[
[
"homicides_sum=0\nrapes_sum=0\nassaults_sum=0\nrobberies_sum=0\nfor year in crime_reports.report_year.unique():\n year_df=crime_reports[crime_reports.report_year==year]\n homicides_sum_year=year_df.homicides.sum()\n rapes_sum_year=year_df.rapes.sum()\n assaults_sum_year=year_df.assaults.sum()\n robberies_sum_year=year_df.robberies.sum()\n if(homicides_sum_year>rapes_sum_year and homicides_sum_year>assaults_sum_year and homicides_sum_year>robberies_sum_year):\n homiciedes_sum+=1\n print(str(year)+' '+\"homicides\")\n elif(homicides_sum_year<rapes_sum_year and rapes_sum_year>assaults_sum_year and rapes_sum_year>robberies_sum_year):\n rapes_sum+=1\n print(str(year)+' '+\"rapes\")\n\n elif(homicides_sum_year<assaults_sum_year and rapes_sum_year<assaults_sum_year and assaults_sum_year>robberies_sum_year):\n assaults_sum+=1\n print(str(year)+' '+\"assaults\")\n\n elif(homicides_sum_year<robberies_sum_year and rapes_sum_year<robberies_sum_year and assaults_sum_year<robberies_sum_year):\n robberies_sum+=1\n print(str(year)+' '+\"robberies\")\n\n\nplt.bar(height=[assaults_sum,robberies_sum,homicides_sum,rapes_sum],left=[1,2,3,4],align='center')#most common one through the years\nplt.xticks([1,2,3,4,],['Assaults','Robberies','Homicides','Rapes'])\nplt.ylabel(\"Times a crime was most often for a year\")\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"I can see from the bar chart that assault were the most popular crime for a year almost thirty time and that the homicides and rapes were never the most popular crime for a year.",
"_____no_output_____"
],
[
"•\tCrime evolution (e. g. crime rates as a function of time): How do crime rates per capita evolve over the years? Create a plot (or a series) of plots displaying how each rate evolves. Create another plot of all crimes (total, not per capita) over the years.",
"_____no_output_____"
]
],
[
[
"rapes_per_capita=[]\nhomicides_per_capita=[]\nassaults_per_capita=[]\nrobberies_per_capita=[]\nfor year in crime_reports.report_year.unique():\n year_df=crime_reports[crime_reports.report_year==year]\n homicides_mean_year=year_df.homicides_percapita.mean()\n rapes_mean_year=year_df.rapes_percapita.mean()\n assaults_mean_year=year_df.assaults_percapita.mean()\n robberies_mean_year=year_df.robberies_percapita.mean()\n \n homicides_per_capita.append(homicides_mean_year)\n \n rapes_per_capita.append(rapes_mean_year)\n \n assaults_per_capita.append(assaults_mean_year)\n\n robberies_per_capita.append(robberies_mean_year)\nplt.plot(crime_reports.report_year.unique(),rapes_per_capita)\nplt.suptitle(\"Rapes\")\nplt.xlabel(\"Years\")\nplt.ylabel('Crimes per capira')\nplt.show()\nplt.plot(crime_reports.report_year.unique(),homicides_per_capita)\nplt.suptitle(\"Homicides\")\nplt.xlabel(\"Years\")\nplt.ylabel('Crimes per capira')\nplt.show()\nplt.plot(crime_reports.report_year.unique(),assaults_per_capita)\nplt.suptitle(\"Assaults\")\nplt.xlabel(\"Years\")\nplt.ylabel('Crimes per capira')\nplt.show()\nplt.plot(crime_reports.report_year.unique(),robberies_per_capita)\nplt.suptitle(\"Robberies\")\nplt.xlabel(\"Years\")\nplt.ylabel('Crimes per capira')\nplt.show()",
"_____no_output_____"
]
],
[
[
"From the plots we can see that each crime has significanttly lower rate per capita and that for all of them the peak was between 1990 and 1995.",
"_____no_output_____"
]
],
[
[
"rapes_per_year=[]\nhomicides_per_year=[]\nassaults_per_year=[]\nrobberies_per_year=[]\nfor year in crime_reports.report_year.unique():\n year_df=crime_reports[crime_reports.report_year==year]\n homicides_mean_year=year_df.homicides.sum()\n rapes_mean_year=year_df.rapes.sum()\n assaults_mean_year=year_df.assaults.sum()\n robberies_mean_year=year_df.robberies.sum()\n \n homicides_per_year.append(homicides_mean_year)\n \n rapes_per_year.append(rapes_mean_year)\n \n assaults_per_year.append(assaults_mean_year)\n\n robberies_per_year.append(robberies_mean_year)\nplt.plot(crime_reports.report_year.unique(),rapes_per_year,label=\"Rapes\")\nplt.plot(crime_reports.report_year.unique(),assaults_per_year,label=\"Assaults\")\nplt.plot(crime_reports.report_year.unique(),homicides_per_year,label=\"Homicides\")\nplt.plot(crime_reports.report_year.unique(),robberies_per_year,label=\"Robberies\")\nplt.legend()\nplt.ylabel(\"Number of crimes\")\nplt.xlabel(\"Years\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"Again our observations are confirmed that the peak of the crimes is around 1990 and that in present there are a lot less crimes except the rapes which between 2010 and 2015 have begun raise slightly.",
"_____no_output_____"
],
[
"## Crimes by States",
"_____no_output_____"
],
[
"•\t“Criminal” jurisdictions: Plot the sum of all crimes (total, not per capita) for each jurisdiction. Sort largest to smallest. Are any jurisdictions more prone to crime?",
"_____no_output_____"
]
],
[
[
"#agency_jurisdiction\njurisdicitons=[]\ncounter=0\ncrimes_per_jurisdiction=[]\nagencies_df=crime_reports.sort_values('violent_crimes',ascending=False)\n\nfor jurisdiciton in agencies_df.agency_jurisdiction.unique():\n jurisdicition_df=agencies_df[agencies_df.agency_jurisdiction==jurisdiciton]\n all_crimes=jurisdicition_df.violent_crimes.sum()\n crimes_per_jurisdiction.append(all_crimes)\n counter+=1\n jurisdicitons.append(jurisdiciton)\n if counter==10:\n break\ndf_plottt=pd.DataFrame({'area':jurisdicitons,'num':crimes_per_jurisdiction})\ndf_plottt=df_plottt.sort_values('num',ascending=False)\nplt.bar(height=df_plottt.num,left=[1,2,3,4,5,6,7,8,9,10],align='center')\nplt.xticks([1,2,3,4,5,6,7,8,9,10],df_plottt.area,rotation='vertical')\nplt.ylabel(\"Number of Crimes\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"From the bar chart we can see that the New York City,Ny jurisdiction has the most crimes.",
"_____no_output_____"
],
[
"•\t“Criminal” jurisdictions, part 2: Create the same type of chart as above, but use the crime rates per capita this time. Are you getting the same distribution? Why? You may need data from the “population” column to answer this. Don’t perform significance tests, just inspect the plots.",
"_____no_output_____"
]
],
[
[
"jurisdicitons=[]\ncounter=0\ncrimes_per_jurisdiction=[]\npopulation=[]\nagencies_df=crime_reports\nagencies_df=crime_reports.sort_values('crimes_percapita',ascending=False)\n\nfor a in agencies_df.agency_jurisdiction.unique():\n agencies_df[\"crimes_percapita_per_agency\"]=agencies_df[agencies_df.agency_jurisdiction==jurisdiciton].crimes_percapita.sum()\nagencies_df=agencies_df.sort_values('crimes_percapita_per_agency',ascending=True)\n\nfor jurisdiciton in agencies_df.agency_jurisdiction.unique():\n jurisdicition_df=agencies_df[agencies_df.agency_jurisdiction==jurisdiciton]\n all_crimes=jurisdicition_df.crimes_percapita.sum()\n crimes_per_jurisdiction.append(all_crimes)\n counter+=1\n jurisdicitons.append(jurisdiciton)\n population.append(jurisdicition_df.population.mean())\n if counter==10:\n break\n\ndf_plot=pd.DataFrame({'jurisdicitons':jurisdicitons,'num':crimes_per_jurisdiction})\ndf_plot=df_plot.sort_values('num',ascending=False,axis=0)\nplt.bar(height=df_plot.num,left=[1,2,3,4,5,6,7,8,9,10],align='center')\nplt.xticks([1,2,3,4,5,6,7,8,9,10],df_plot.jurisdicitons,rotation='vertical')\nplt.ylabel(\"Number of Crimes\")\nplt.show()\n\ndf_pop_plot=pd.DataFrame({'area':jurisdicitons,'num':population})\ndf_pop_plot=df_pop_plot.sort_values('num',ascending=False,axis=0)\nplt.bar(height=df_pop_plot.num,left=[1,2,3,4,5,6,7,8,9,10],align='center')\nplt.xticks([1,2,3,4,5,6,7,8,9,10],df_pop_plot.area,rotation='vertical')\nplt.ylabel(\"Population\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can see that the crime per capita in Miami is the biggest contary to the previous plot. However it appears to have little correlation between the number of crimes per capita and the population.",
"_____no_output_____"
],
[
"•\t“Criminal states”: Create the same type of chart as in the first subproblem, but use the states instead. You can get the state name in two ways: either the first two letters of the agency_code column or the symbols after the comma in the agency_jurisdiction column.",
"_____no_output_____"
]
],
[
[
"parts=crime_reports['agency_jurisdiction'].str.extract(\"(\\w+), (\\w+)\", expand = True)\nparts.columns=['something_else','state']\nparts['state']\ncrime_reports['state']=parts['state']\n\ncrime_states=[]\ntotal_crimes=[]\ncounter=0\ngencies_df=crime_reports.sort_values('violent_crimes',ascending=False)\nfor state in crime_reports.state.unique():\n jurisdicition_df=crime_reports[crime_reports.state==state]\n all_crimes=jurisdicition_df.violent_crimes.sum()\n total_crimes.append(all_crimes)\n crime_states.append(state)\n counter+=1\n jurisdicitons.append(jurisdiciton)\n if counter==10:\n break\n \nplot_df=pd.DataFrame({'states':crime_states,'num':total_crimes})\nplot_df=plot_df.sort_values('num',ascending=False)\nplt.bar(height=plot_df.num,left=[1,2,3,4,5,6,7,8,9,10],align='center')\nplt.xticks([1,2,3,4,5,6,7,8,9,10],plot_df.states)\nplt.ylabel(\"Number Of Crimes\")\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"From the chart we can see that New York has the biggest number of crimes.",
"_____no_output_____"
],
[
"•\tHypothesis testing: Are crime rates per capita related to population, e. g. does a more densely populated community produce more crime (because there are more people), or less crime (because there is a better police force)? Plot the total number of crimes vs. population to find out. Is there any correlation? If so, what is it? Is the correlation significant?",
"_____no_output_____"
]
],
[
[
"total_crimes=[]\nagency_jurisdiction=[]\npopulation=[]\ncounter=0\nfor jurisdiction in crime_reports.agency_jurisdiction.unique():\n jurisdicition_df=crime_reports[crime_reports.agency_jurisdiction==jurisdiction]\n all_crimes=jurisdicition_df.violent_crimes.sum()\n total_crimes.append(all_crimes) \n counter+=1\n agency_jurisdiction.append(jurisdiction)\n population.append(jurisdicition_df.population.mean())\n if counter==10:\n break\nprint(len(total_crimes),len(agency_jurisdiction)) \n\n\nplot_df=pd.DataFrame({'states':agency_jurisdiction,'num':total_crimes,'popu':population})\nplot_df=plot_df.sort_values('num',ascending=False)\nplt.bar(height=plot_df.popu,left=[1,2,3,4,5,6,7,8,9,10],align='center',color='r',label=\"Population\")\n\nplt.bar(height=plot_df.num,left=[1,2,3,4,5,6,7,8,9,10],align='center',color='b',label=\"Crimes\")\n\nplt.xticks([1,2,3,4,5,6,7,8,9,10],plot_df.states,rotation='vertical')\nplt.ylabel(\"Number\")\nplt.legend()\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"We can see that there isn't a corelation between the population and the crimes because some places like Atlanta,GA shows that there might be but others like Baltimore Country,MD show us totaly different results",
"_____no_output_____"
],
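A quick way to back the visual impression above with a number, using the `scipy.stats` import (`st`) from the top of the notebook. This is a hedged sketch under the assumption that averaging per jurisdiction is an acceptable aggregation; it is not part of the original analysis.

```python
# Hedged sketch: Pearson correlation between mean population and mean crimes per capita by jurisdiction.
by_jur = (crime_reports.groupby("agency_jurisdiction")
          .agg({"population": "mean", "crimes_percapita": "mean"})
          .dropna())
r, p_value = st.pearsonr(by_jur["population"], by_jur["crimes_percapita"])
print(f"r = {r:.3f}, p = {p_value:.3f}")  # |r| near 0 and/or a large p-value supports the 'no correlation' reading
```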
[
"## Additional data",
"_____no_output_____"
],
[
"First I am droping some of the unnecessary columns and then I am tranforming the dates to datetime objects.",
"_____no_output_____"
]
],
[
[
"crimes=pd.read_csv(\"crimes.csv\")\ncrimes=crimes.drop(['x','y','OBJECTID','ESRI_OID','Time'],axis=1)\ncrimes.columns=['publicaddress', 'controlnbr', 'CCN', 'precinct', 'reported_date',\n 'begin_date', 'offense', 'description', 'UCRCode', 'entered_date',\n 'long', 'lat', 'neighborhood', 'lastchanged', 'last_update_date']\ncrimes.dtypes\n#2015-09-21T14:16:59.000Z\ncrimes['reported_date']=pd.to_datetime(crimes['reported_date'],format='%Y-%m-%d',errors='ignore')\ncrimes['entered_date']=pd.to_datetime(crimes['entered_date'],format='%Y-%m-%d',errors='ignore')\ncrimes['lastchanged']=pd.to_datetime(crimes['lastchanged'],format='%Y-%m-%d',errors='ignore')\ncrimes['last_update_date']=pd.to_datetime(crimes['last_update_date'],format='%Y-%m-%d',errors='ignore')\ncrimes['begin_date']=pd.to_datetime(crimes['begin_date'],format='%Y-%m-%d',errors='ignore')\ncrimes=crimes.dropna()",
"_____no_output_____"
]
],
[
[
"•\tTotal number of crimes per year: Count all crimes for years in the dataset (2010-2016). Print the total number.",
"_____no_output_____"
]
],
[
[
"print(str(len(crimes))+\" \"+\"crimes between 2010 and 2016\")",
"_____no_output_____"
]
],
[
[
"•\tPlot how crimes evolve each year",
"_____no_output_____"
]
],
[
[
"year_10=0\nyear_11=0\nyear_12=0\nyear_13=0\nyear_14=0\nyear_15=0\nyear_16=0\n\nfor date in crimes.begin_date:\n if date.year==2010:\n year_10+=1\n elif date.year==2011:\n year_11+=1\n elif date.year==2012:\n year_12+=1\n elif date.year==2013:\n year_13+=1\n elif date.year==2014:\n year_14+=1 \n elif date.year==2015:\n year_15+=1\n elif date.year==2016:\n year_16+=1\nplt.bar(height=[year_10,year_11,year_12,year_13,year_14,year_15,year_16],left=[1, 2, 3, 4 ,5 ,6 ,7],align='center')\nplt.ylabel(\"Number of Crimes\")\nplt.xticks([1, 2, 3, 4 ,5 ,6 ,7],['2010','2011','2012','2013','2014','2015','2016',])\nplt.show()",
"_____no_output_____"
]
],
[
[
"From 2010 to 2012 ther is a sligth raise in the number of crimes.However from 2012 to 2016 there is a drop in the number of crimes committed.",
"_____no_output_____"
],
[
"•\tCompare the previous plot to the plots in the previous exercise.\nNote: In order to make comparison better, plot the data for all states again, but this time filter only years 2010-2016. Does the crime rate in MN have any connection to the total crime rate? What percentage of the total crime rate (in all given states) is given by MN?\n",
"_____no_output_____"
]
],
[
[
"crime_states=[]\ntotal_crimes=[]\ncounter=0\ngencies_df=crime_reports.sort_values('violent_crimes',ascending=False)\nfor state in crime_reports.state.unique():\n jurisdicition_df=crime_reports[crime_reports.state==state]\n right_year=jurisdicition_df[jurisdicition_df.report_year>2009]\n \n all_crimes=right_year.violent_crimes.sum()\n total_crimes.append(all_crimes)\n crime_states.append(state)\n counter+=1\n jurisdicitons.append(jurisdiciton)\n if counter==10:\n break\n \nplot_df=pd.DataFrame({'states':crime_states,'num':total_crimes})\nplot_df=plot_df.sort_values('num',ascending=False)\nplt.bar(height=plot_df.num,left=[1,2,3,4,5,6,7,8,9,10],align='center')\nplt.xticks([1,2,3,4,5,6,7,8,9,10],plot_df.states)\nplt.ylabel(\"Number Of Crimes\")\nplt.show()\n\nyear_10=0\nyear_11=0\nyear_12=0\nyear_13=0\nyear_14=0\nyear_15=0\nyear_16=0\n\nfor date in crimes.begin_date:\n if date.year==2010:\n year_10+=1\n elif date.year==2011:\n year_11+=1\n elif date.year==2012:\n year_12+=1\n elif date.year==2013:\n year_13+=1\n elif date.year==2014:\n year_14+=1 \n elif date.year==2015:\n year_15+=1\n elif date.year==2016:\n year_16+=1\nplt.bar(height=[year_10,year_11,year_12,year_13,year_14,year_15,year_16],left=[1, 2, 3, 4 ,5 ,6 ,7],align='center')\nplt.ylabel(\"Number of Crimes\")\nplt.xticks([1, 2, 3, 4 ,5 ,6 ,7],['2010','2011','2012','2013','2014','2015','2016',])\nplt.show()\nwhole_number = sum(i for i in total_crimes)\n\nprint(str(len(crimes)/whole_number)+' '+'% from the total number of crimes committed between 2010 and 2016')",
"_____no_output_____"
]
],
[
[
"•\tCross-dataset matching: Get data from the previous dataset (crime rates in the US) again. This time, search only for MN and only for years 2010-2016. Do you have any results? If so, the results for total crime in MN should match in both datasets. Do they match?",
"_____no_output_____"
]
],
[
[
"year_10n=4064.0\nyear_11n=3722.0\nyear_12n=3872.0\nyear_13n=4038.0\nyear_14n=4093.0\nyear_15n=0\nyear_16n=0\nMN=crime_reports[crime_reports.state==\"MN\"]\nMN=MN[MN.report_year>2009]\nnumber_crimes=sum(MN.violent_crimes)\nprint(str(int(number_crimes))+\" from the first data set\")\nprint(str(len(crimes))+\" \"+\"from the second data set\")\n\n\n\nyear_10=0\nyear_11=0\nyear_12=0\nyear_13=0\nyear_14=0\nyear_15=0\nyear_16=0\n\nfor date in crimes.begin_date:\n if date.year==2010:\n year_10+=1\n elif date.year==2011:\n year_11+=1\n elif date.year==2012:\n year_12+=1\n elif date.year==2013:\n year_13+=1\n elif date.year==2014:\n year_14+=1 \n elif date.year==2015:\n year_15+=1\n elif date.year==2016:\n year_16+=1\nplt.bar(height=[year_10,year_11,year_12,year_13,year_14,year_15,year_16],left=[1, 2, 3, 4 ,5 ,6 ,7],align='center',color='r',label=\"Second DataSet values\")\n\nplt.bar(height=[year_10n,year_11n,year_12n,year_13n,year_14n,year_15n,year_16n],left=[1,2,3,4,5,6,7],align='center',color='b',label=\"First DataSet values\")\nplt.legend()\nplt.xticks([1,2,3,4,5,6,7],['2010','2011','2012','2013','2014','2015','2016',])\nplt.ylabel(\"Crimes\")\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"The values in the first data set are until 2014 and they are much smaller than those in the second.There is a big difference between the two.",
"_____no_output_____"
],
[
"## Temporal Analysis",
"_____no_output_____"
],
[
"•\tLook at the crime categories. Which is the most popular crime category in MN overall?",
"_____no_output_____"
]
],
[
[
"crimes.description.unique()\nd={'Shoplifting':1, 'Theft From Motr Vehc':1, 'Other Theft':1,\n 'Theft From Building':1, 'Crim Sex Cond-rape':1, 'Burglary Of Dwelling':1,\n 'Theft From Person':1, 'Motor Vehicle Theft':1, 'Robbery Of Business':1,\n 'Aslt-police/emerg P':1, 'Domestic Assault/Strangulation':1,\n 'Theft-motr Veh Parts':1, 'Robbery Of Person':1, 'Asslt W/dngrs Weapon':1,\n 'Robbery Per Agg':1, 'Burglary Of Business':1, 'Arson':1,\n 'Theft By Swindle':1, 'Aslt-great Bodily Hm':1, 'Aslt-sgnfcnt Bdly Hm':1,\n 'On-line Theft':1, '2nd Deg Domes Aslt':1, 'Murder (general)':1,\n 'Adulteration/poison':1, 'Gas Station Driv-off':1,\n 'Other Vehicle Theft':1, '3rd Deg Domes Aslt':1, 'Pocket-picking':1,\n 'Theft/coinop Device':1, 'Disarm a Police Officer':1,\n 'Theft By Computer':1, '1st Deg Domes Asslt':1, 'Bike Theft':1,\n 'Scrapping-Recycling Theft':1, 'Justifiable Homicide':0, 'Looting':1}\nfor desc in crimes.description:\n d[desc]+=1\nsorted_d = sorted(d.items(), key=operator.itemgetter(1))\nprint(sorted_d)",
"_____no_output_____"
]
],
[
[
"The most common type is Other theft but since it si do unclear we can say that Burglary of Dwelling is the most commnon type of theft.",
"_____no_output_____"
],
[
"•\tBreak down the data by months. Plot the total number of crimes for each month, summed over the years. Is there a seasonal component? Which month has the highest crime rate? Which has the smallest? Are the differences significant?\n",
"_____no_output_____"
]
],
[
[
"january=0\nfebruary=0\nmarch=0\napril=0\nmay=0\njune=0\njuly=0\naugust=0\nseptember=0\noctober=0\nnovember=0\ndecember=0\nfor date in crimes.begin_date:\n if(date.month==1):\n january+=1\n elif(date.month==2):\n february+=1\n elif(date.month==3):\n march+=1\n elif(date.month==4):\n april+=1\n elif(date.month==5):\n may+=1\n elif(date.month==6):\n june+=1\n elif(date.month==7):\n july+=1\n elif(date.month==8):\n august+=1\n elif(date.month==9):\n september+=1\n elif(date.month==10):\n october+=1\n elif(date.month==11):\n november+=1\n elif(date.month==12):\n december+=1\nplt.bar(height=[january,february,march,april,may,june,july,august,september,october,november,december]\n ,left=[1,2,3,4,5,6,7,8,9,10,11,12],align='center')\nplt.xticks([1,2,3,4,5,6,7,8,9,10,11,12],\n ['january','february','march','april','may','june','july','august','september','october','november','december']\n ,rotation='vertical')\nplt.ylabel(\"Number Of Crimes\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can see that most of the crimes are in june and that there is seasonal tendency that most of the crimes are committer in the summer.",
"_____no_output_____"
],
[
"•\tBreak the results by weekday. You can get the weekday from the date (there are functions for this). Do more crimes happen on \nthe weekends?",
"_____no_output_____"
]
],
[
[
"Monday=0\nTuesday=0\nWednesday=0\nThursday=0\nFriday=0\nSaturday=0\nSunday=0\nfor date in crimes.begin_date:\n if(date.weekday()==0):\n Monday+=1\n elif(date.weekday()==1):\n Tuesday+=1\n elif(date.weekday()==2):\n Wednesday+=1\n elif(date.weekday()==3):\n Thursday+=1\n elif(date.weekday()==4):\n Friday+=1\n elif(date.weekday()==5):\n Saturday+=1\n elif(date.weekday()==6):\n Sunday+=1\n \nplt.bar(height=[Monday,Tuesday,Wednesday,Thursday,Friday,Saturday,Sunday]\n ,left=[1,2,3,4,5,6,7],align='center')\nplt.xticks([1,2,3,4,5,6,7],['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'],rotation='vertical')\nplt.ylabel(\"Number Of Crimes\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"Most crimes are committed on Fridays.On the second place are Thursdays.",
"_____no_output_____"
],
[
"•\tBreak the weekday data by crime type. Are certain types of crime more likely to happen on a given day? Comment your findings.\nI have no time to complete this because I have a Programming Fundamentals Exam to take but I would make 7 plots one for each day of the week with the top 10 types of crimes.",
"_____no_output_____"
],
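A sketch of the weekday-by-crime-type breakdown described above. It assumes `begin_date` was successfully parsed to datetime earlier and uses the `description` column as the crime type; it prints counts rather than the 7 plots the author had in mind.

```python
# Hedged sketch: top 10 crime types for each weekday.
crimes["weekday"] = crimes["begin_date"].dt.day_name()
counts = crimes.groupby(["weekday", "description"]).size()
for day, day_counts in counts.groupby(level=0):
    print(day)
    print(day_counts.sort_values(ascending=False).head(10))
    print()
```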
[
"## 5.\tSignificant Factors in Crime",
"_____no_output_____"
]
],
[
[
"communities= pd.read_table(\"communities.data\",sep=',',header=None)\ncommunities.columns\ncommunities_names= pd.read_table('communities.names',header=None)\ncommunities_names",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
d04d2e3e112f419f597cee31ef026d746bab01bc | 87,251 | ipynb | Jupyter Notebook | 00_core.ipynb | noklam/fastdot | da90709eeae35154638d041dbd3b5237f22f6b52 | [
"Apache-2.0"
] | null | null | null | 00_core.ipynb | noklam/fastdot | da90709eeae35154638d041dbd3b5237f22f6b52 | [
"Apache-2.0"
] | null | null | null | 00_core.ipynb | noklam/fastdot | da90709eeae35154638d041dbd3b5237f22f6b52 | [
"Apache-2.0"
] | null | null | null | 46.115751 | 413 | 0.538997 | [
[
[
"#default_exp core",
"_____no_output_____"
]
],
[
[
"# fastdot.core\n\n> Drawing graphs with graphviz.",
"_____no_output_____"
]
],
[
[
"#export\nfrom fastcore.all import *\nimport pydot\nfrom matplotlib.colors import rgb2hex, hex2color",
"_____no_output_____"
],
[
"#export\n_all_ = ['pydot']",
"_____no_output_____"
],
[
"#hide\nfrom nbdev.showdoc import *",
"_____no_output_____"
]
],
[
[
"## Nodes",
"_____no_output_____"
]
],
[
[
"#export\ndef Dot(defaults=None, rankdir='LR', directed=True, compound=True, **kwargs):\n \"Create a `pydot.Dot` graph with fastai/fastdot style defaults\"\n return pydot.Dot(rankdir=rankdir, directed=directed, compound=compound, **kwargs)",
"_____no_output_____"
],
[
"#export\ndef uniq_name(o): return 'n'+(uuid4().hex)\n\ndef quote(x, q='\"'):\n 'Surround `x` with `\"`'\n return f'\"{x}\"'\n\n@patch\ndef _repr_svg_(self:pydot.Dot):\n return self.create_svg().decode('utf-8')",
"_____no_output_____"
],
[
"#export\ngraph_objects = {}\nobject_names = {}",
"_____no_output_____"
],
[
"#export\ndef add_mapping(graph_item, obj):\n graph_objects[graph_item.get_name()] = graph_item\n object_names[id(obj)] = graph_item.get_name()\n return graph_item",
"_____no_output_____"
],
[
"#export\ndef _pydot_create(f, obj, **kwargs):\n for k,v in kwargs.items():\n if callable(v): v = kwargs[k] = v(obj)\n if k not in ('name','graph_name'): kwargs[k] = quote(v)\n return add_mapping(f(**kwargs), obj)",
"_____no_output_____"
],
[
"#export\nnode_defaults = dict(label=str, tooltip=str, name=uniq_name, shape='box', style='rounded, filled', fillcolor='white')",
"_____no_output_____"
],
[
"#export\ndef Node(obj, **kwargs):\n \"Create a `pydot.Node` with a unique name\"\n if not isinstance(obj,str) and isinstance(obj, Collection) and len(obj)==2:\n obj,kwargs['tooltip'] = obj\n kwargs = merge(node_defaults, kwargs)\n return _pydot_create(pydot.Node, obj, **kwargs)",
"_____no_output_____"
]
],
[
[
"`pydot` uses the same name-based approach to identifying graph items as `graphviz`. However we would rather use python objects. Therefore, we patch `pydot` to use unique names.",
"_____no_output_____"
]
],
[
[
"g = Dot()\na = Node('a')\ng.add_node(a)\ng",
"_____no_output_____"
]
],
[
[
"If a 2-tuple is passed to `add_node`, then the 2nd element becomes the tooltip. You can also pass any `kwargs` that are accepted by `graphviz`.",
"_____no_output_____"
]
],
[
[
"g = Dot()\ng.add_node(Node(['a', \"My tooltip\"], fillcolor='pink'))\ng",
"_____no_output_____"
]
],
[
[
"Keyword args can also be arbitrary functions, which will called with the node's label.",
"_____no_output_____"
]
],
[
[
"g = Dot()\no = 'a'\ng.add_node(Node(o, fillcolor=lambda o:'pink'))\ng",
"_____no_output_____"
],
[
"#export\ndef object2graph(o):\n \"Get graph item representing `o`\"\n return graph_objects[object_names[id(o)]]",
"_____no_output_____"
],
[
"object2graph(o).get_fillcolor()",
"_____no_output_____"
]
],
[
[
"## Colors",
"_____no_output_____"
],
[
"The callable kwargs functionality can be used to map labels to colors in a consistent way..",
"_____no_output_____"
]
],
[
[
"#export\ndef obj2node_color(cm, minalpha, rangealpha, o):\n \"Create a consistent mapping from objects to colors, using colormap `cm`\"\n h = hash(o)\n i = float(h % 256) / 256\n alpha = (h^hash('something')) % rangealpha + minalpha\n return rgb2hex(cm(i)) + f'{alpha:02X}'",
"_____no_output_____"
],
[
"#exports\ngraph_colors1 = partial(obj2node_color, plt.get_cmap('rainbow'), 30, 160)\ngraph_colors2 = partial(obj2node_color, plt.get_cmap('tab20'), 30, 160)",
"_____no_output_____"
]
],
[
[
"These predefined color mapping functions provide a good range of colors and readable text.",
"_____no_output_____"
]
],
[
[
"g = Dot()\ng.add_node(Node('a', fillcolor=graph_colors1))\ng.add_node(Node('b', fillcolor=graph_colors1))\ng",
"_____no_output_____"
],
[
"g = Dot()\ng.add_node(Node('a', fillcolor=graph_colors2))\ng.add_node(Node('b', fillcolor=graph_colors2))\ng",
"_____no_output_____"
]
],
[
[
"We'll use the former color function as our default. You can change it by simply modifying `node_defaults`.",
"_____no_output_____"
]
],
[
[
"#export\nnode_defaults['fillcolor'] = graph_colors1",
"_____no_output_____"
]
],
[
[
"## Clusters and Items",
"_____no_output_____"
]
],
[
[
"#export\ncluster_defaults = dict(label=str, tooltip=str, graph_name=uniq_name, style='rounded, filled', fillcolor='#55555522')",
"_____no_output_____"
],
[
"#export\ndef Cluster(obj='', **kwargs):\n \"Create a `pydot.Cluster` with a unique name\"\n kwargs = merge(cluster_defaults, kwargs)\n return _pydot_create(pydot.Cluster, obj, **kwargs)",
"_____no_output_____"
],
[
"g = Dot()\nsg = Cluster('clus', tooltip='Cluster tooltip')\nsg.add_node(Node(['a', \"My tooltip\"]))\nsg.add_node(Node('b'))\ng.add_subgraph(sg)\ng",
"_____no_output_____"
],
[
"#export\n@patch\ndef nodes(self:pydot.Graph):\n \"`i`th node in `Graph`\"\n return L(o for o in self.get_nodes() if o.get_label() is not None)",
"_____no_output_____"
],
[
"#export\n@patch\ndef __getitem__(self:pydot.Graph, i):\n \"`i`th node in `Graph`\"\n return self.nodes()[i]",
"_____no_output_____"
]
],
[
[
"You can subscript into a `Graph`'s `Node`s by index:",
"_____no_output_____"
]
],
[
[
"print(sg[0].get_label())",
"\"a\"\n"
],
[
"#export\n@patch\ndef add_item(self:pydot.Graph, item, **kwargs):\n \"Add a `Cluster`, `Node`, or `Edge` to the `Graph`\"\n if not isinstance(item, (pydot.Edge,pydot.Node,pydot.Graph)): item = Node(item, **kwargs)\n f = self.add_node if isinstance(item, pydot.Node ) else \\\n self.add_subgraph if isinstance(item, pydot.Graph) else \\\n self.add_edge if isinstance(item, pydot.Edge ) else None\n f(item)\n return item",
"_____no_output_____"
]
],
[
[
"There's no good reason to have different methods for adding clusters vs nodes (as `pydot` requires), so we provide a single method.",
"_____no_output_____"
]
],
[
[
"g = Dot()\nsg = Cluster('clus')\ng.add_item(sg)\nsg.add_item('a')\ng",
"_____no_output_____"
],
[
"#export\n@patch\ndef add_items(self:pydot.Graph, *items, **kwargs):\n \"Add `items` the `Graph`\"\n return L(self.add_item(it, **kwargs) for it in items)",
"_____no_output_____"
],
[
"#export\ndef graph_items(*items, **kwargs):\n \"Add `items` to a new `pydot.Dot`\"\n g = Dot()\n g.add_items(*items, **kwargs)\n return g",
"_____no_output_____"
],
[
"sg1 = Cluster('clus')\nsg1.add_items('n1', 'n2')\nsg2 = Cluster()\nsg2.add_item('n')\ngraph_items(sg1,sg2)",
"_____no_output_____"
]
],
[
[
"## Edges",
"_____no_output_____"
]
],
[
[
"#export\n@patch\ndef first(self:pydot.Graph):\n \"First node in `Graph`, searching subgraphs recursively as needed\"\n nodes = self.nodes()\n if nodes: return nodes[0]\n for subg in self.get_subgraphs():\n res = subg.first()\n if res: return res",
"_____no_output_____"
],
[
"#export\n@patch\ndef last(self:pydot.Graph):\n \"Lastt node in `Graph`, searching subgraphs recursively as needed\"\n nodes = self.nodes()\n if nodes: return nodes[-1]\n for subg in reversed(self.get_subgraphs()):\n res = subg.last()\n if res: return res",
"_____no_output_____"
],
[
"#export\n@patch\ndef with_compass(self:(pydot.Node,pydot.Graph), compass=None):\n r = self.get_name()\n return f'{r}:{compass}' if compass else r",
"_____no_output_____"
],
[
"# export\n@patch\ndef connect(self:(pydot.Node,pydot.Graph), item, compass1=None, compass2=None, **kwargs):\n \"Connect two nodes or clusters\"\n a,b,ltail,lhead = self,item,'',''\n if isinstance(self,pydot.Graph):\n a = self.last()\n ltail=self.get_name()\n if isinstance(item,pydot.Graph):\n b = item.first()\n lhead=item.get_name()\n a,b = a.with_compass(compass1),b.with_compass(compass2)\n return pydot.Edge(a, b, lhead=lhead, ltail=ltail, **kwargs)",
"_____no_output_____"
],
[
"sg2 = Cluster('clus2')\nn1 = sg2.add_item('n1', fillcolor='pink')\nn2 = sg2.add_item('n2', fillcolor='lightblue')\nsg2.add_item(n1.connect(n2))\n\nsg1 = Cluster('clus1')\nsg1.add_item(sg2)\n\na,b = Node('a'),Node('b')\nedges = a.connect(b),a.connect(a),sg1.connect(b),sg2[0].connect(a)\ng = Dot()\ng.add_items(sg1, a, b, *edges)\ng",
"_____no_output_____"
],
[
"#export\ndef object_connections(conns):\n \"Create connections between all pairs in `conns`\"\n return [object2graph(a).connect(object2graph(b)) for a,b in conns]",
"_____no_output_____"
]
],
[
[
"This is a shortcut for creating connections between objects that are already in a graph.",
"_____no_output_____"
]
],
[
[
"a,b = 'a','b'\ng = graph_items(a, b)\ng.add_items(*object_connections([(a,b)]))\ng",
"_____no_output_____"
]
],
[
[
"## Sequential",
"_____no_output_____"
],
[
"Since it's common to want to connect a series sequentially, we provide some simple shortcuts for this functionality.",
"_____no_output_____"
]
],
[
[
"#export\ndef graph_edges_seq(items):\n \"Add edges between each pair of nodes in `items`\"\n return L(items[i].connect(items[i+1]) for i in range(len(items)-1))",
"_____no_output_____"
],
[
"#export\n@patch\ndef add_edges_seq(self:pydot.Graph, items):\n \"Add edges between each pair of nodes in `items`\"\n return self.add_items(*graph_edges_seq(items))",
"_____no_output_____"
],
[
"g = Dot()\nits = g.add_items('a','b','c')\ng.add_edges_seq(its)\ng",
"_____no_output_____"
],
[
"#export\ndef seq_cluster(items, cluster_label='', **kwargs):\n sg = Cluster(cluster_label)\n its = sg.add_items(*items, **kwargs)\n sg.add_edges_seq(its)\n return sg",
"_____no_output_____"
],
[
"g = Dot()\ng.add_item(seq_cluster(['a','b','c'], 'clust'))\ng.add_item(seq_cluster(['1','2','c'], 'clust2'))\ng",
"_____no_output_____"
],
[
"g = Dot()\ng.add_item(seq_cluster(['a','b','c'], 'clust'))\ng",
"_____no_output_____"
],
[
"sg1 = seq_cluster(['a','b','c'], 'clust1')\nsg2 = seq_cluster(['a1','a2',sg1], 'clust2')\ng = Dot()\ng.add_item(sg2)\ng",
"_____no_output_____"
],
[
"sg1 = seq_cluster(['inp'], 'clust1')\nsg2 = seq_cluster(['a','b','c'], 'clust2')\nsg2.add_items(sg1.connect(sg2[-1]), sg1.connect(sg2))\ng = Dot()\ng.add_items(sg1,sg2)\ng",
"_____no_output_____"
],
[
"# export\ndef Point(label='pnt', **kwargs):\n \"Create a `Node` with a 'point' shape\"\n return (Node('pnt', shape='point'))",
"_____no_output_____"
],
[
"sg = Cluster('clus')\na,b,c = sg.add_items('a','b','c')\np = sg.add_item(Point())\nsg.add_item(p.connect(c))\nsg.add_items(p.connect(a), a.connect(b), b.connect(c))\n\ng = Dot()\ng.add_items(sg)\ng",
"_____no_output_____"
]
],
[
[
"# Export -",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbdev.export import notebook2script\nnotebook2script()",
"Converted 00_core.ipynb.\nConverted index.ipynb.\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d04d2f0d858469a01fddeaa61c49b45767d50dfa | 319,061 | ipynb | Jupyter Notebook | iPython Notebooks/Introduction to Pandas Part 1.ipynb | AlpYuzbasioglu/Zeppelin-Notebooks | 13a374f3826d2b4d178bbbf47e279b898c4f1e5f | [
"Apache-2.0"
] | 1 | 2018-05-17T13:12:46.000Z | 2018-05-17T13:12:46.000Z | iPython Notebooks/Introduction to Pandas Part 1.ipynb | alpyuzbasioglu/Zeppelin-Notebooks | 13a374f3826d2b4d178bbbf47e279b898c4f1e5f | [
"Apache-2.0"
] | null | null | null | iPython Notebooks/Introduction to Pandas Part 1.ipynb | alpyuzbasioglu/Zeppelin-Notebooks | 13a374f3826d2b4d178bbbf47e279b898c4f1e5f | [
"Apache-2.0"
] | null | null | null | 29.992574 | 1,650 | 0.429457 | [
[
[
"# Data Science Boot Camp",
"_____no_output_____"
],
[
"## Introduction to Pandas Part 1",
"_____no_output_____"
],
[
"* __Pandas__ is a Python package providing fast, flexible, and expressive data structures designed to work with *relational* or *labeled* data both.<br>\n<br>\n* It is a fundamental high-level building block for doing practical, real world data analysis in Python.<br>\n<br>\n* Python has always been great for prepping and munging data, but it's never been great for analysis - you'd usually end up using R or loading it into a database and using SQL. Pandas makes Python great for analysis.<br>",
"_____no_output_____"
],
[
"* Pandas is well suited for:<br>\n<br>\n * Tabular data with heterogeneously-typed columns, as in an SQL table or Excel spreadsheet<br>\n <br>\n * Ordered and unordered (not necessarily fixed-frequency) time series data.<br>\n <br>\n * Arbitrary matrix data (homogeneously typed or heterogeneous) with row and column labels<br>\n <br>\n * Any other form of observational / statistical data sets. The data actually need not be labeled at all to be placed into a pandas data structure<br>\n\n",
"_____no_output_____"
],
[
"* Key features of Pandas:<br>\n<br>\n * Easy handling of __missing data__<br>\n<br>\n * __Size mutability__: columns can be inserted and deleted from DataFrame and higher dimensional objects.<br>\n<br>\n * Automatic and explicit __data alignment__: objects can be explicitly aligned to a set of labels, or the data can be aligned automatically.<br>\n<br>\n * __Fast__ and __efficient__ DataFrame object with default and customized indexing.<br>\n<br>\n * __Reshaping__ and __pivoting__ of data sets.<br>",
"_____no_output_____"
],
[
"* Key features of Pandas (Continued):<br>\n<br>\n * Label-based __slicing__, __indexing__, __fancy indexing__ and __subsetting__ of large data sets.<br>\n<br>\n * __Group by__ data for aggregation and transformations.<br>\n<br>\n * High performance __merging__ and __joining__ of data.<br>\n<br>\n * __IO Tools__ for loading data into in-memory data objects from different file formats.<br>\n<br>\n * __Time Series__ functionality.<br>",
"_____no_output_____"
],
[
"* First thing we have to import pandas and numpy library under the aliases pd and np.<br>\n<br>\n* Then check our pandas version.<br>",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport pandas as pd\nimport numpy as np\n\nprint(pd.__version__)",
"0.22.0\n"
]
],
[
[
"* Let's set some options for `Pandas`",
"_____no_output_____"
]
],
[
[
"pd.set_option('display.notebook_repr_html', False)\npd.set_option('max_columns', 10)\npd.set_option('max_rows', 10)",
"_____no_output_____"
]
],
[
[
"## Pandas Objects",
"_____no_output_____"
],
[
"* At the very basic level, Pandas objects can be thought of as enhanced versions of NumPy structured arrays in which the rows and columns are identified with labels rather than simple integer indices.<br>\n<br>\n* There are three fundamental Pandas data structures: the Series, DataFrame, and Index.",
"_____no_output_____"
],
[
"### Series",
"_____no_output_____"
],
[
"* A __Series__ is a single vector of data (like a NumPy array) with an *index* that labels each element in the vector.<br><br>\n* It can be created from a list or array as follows:",
"_____no_output_____"
]
],
[
[
"counts = pd.Series([15029231, 7529491, 7499740, 5445026, 2702492, 2742534, 4279677, 2133548, 2146129])\ncounts",
"_____no_output_____"
]
],
[
[
"* If an index is not specified, a default sequence of integers is assigned as the index. A NumPy array comprises the values of the `Series`, while the index is a pandas `Index` object.",
"_____no_output_____"
]
],
[
[
"counts.values",
"_____no_output_____"
],
[
"counts.index",
"_____no_output_____"
]
],
[
[
"* We can assign meaningful labels to the index, if they are available:",
"_____no_output_____"
]
],
[
[
"population = pd.Series([15029231, 7529491, 7499740, 5445026, 2702492, 2742534, 4279677, 2133548, 2146129], \n index=['Istanbul Total', 'Istanbul Males', 'Istanbul Females', 'Ankara Total', 'Ankara Males', 'Ankara Females', 'Izmir Total', 'Izmir Males', 'Izmir Females'])\npopulation",
"_____no_output_____"
]
],
[
[
"* These labels can be used to refer to the values in the `Series`.",
"_____no_output_____"
]
],
[
[
"population['Istanbul Total']",
"_____no_output_____"
],
[
"mask = [city.endswith('Females') for city in population.index]\nmask ",
"_____no_output_____"
],
[
"population[mask]",
"_____no_output_____"
]
],
[
[
"* As you noticed that we can masking in series.<br>\n<br>\n* Also we can still use positional indexing even we assign meaningful labels to the index, if we wish.<br>",
"_____no_output_____"
]
],
[
[
"population[0]",
"_____no_output_____"
]
],
[
[
"* We can give both the array of values and the index meaningful labels themselves:<br>",
"_____no_output_____"
]
],
[
[
"population.name = 'population'\npopulation.index.name = 'city'\npopulation",
"_____no_output_____"
]
],
[
[
"* Also, NumPy's math functions and other operations can be applied to Series without losing the data structure.<br>",
"_____no_output_____"
]
],
[
[
"np.ceil(population / 1000000) * 1000000",
"_____no_output_____"
]
],
[
[
"* We can also filter according to the values in the `Series` like in the Numpy's:",
"_____no_output_____"
]
],
[
[
"population[population>3000000]",
"_____no_output_____"
]
],
[
[
"* A `Series` can be thought of as an ordered key-value store. In fact, we can create one from a `dict`:",
"_____no_output_____"
]
],
[
[
"populationDict = {'Istanbul Total': 15029231, 'Ankara Total': 5445026, 'Izmir Total': 4279677}\npd.Series(populationDict)",
"_____no_output_____"
]
],
[
[
"* Notice that the `Series` is created in key-sorted order.<br>\n<br>\n* If we pass a custom index to `Series`, it will select the corresponding values from the dict, and treat indices without corrsponding values as missing. Pandas uses the `NaN` (not a number) type for missing values.<br>",
"_____no_output_____"
]
],
[
[
"population2 = pd.Series(populationDict, index=['Istanbul Total','Ankara Total','Izmir Total','Bursa Total', 'Antalya Total'])\npopulation2",
"_____no_output_____"
],
[
"population2.isnull()",
"_____no_output_____"
]
],
[
[
"* Critically, the labels are used to **align data** when used in operations with other Series objects:",
"_____no_output_____"
]
],
[
[
"population + population2",
"_____no_output_____"
]
],
[
[
"* Contrast this with NumPy arrays, where arrays of the same length will combine values element-wise; adding Series combined values with the same label in the resulting series. Notice also that the missing values were propogated by addition.",
"_____no_output_____"
],
[
"### DataFrame",
"_____no_output_____"
],
[
"* A `DataFrame` represents a tabular, spreadsheet-like data structure containing an or- dered collection of columns, each of which can be a different value type (numeric, string, boolean, etc.).<br>\n<br>\n* `DataFrame` has both a row and column index; it can be thought of as a dict of Series (one for all sharing the same index).",
"_____no_output_____"
]
],
[
[
"areaDict = {'Istanbul': 5461, 'Ankara': 25632, 'Izmir': 11891,\n 'Bursa': 10813, 'Antalya': 20177}\narea = pd.Series(areaDict)\narea",
"_____no_output_____"
],
[
"populationDict = {'Istanbul': 15029231, 'Ankara': 5445026, 'Izmir': 4279677, 'Bursa': 2936803, 'Antalya': 2364396}\npopulation3 = pd.Series(populationDict)\npopulation3",
"_____no_output_____"
]
],
[
[
"* Now that we have 2 Series population by cities and areas by cities, we can use a dictionary to construct a single two-dimensional object containing this information:",
"_____no_output_____"
]
],
[
[
"cities = pd.DataFrame({'population': population3, 'area': area})\ncities",
"_____no_output_____"
]
],
[
[
"* Or we can create our cities `DataFrame` with lists and indexes.",
"_____no_output_____"
]
],
[
[
"cities = pd.DataFrame({\n 'population':[15029231, 5445026, 4279677, 2936803, 2364396],\n 'area':[5461, 25632, 11891, 10813, 20177],\n 'city':['Istanbul', 'Ankara', 'Izmir', 'Bursa', 'Antalya']\n })\ncities",
"_____no_output_____"
]
],
[
[
"Notice the `DataFrame` is sorted by column name. We can change the order by indexing them in the order we desire:",
"_____no_output_____"
]
],
[
[
"cities[['city','area', 'population']]",
"_____no_output_____"
]
],
[
[
"* A `DataFrame` has a second index, representing the columns:",
"_____no_output_____"
]
],
[
[
"cities.columns",
"_____no_output_____"
]
],
[
[
"* If we wish to access columns, we can do so either by dictionary like indexing or by attribute:",
"_____no_output_____"
]
],
[
[
"cities['area']",
"_____no_output_____"
],
[
"cities.area",
"_____no_output_____"
],
[
"type(cities.area)",
"_____no_output_____"
],
[
"type(cities[['area']])",
"_____no_output_____"
]
],
[
[
"* Notice this is different than with `Series`, where dictionary like indexing retrieved a particular element (row). If we want access to a row in a `DataFrame`, we index its `iloc` attribute.\n",
"_____no_output_____"
]
],
[
[
"cities.iloc[2]",
"_____no_output_____"
],
[
"cities.iloc[0:2]",
"_____no_output_____"
]
],
[
[
"Alternatively, we can create a `DataFrame` with a dict of dicts:",
"_____no_output_____"
]
],
[
[
"cities = pd.DataFrame({\n 0: {'city': 'Istanbul', 'area': 5461, 'population': 15029231},\n 1: {'city': 'Ankara', 'area': 25632, 'population': 5445026},\n 2: {'city': 'Izmir', 'area': 11891, 'population': 4279677},\n 3: {'city': 'Bursa', 'area': 10813, 'population': 2936803},\n 4: {'city': 'Antalya', 'area': 20177, 'population': 2364396},\n \n})\ncities",
"_____no_output_____"
]
],
[
[
"* We probably want this transposed:",
"_____no_output_____"
]
],
[
[
"cities = cities.T\ncities",
"_____no_output_____"
]
],
[
[
"* It's important to note that the Series returned when a DataFrame is indexted is merely a **view** on the DataFrame, and not a copy of the data itself. <br>\n<br>\n* So you must be cautious when manipulating this data just like in the Numpy.<br>",
"_____no_output_____"
]
],
[
[
"areas = cities.area\nareas",
"_____no_output_____"
],
[
"areas[3] = 0\nareas",
"_____no_output_____"
],
[
"cities",
"_____no_output_____"
]
],
[
[
"* It's a usefull behavior for large data sets but for preventing this you can use copy method.<br>",
"_____no_output_____"
]
],
[
[
"areas = cities.area.copy()\nareas[3] = 10813\nareas",
"_____no_output_____"
],
[
"cities",
"_____no_output_____"
]
],
[
[
"* We can create or modify columns by assignment:<br>",
"_____no_output_____"
]
],
[
[
"cities.area[3] = 10813\ncities",
"_____no_output_____"
],
[
"cities['year'] = 2017\ncities",
"_____no_output_____"
]
],
[
[
"* But note that, we can not use the attribute indexing method to add a new column:<br>",
"_____no_output_____"
]
],
[
[
"cities.projection2020 = 20000000\ncities",
"_____no_output_____"
]
],
[
[
"* It creates another variable.<br>",
"_____no_output_____"
]
],
[
[
"cities.projection2020 ",
"_____no_output_____"
]
],
[
[
"* Specifying a `Series` as a new columns cause its values to be added according to the `DataFrame`'s index:",
"_____no_output_____"
]
],
[
[
"populationIn2000 = pd.Series([11076840, 3889199, 3431204, 2150571, 1430539])\npopulationIn2000",
"_____no_output_____"
],
[
"cities['population_2000'] = populationIn2000\ncities",
"_____no_output_____"
]
],
[
[
"* Other Python data structures (ones without an index) need to be the same length as the `DataFrame`:",
"_____no_output_____"
]
],
[
[
"populationIn2007 = [12573836, 4466756, 3739353, 2439876]\ncities['population_2007'] = populationIn2007",
"_____no_output_____"
]
],
[
[
"* We can use `del` to remove columns, in the same way `dict` entries can be removed:",
"_____no_output_____"
]
],
[
[
"cities",
"_____no_output_____"
],
[
"del cities['population_2000']\ncities",
"_____no_output_____"
]
],
[
[
"* We can extract the underlying data as a simple `ndarray` by accessing the `values` attribute:<br>",
"_____no_output_____"
]
],
[
[
"cities.values",
"_____no_output_____"
]
],
[
[
"* Notice that because of the mix of string and integer (and could be`NaN`) values, the dtype of the array is `object`.",
"_____no_output_____"
],
[
"* The dtype will automatically be chosen to be as general as needed to accomodate all the columns.",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({'integers': [1,2,3], 'floatNumbers':[0.5, -1.25, 2.5]})\ndf",
"_____no_output_____"
],
[
"print(df.values.dtype)\ndf.values",
"float64\n"
]
],
[
[
"* Pandas uses a custom data structure to represent the indices of Series and DataFrames.",
"_____no_output_____"
]
],
[
[
"cities.index",
"_____no_output_____"
]
],
[
[
"* Index objects are immutable:",
"_____no_output_____"
]
],
[
[
"cities.index[0] = 15",
"_____no_output_____"
]
],
[
[
"* This is so that Index objects can be shared between data structures without fear that they will be changed.\n* That means you can move, copy your meaningful labels to other `DataFrames`",
"_____no_output_____"
]
],
[
[
"cities",
"_____no_output_____"
],
[
"cities.index = population2.index\ncities",
"_____no_output_____"
]
],
[
[
"## Importing data",
"_____no_output_____"
],
[
"* A key, but often under appreciated, step in data analysis is importing the data that we wish to analyze.<br>\n<br>\n* Though it is easy to load basic data structures into Python using built-in tools or those provided by packages like NumPy, it is non-trivial to import structured data well, and to easily convert this input into a robust data structure.<br>\n<br>\n* Pandas provides a convenient set of functions for importing tabular data in a number of formats directly into a `DataFrame` object. ",
"_____no_output_____"
],
[
"* Let's start with some more population data, stored in csv format.",
"_____no_output_____"
]
],
[
[
"!cat data/population.csv",
"Provinces;2000;2001;2002;2003;2004;2005;2006;2007;2008;2009;2010;2011;2012;2013;2014;2015;2016;2017\r\r\nTotal;64729501;65603160;66401851;67187251;68010215;68860539;69729967;70586256;71517100;72561312;73722988;74724269;75627384;76667864;77695904;78741053;79814871;80810525\r\r\nAdana;1879695;1899324;1916637;1933428;1951142;1969512;1988277;2006650;2026319;2062226;2085225;2108805;2125635;2149260;2165595;2183167;2201670;2216475\r\r\nAdıyaman;568432;571180;573149;574886;576808;578852;580926;582762;585067;588475;590935;593931;595261;597184;597835;602774;610484;615076\r\r\nAfyonkarahisar;696292;698029;698773;699193;699794;700502;701204;701572;697365;701326;697559;698626;703948;707123;706371;709015;714523;715693\r\r\nAğrı;519190;521514;523123;524514;526070;527732;529417;530879;532180;537665;542022;555479;552404;551177;549435;547210;542255;536285\r\r\nAmasya;333927;333768;333110;332271;331491;330739;329956;328674;323675;324268;334786;323079;322283;321977;321913;322167;326351;329888\r\r\nAnkara;3889199;3971642;4050309;4128889;4210596;4294678;4380736;4466756;4548939;4650802;4771716;4890893;4965542;5045083;5150072;5270575;5346518;5445026\r\r\nAntalya;1430539;1480282;1529110;1578367;1629338;1681656;1735239;1789295;1859275;1919729;1978333;2043482;2092537;2158265;2222562;2288456;2328555;2364396\r\r\nArtvin;167909;168184;168215;168164;168153;168164;168170;168092;166584;165580;164759;166394;167082;169334;169674;168370;168068;166143\r\r\nAydın;870460;881911;892345;902594;913340;924446;935800;946971;965500;979155;989862;999163;1006541;1020957;1041979;1053506;1068260;1080839\r\r\nBalıkesir;1069260;1077362;1084072;1090411;1097187;1104261;1111475;1118313;1130276;1140085;1152323;1154314;1160731;1162761;1189057;1186688;1196176;1204824\r\r\nBilecik;197625;198736;199580;200346;201182;202063;202960;203777;193169;202061;225381;203849;204116;208888;209925;212361;218297;221693\r\r\nBingöl;240337;242183;243717;245168;246718;248336;249986;251552;256091;255745;255170;262263;262507;265514;266019;267184;269560;273354\r\r\nBitlis;318886;320555;321791;322898;324114;325401;326709;327886;326897;328489;328767;336624;337253;337156;338023;340449;341225;341474\r\r\nBolu;255576;257926;259953;261902;263967;266114;268305;270417;268882;271545;271208;276506;281080;283496;284789;291095;299896;303184\r\r\nBurdur;246060;247106;247811;248412;249090;249816;250552;251181;247437;251550;258868;250527;254341;257267;256898;258339;261401;264779\r\r\nBursa;2150571;2192169;2231582;2270852;2311735;2353834;2396916;2439876;2507963;2550645;2605495;2652126;2688171;2740970;2787539;2842547;2901396;2936803\r\r\nÇanakkale;449418;453632;457280;460792;464511;468375;472320;476128;474791;477735;490397;486445;493691;502328;511790;513341;519793;530417\r\r\nÇankırı;169044;169955;170637;171252;171924;172635;173358;174012;176093;185019;179067;177211;184406;190909;183550;180945;183880;186074\r\r\nÇorum;567609;566094;563698;560968;558300;555649;552911;549828;545444;540704;535405;534578;529975;532080;527220;525180;527863;528422\r\r\nDenizli;845493;854958;863396;871614;880267;889229;898387;907325;917836;926362;931823;942278;950557;963464;978700;993442;1005687;1018735\r\r\nDiyarbakır;1317750;1338378;1357550;1376518;1396333;1416775;1437684;1460714;1492828;1515011;1528958;1570943;1592167;1607437;1635048;1654196;1673119;1699901\r\r\nEdirne;392134;393292;393896;394320;394852;395449;396047;396462;394644;395463;390428;399316;399708;398582;400280;402537;401701;406855\r\r\nElazığ;517551;521467;524710;527774;531048;534467;537954;541258;547562;550667;552646;558556;562703;568239;568
753;574304;578789;583671\r\r\nErzincan;206815;208015;208937;209779;210694;211658;212639;213538;210645;213288;224949;215277;217886;219996;223633;222918;226032;231511\r\r\nErzurum;801287;800311;798119;795482;792968;790505;787952;784941;774967;774207;769085;780847;778195;766729;763320;762321;762021;760476\r\r\nEskişehir;651672;662354;672328;682212;692529;703168;714051;724849;741739;755427;764584;781247;789750;799724;812320;826716;844842;860620\r\r\nGaziantep;1292817;1330205;1366581;1403165;1441079;1480026;1519905;1560023;1612223;1653670;1700763;1753596;1799558;1844438;1889466;1931836;1974244;2005515\r\r\nGiresun;410946;412428;413335;414062;414909;415830;416760;417505;421766;421860;419256;419498;419555;425007;429984;426686;444467;437393\r\r\nGümüşhane;116008;118147;120166;122175;124267;126423;128628;130825;131367;130976;129618;132374;135216;141412;146353;151449;172034;170173\r\r\nHakkari;223264;226676;229839;232966;236234;239606;243055;246469;258590;256761;251302;272165;279982;273041;276287;278775;267813;275761\r\r\nHatay;1280457;1296401;1310828;1324961;1339798;1355144;1370831;1386224;1413287;1448418;1480571;1474223;1483674;1503066;1519836;1533507;1555165;1575226\r\r\nIsparta;418507;419307;419505;419502;419601;419758;419905;419845;407463;420796;448298;411245;416663;417774;418780;421766;427324;433830\r\r\nMersin;1488755;1505196;1519824;1534060;1549054;1564588;1580460;1595938;1602908;1640888;1647899;1667939;1682848;1705774;1727255;1745221;1773852;1793931\r\r\nİstanbul;11076840;11292009;11495948;11699172;11910733;12128577;12351506;12573836;12697164;12915158;13255685;13624240;13854740;14160467;14377018;14657434;14804116;15029231\r\r\nİzmir;3431204;3477209;3519233;3560544;3603838;3648575;3694316;3739353;3795978;3868308;3948848;3965232;4005459;4061074;4113072;4168415;4223545;4279677\r\r\nKars;326292;324908;323005;320898;318812;316723;314570;312205;312128;306536;301766;305755;304821;300874;296466;292660;289786;287654\r\r\nKastamonu;351582;353271;354479;355541;356719;357972;359243;360366;360424;359823;361222;359759;359808;368093;368907;372633;376945;372373\r\r\nKayseri;1038671;1056995;1074221;1091336;1109179;1127566;1146378;1165088;1184386;1205872;1234651;1255349;1274968;1295355;1322376;1341056;1358980;1376722\r\r\nKırklareli;323427;325213;326561;327782;329116;330523;331955;333256;336942;333179;332791;340199;341218;340559;343723;346973;351684;356050\r\r\nKırşehir;221473;222028;222267;222403;222596;222824;223050;223170;222735;223102;221876;221015;221209;223498;222707;225562;229975;234529\r\r\nKocaeli;1192053;1226460;1259932;1293594;1328481;1364317;1401013;1437926;1490358;1522408;1560138;1601720;1634691;1676202;1722795;1780055;1830772;1883270\r\r\nKonya;1835987;1855057;1871862;1888154;1905345;1923174;1941386;1959082;1969868;1992675;2013845;2038555;2052281;2079225;2108808;2130544;2161303;2180149\r\r\nKütahya;592921;592607;591405;589883;588464;587092;585666;583910;565884;571804;590496;564264;573421;572059;571554;571463;573642;572256\r\r\nMalatya;685533;691399;696387;701155;706222;711496;716879;722065;733789;736884;740643;757930;762366;762538;769544;772904;781305;786676\r\r\nManisa;1276590;1284241;1290180;1295630;1301542;1307760;1314090;1319920;1316750;1331957;1379484;1340074;1346162;1359463;1367905;1380366;1396945;1413041\r\r\nKahramanmaraş;937074;947317;956417;965268;974592;984254;994126;1004414;1029298;1037491;1044816;1054210;1063174;1075706;1089038;1096610;1112634;1127623\r\r\nMardin;709316;715211;720195;724946;730002;735267;740641;745778;750697;737852;744606;764033;773026;779738;788996;796591;796237;80
9719\r\r\nMuğla;663606;678204;692171;706136;720650;735582;750865;766156;791424;802381;817503;838324;851145;866665;894509;908877;923773;938751\r\r\nMuş;403236;404138;404462;404596;404832;405127;405416;405509;404309;404484;406886;414706;413260;412553;411216;408728;406501;404544\r\r\nNevşehir;275262;276309;276971;277514;278138;278814;279498;280058;281699;284025;282337;283247;285190;285460;286250;286767;290895;292365\r\r\nNiğde;321330;323181;324600;325894;327302;328786;330295;331677;338447;339921;337931;337553;340270;343658;343898;346114;351468;352727\r\r\nOrdu;705746;708079;709420;710444;711670;713018;714375;715409;719278;723507;719183;714390;741371;731452;724268;728949;750588;742341\r\r\nRize;307133;308800;310052;311181;312417;313722;315049;316252;319410;319569;319637;323012;324152;328205;329779;328979;331048;331041\r\r\nSakarya;750485;762848;774397;785845;797793;810112;822715;835222;851292;861570;872872;888556;902267;917373;932706;953181;976948;990214\r\r\nSamsun;1191926;1198574;1203611;1208179;1213165;1218424;1223774;1228959;1233677;1250076;1252693;1251729;1251722;1261810;1269989;1279884;1295927;1312990\r\r\nSiirt;270832;273982;276806;279562;282461;285462;288529;291528;299819;303622;300695;310468;310879;314153;318366;320351;322664;324394\r\r\nSinop;194318;195151;195715;196196;196739;197319;197908;198412;200791;201134;202740;203027;201311;204568;204526;204133;205478;207427\r\r\nSivas;651825;650946;649078;646845;644709;642614;640442;638464;631112;633347;642224;627056;623535;623824;623116;618617;621224;621301\r\r\nTekirdağ;577812;598658;619152;639837;661237;683199;705692;728396;770772;783310;798109;829873;852321;874475;906732;937910;972875;1005463\r\r\nTokat;641033;639371;636715;633682;630722;627781;624744;620722;617158;624439;617802;608299;613990;598708;597920;593990;602662;602086\r\r\nTrabzon;720620;724340;727080;729529;732221;735072;737969;740569;748982;765127;763714;757353;757898;758237;766782;768417;779379;786326\r\r\nTunceli;82554;82871;83074;83241;83433;83640;83849;84022;86449;83061;76699;85062;86276;85428;86527;86076;82193;82498\r\r\nŞanlıurfa;1257753;1294842;1330964;1367305;1404961;1443639;1483244;1523099;1574224;1613737;1663371;1716254;1762075;1801980;1845667;1892320;1940627;1985753\r\r\nUşak;320535;322814;324673;326417;328287;330243;332237;334115;334111;335860;338019;339731;342269;346508;349459;353048;358736;364971\r\r\nVan;895836;908296;919727;930984;942771;954945;967394;979671;1004369;1022310;1035418;1022532;1051975;1070113;1085542;1096397;1100190;1106891\r\r\nYozgat;544446;538313;531220;523696;516096;508398;500487;492127;484206;487365;476096;465696;453211;444211;432560;419440;421041;418650\r\r\nZonguldak;630323;629346;627407;625114;622912;620744;618500;615890;619151;619812;619703;612406;606527;601567;598796;595907;597524;596892\r\r\nAksaray;351474;353939;355942;357819;359834;361941;364089;366109;370598;376907;377505;378823;379915;382806;384252;386514;396673;402404\r\r\nBayburt;75221;75517;75709;75868;76050;76246;76444;76609;75675;74710;74412;76724;75797;75620;80607;78550;90154;80417\r\r\nKaraman;214461;216318;217902;219417;221026;222700;224409;226049;230145;231872;232633;234005;235424;237939;240362;242196;245610;246672\r\r\nKırıkkale;287427;286900;285933;284803;283711;282633;281518;280234;279325;280834;276647;274992;274727;274658;271092;270271;277984;278749\r\r\nBatman;408820;418186;427172;436165;445508;455118;464954;472487;485616;497998;510200;524499;534205;547581;557593;566633;576899;585252\r\r\nŞırnak;362700;370314;377574;384824;392364;400123;408065;416001;429287;430424;430109
;457997;466982;475255;488966;490184;483788;503236\r\r\nBartın;175982;177060;177903;178678;179519;180401;181300;182131;185368;188449;187758;187291;188436;189139;189405;190708;192389;193577\r\r\nArdahan;122409;121305;119993;118590;117178;115750;114283;112721;112242;108169;105454;107455;106643;102782;100809;99265;98335;97096\r\r\nIğdır;174285;175550;176588;177563;178609;179701;180815;181866;184025;183486;184418;188857;190409;190424;192056;192435;192785;194775\r\r\nYalova;144923;150027;155041;160099;165333;170705;176207;181758;197412;202531;203741;206535;211799;220122;226514;233009;241665;251203\r\r\nKarabük;205172;207241;209056;210812;212667;214591;216557;218463;216248;218564;227610;219728;225145;230251;231333;236978;242347;244453\r\r\nKilis;109698;111024;112219;113387;114615;115886;117185;118457;120991;122104;123135;124452;124320;128586;128781;130655;130825;136319\r\r\nOsmaniye;411163;417418;423214;428943;434930;441108;447428;452880;464704;471804;479221;485357;492135;498981;506807;512873;522175;527724\r\r\nDüzce;296712;300686;304316;307884;311623;315487;319438;323328;328611;335156;338188;342146;346493;351509;355549;360388;370371;377610"
]
],
[
[
"* This table can be read into a DataFrame using `read_csv`:",
"_____no_output_____"
]
],
[
[
"populationDF = pd.read_csv(\"data/population.csv\")\npopulationDF",
"_____no_output_____"
]
],
[
[
"* Notice that `read_csv` automatically considered the first row in the file to be a header row.<br>\n<br>\n* We can override default behavior by customizing some the arguments, like `header`, `names` or `index_col`.<br>",
"_____no_output_____"
],
[
"* `read_csv` is just a convenience function for `read_table`, since csv is such a common format:<br>",
"_____no_output_____"
]
],
[
[
"pd.set_option('max_columns', 5)\npopulationDF = pd.read_table(\"data/population_missing.csv\", sep=';')\npopulationDF",
"_____no_output_____"
]
],
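[
[
"# A hedged illustration of the `header` / `names` arguments mentioned above: passing\n# `names` together with `header=0` replaces the file's header row with our own labels.\ncolNames = ['Province'] + [str(year) for year in range(2000, 2018)]\npd.read_csv(\"data/population.csv\", sep=';', header=0, names=colNames, nrows=3)",
"_____no_output_____"
]
],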
[
[
"* The `sep` argument can be customized as needed to accomodate arbitrary separators.<br>",
"_____no_output_____"
],
[
"* If we have sections of data that we do not wish to import (for example, in this example empty rows), we can populate the `skiprows` argument:",
"_____no_output_____"
]
],
[
[
"populationDF = pd.read_csv(\"data/population_missing.csv\", sep=';', skiprows=[1,2])\npopulationDF",
"_____no_output_____"
]
],
[
[
"* For a more useful index, we can specify the first column, which provide a unique index to the data.",
"_____no_output_____"
]
],
[
[
"populationDF = pd.read_csv(\"data/population.csv\", sep=';', index_col='Provinces')\npopulationDF.index",
"_____no_output_____"
]
],
[
[
"Conversely, if we only want to import a small number of rows from, say, a very large data file we can use `nrows`:",
"_____no_output_____"
]
],
[
[
"pd.read_csv(\"data/population.csv\", sep=';', nrows=4)",
"_____no_output_____"
]
],
[
[
"* Most real-world data is incomplete, with values missing due to incomplete observation, data entry or transcription error, or other reasons. Pandas will automatically recognize and parse common missing data indicators, including `NA`, `NaN`, `NULL`.",
"_____no_output_____"
]
],
[
[
"pd.read_csv(\"data/population_missing.csv\", sep=';').head(10)",
"_____no_output_____"
]
],
[
[
"Above, Pandas recognized `NaN` and an empty field as missing data.",
"_____no_output_____"
]
],
[
[
"pd.isnull(pd.read_csv(\"data/population_missing.csv\", sep=';')).head(10)",
"_____no_output_____"
]
],
[
[
"### Microsoft Excel",
"_____no_output_____"
],
[
"* Since so much financial and scientific data ends up in Excel spreadsheets, Pandas' ability to directly import Excel spreadsheets is valuable. <br>\n<br>\n* This support is contingent on having one or two dependencies (depending on what version of Excel file is being imported) installed: `xlrd` and `openpyxl`.<br>\n<br>\n* Importing Excel data to Pandas is a two-step process. First, we create an `ExcelFile` object using the path of the file: ",
"_____no_output_____"
]
],
[
[
"excel_file = pd.ExcelFile('data/population.xlsx')\nexcel_file",
"_____no_output_____"
]
],
[
[
"* Then, since modern spreadsheets consist of one or more \"sheets\", we parse the sheet with the data of interest:",
"_____no_output_____"
]
],
[
[
"excelDf = excel_file.parse(\"Sheet 1 \")\nexcelDf",
"_____no_output_____"
]
],
[
[
"* Also, there is a `read_excel` conveneince function in Pandas that combines these steps into a single call:",
"_____no_output_____"
]
],
[
[
"excelDf2 = pd.read_excel('data/population.xlsx', sheet_name='Sheet 1 ')\nexcelDf2.head(10)",
"_____no_output_____"
]
],
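[
[
"# Other sources can be loaded in a similar way; a minimal, self-contained sketch using\n# `read_json` (the JSON text below is made up for illustration). Databases work analogously\n# via `read_sql` and a database connection.\nfrom io import StringIO\njsonText = StringIO('{\"city\": {\"0\": \"Istanbul\", \"1\": \"Ankara\"}, \"population\": {\"0\": 15029231, \"1\": 5445026}}')\npd.read_json(jsonText)",
"_____no_output_____"
]
],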
[
[
"* In, the first day we learned how to read and write `JSON` Files, with that way you can also import JSON files to `DataFrames`. \n\n* Also, you can connect to databases and import your data into `DataFrames` by help of 3rd party libraries.",
"_____no_output_____"
],
[
"## Pandas Fundamentals",
"_____no_output_____"
],
[
"* This section introduces the new user to the key functionality of Pandas that is required to use the software effectively.<br>\n<br>\n* For some variety, we will leave our population data behind and employ some `Superhero` data.<br>",
"_____no_output_____"
],
[
"* The data comes from Marvel Wikia.<br>\n<br>\n* The file has the following variables:<br>",
"_____no_output_____"
],
[
"<table>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:left;\">Variable</th>\n<th style=\"text-align:left;\">Definition</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td style=\"text-align:left;\">page_id</td>\n<td style=\"text-align:left;\">The unique identifier for that characters page within the wikia</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">name</td>\n<td style=\"text-align:left;\">The name of the character</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">urlslug</td>\n<td style=\"text-align:left;\">The unique url within the wikia that takes you to the character</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">ID</td>\n<td style=\"text-align:left;\">The identity status of the character (Secret Identity, Public identity No Dual Identity)</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">ALIGN</td>\n<td style=\"text-align:left;\">If the character is Good, Bad or Neutral</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">EYE</td>\n<td style=\"text-align:left;\">Eye color of the character</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">HAIR</td>\n<td style=\"text-align:left;\">Hair color of the character</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">SEX</td>\n<td style=\"text-align:left;\">Sex of the character (e.g. Male, Female, etc.)</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">GSM</td>\n<td style=\"text-align:left;\">If the character is a gender or sexual minority (e.g. Homosexual characters, bisexual characters)</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">ALIVE</td>\n<td style=\"text-align:left;\">If the character is alive or deceased</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">APPEARANCES</td>\n<td style=\"text-align:left;\">The number of appareances of the character in comic books (as of Sep. 2, 2014. Number will become increasingly out of date as time goes on.)</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">FIRST APPEARANCE</td>\n<td style=\"text-align:left;\">The month and year of the character's first appearance in a comic book, if available</td>\n</tr>\n<tr>\n<td style=\"text-align:left;\">YEAR</td>\n<td style=\"text-align:left;\">The year of the character's first appearance in a comic book, if available</td>\n</tr>\n</tbody>\n</table>",
"_____no_output_____"
]
],
[
[
"pd.set_option('max_columns', 12)\npd.set_option('display.notebook_repr_html', True)\nmarvelDF = pd.read_csv(\"data/marvel-wikia-data.csv\", index_col='page_id')\nmarvelDF.head(5)",
"_____no_output_____"
]
],
[
[
"* Notice that we specified the `page_id` column as the index, since it appears to be a unique identifier. We could try to create a unique index ourselves by trimming `name`:",
"_____no_output_____"
],
[
"* First, import the regex module of python.<br>\n<br>\n* Then, trim the name column with regex.<br> ",
"_____no_output_____"
]
],
[
[
"import re\npattern = re.compile('([a-zA-Z]|-|\\s|\\.|\\')*([a-zA-Z])')\nheroName = []\nfor name in marvelDF.name:\n match = re.search(pattern, name)\n if match: \n heroName.append(match.group())\n else:\n heroName.append(name)\nheroName",
"_____no_output_____"
]
],
[
[
"* This looks okay, let's copy '__marvelDF__' to '__marvelDF_newID__' and assign new indexes.<br>",
"_____no_output_____"
]
],
[
[
"marvelDF_newID = marvelDF.copy()\nmarvelDF_newID.index = heroName\nmarvelDF_newID.head(5)",
"_____no_output_____"
]
],
[
[
"* Let's check the uniqueness of ID's:",
"_____no_output_____"
]
],
[
[
"marvelDF_newID.index.is_unique",
"_____no_output_____"
]
],
[
[
"* So, indices need not be unique. Our choice is not unique because some of superheros have some differenet variations.",
"_____no_output_____"
]
],
[
[
"pd.Series(marvelDF_newID.index).value_counts()",
"_____no_output_____"
]
],
[
[
"* The most important consequence of a non-unique index is that indexing by label will return multiple values for some labels:",
"_____no_output_____"
]
],
[
[
"marvelDF_newID.loc['Peter Parker']",
"_____no_output_____"
]
],
[
[
"* Let's give a truly unique index by not triming `name` column:",
"_____no_output_____"
]
],
[
[
"hero_id = marvelDF.name\nmarvelDF_newID = marvelDF.copy()\nmarvelDF_newID.index = hero_id\nmarvelDF_newID.head()",
"_____no_output_____"
],
[
"marvelDF_newID.index.is_unique",
"_____no_output_____"
]
],
[
[
"* We can create meaningful indices more easily using a hierarchical index.<br>\n<br>\n* For now, we will stick with the numeric IDs as our index for '__NewID__' DataFrame.<br>",
"_____no_output_____"
]
],
[
[
"marvelDF_newID.index = range(16376)\nmarvelDF.index = marvelDF['name']\nmarvelDF_newID.head(5)",
"_____no_output_____"
]
],
[
[
"### Manipulating indices",
"_____no_output_____"
],
[
"* __Reindexing__ allows users to manipulate the data labels in a DataFrame. <br>\n<br>\n* It forces a DataFrame to conform to the new index, and optionally, fill in missing data if requested.<br>\n<br>\n* A simple use of `reindex` is reverse the order of the rows:",
"_____no_output_____"
]
],
[
[
"marvelDF_newID.reindex(marvelDF_newID.index[::-1]).head()",
"_____no_output_____"
]
],
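[
[
"# As the note below points out, `reindex` needs unique labels; a tiny made-up Series with a\n# duplicated index shows what goes wrong.\ndupSeries = pd.Series([1, 2, 3], index=['a', 'a', 'b'])\ntry:\n dupSeries.reindex(['a', 'b'])\nexcept ValueError as err:\n print(err)",
"_____no_output_____"
]
],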
[
[
"* Keep in mind that `reindex` does not work if we pass a non-unique index series.",
"_____no_output_____"
],
[
"* We can remove rows or columns via the `drop` method:",
"_____no_output_____"
]
],
[
[
"marvelDF_newID.shape",
"_____no_output_____"
],
[
"marvelDF_dropped = marvelDF_newID.drop([16375, 16374])",
"_____no_output_____"
],
[
"print(marvelDF_newID.shape)\nprint(marvelDF_dropped.shape)",
"(16376, 12)\n(16374, 12)\n"
],
[
"marvelDF_dropped = marvelDF_newID.drop(['EYE','HAIR'], axis=1)",
"_____no_output_____"
],
[
"print(marvelDF_newID.shape)\nprint(marvelDF_dropped.shape)",
"(16376, 12)\n(16376, 10)\n"
]
],
[
[
"## Indexing and Selection",
"_____no_output_____"
],
[
"* Indexing works like indexing in NumPy arrays, except we can use the labels in the `Index` object to extract values in addition to arrays of integers.<br>",
"_____no_output_____"
]
],
[
[
"heroAppearances = marvelDF.APPEARANCES\nheroAppearances",
"_____no_output_____"
]
],
[
[
"* Let's start with Numpy style indexing:",
"_____no_output_____"
]
],
[
[
"heroAppearances[:3]",
"_____no_output_____"
]
],
[
[
"* Indexing by Label:",
"_____no_output_____"
]
],
[
[
"heroAppearances[['Spider-Man (Peter Parker)','Hulk (Robert Bruce Banner)']]",
"_____no_output_____"
]
],
[
[
"* We can also slice with data labels, since they have an intrinsic order within the Index:",
"_____no_output_____"
]
],
[
[
"heroAppearances['Spider-Man (Peter Parker)':'Matthew Murdock (Earth-616)']",
"_____no_output_____"
]
],
[
[
"* You can change sliced array, and if you get warning it's ok.<br>",
"_____no_output_____"
]
],
[
[
"heroAppearances['Minister of Castile D\\'or (Earth-616)':'Yologarch (Earth-616)'] = 0\nheroAppearances",
"/Users/alpyuzbasioglu/anaconda2/lib/python2.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n"
]
],
[
[
"* In a `DataFrame` we can slice along either or both axes:",
"_____no_output_____"
]
],
[
[
"marvelDF[['SEX','ALIGN']]",
"_____no_output_____"
],
[
"mask = marvelDF.APPEARANCES>50\nmarvelDF[mask]",
"_____no_output_____"
]
],
[
[
"* The indexing field `loc` allows us to select subsets of rows and columns in an intuitive way:",
"_____no_output_____"
]
],
[
[
"marvelDF.loc['Spider-Man (Peter Parker)', ['ID', 'EYE', 'HAIR']]",
"_____no_output_____"
],
[
"marvelDF.loc[['Spider-Man (Peter Parker)','Thor (Thor Odinson)'],['ID', 'EYE', 'HAIR']]",
"_____no_output_____"
]
],
[
[
"## Operations",
"_____no_output_____"
],
[
"* `DataFrame` and `Series` objects allow for several operations to take place either on a single object, or between two or more objects.<br>\n<br>\n* For example, we can perform arithmetic on the elements of two objects, such as change in population across years:",
"_____no_output_____"
]
],
[
[
"populationDF",
"_____no_output_____"
],
[
"pop2000 = populationDF['2000']\npop2017 = populationDF['2017']",
"_____no_output_____"
],
[
"pop2000DF = pd.Series(pop2000.values, index=populationDF.index)\npop2017DF = pd.Series(pop2017.values, index=populationDF.index)",
"_____no_output_____"
],
[
"popDiff = pop2017DF - pop2000DF\npopDiff",
"_____no_output_____"
]
],
[
[
"* Let's assume our '__pop2000DF__' DataFrame has not row which index is \"Yalova\"",
"_____no_output_____"
]
],
[
[
"pop2000DF[\"Yalova\"] = np.nan\npop2000DF",
"_____no_output_____"
],
[
"popDiff = pop2017DF - pop2000DF\npopDiff",
"_____no_output_____"
]
],
[
[
"* For accessing not null elements, we can use Pandas'notnull function.",
"_____no_output_____"
]
],
[
[
"popDiff[popDiff.notnull()]",
"_____no_output_____"
]
],
[
[
"* We can add `fill_value` argument to insert a zero for home `NaN` values.",
"_____no_output_____"
]
],
[
[
"pop2017DF.subtract(pop2000DF, fill_value=0)",
"_____no_output_____"
]
],
[
[
"* We can also use functions to each column or row of a `DataFrame`",
"_____no_output_____"
]
],
[
[
"minPop = pop2017DF.values.min()\nindexOfMinPop = pop2017DF.index[pop2017DF.values.argmin()]\nprint(indexOfMinPop + \" -> \" + str(minPop))",
"Bayburt -> 80417\n"
],
[
"populationDF['2000'] = np.ceil(populationDF['2000'] / 10000) * 10000\npopulationDF",
"_____no_output_____"
]
],
[
[
"## Sorting and Ranking",
"_____no_output_____"
],
[
"* Pandas objects include methods for re-ordering data.",
"_____no_output_____"
]
],
[
[
"populationDF.sort_index(ascending=True).head()",
"_____no_output_____"
],
[
"populationDF.sort_index().head()",
"_____no_output_____"
],
[
"populationDF.sort_index(axis=1, ascending=False).head()",
"_____no_output_____"
]
],
[
[
"* We can also use `order` to sort a `Series` by value, rather than by label.",
"_____no_output_____"
],
[
"* For a `DataFrame`, we can sort according to the values of one or more columns using the `by` argument of `sort_values`:",
"_____no_output_____"
]
],
[
[
"populationDF[['2017','2001']].sort_values(by=['2017', '2001'],ascending=[False,True]).head(10)",
"_____no_output_____"
]
],
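[
[
"# As mentioned above, a `Series` can also be sorted by its values; here, the 2017 column:\npopulationDF['2017'].sort_values(ascending=False).head()",
"_____no_output_____"
]
],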
[
[
"* __Ranking__ does not re-arrange data, but instead returns an index that ranks each value relative to others in the Series.",
"_____no_output_____"
]
],
[
[
"populationDF['2010'].rank(ascending=False)",
"_____no_output_____"
],
[
"populationDF[['2017','2001']].sort_values(by=['2017', '2001'],ascending=[False,True]).rank(ascending=False)",
"_____no_output_____"
]
],
[
[
"* Ties are assigned the mean value of the tied ranks, which may result in decimal values.",
"_____no_output_____"
]
],
[
[
"pd.Series([50,60,50]).rank()",
"_____no_output_____"
]
],
[
[
"* Alternatively, you can break ties via one of several methods, such as by the order in which they occur in the dataset:",
"_____no_output_____"
]
],
[
[
"pd.Series([100,50,100]).rank(method='first')",
"_____no_output_____"
]
],
[
[
"* Calling the `DataFrame`'s `rank` method results in the ranks of all columns:",
"_____no_output_____"
]
],
[
[
"populationDF.rank(ascending=False)",
"_____no_output_____"
]
],
[
[
"## Hierarchical indexing",
"_____no_output_____"
],
[
"* Hierarchical indexing is an important feature of pandas enabling you to have multiple (two or more) index levels on an axis.<br>\n<br>\n* Somewhat abstractly, it provides a way for you to work with higher dimensional data in a lower dimensional form.<br>",
"_____no_output_____"
],
[
"* Let’s create a Series with a list of lists or arrays as the index:",
"_____no_output_____"
]
],
[
[
"data = pd.Series(np.random.randn(10),\n index=[['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'd', 'd'],\n [1, 2, 3, 1, 2, 3, 1, 2, 2, 3]])\ndata",
"_____no_output_____"
],
[
"data.index",
"_____no_output_____"
]
],
[
[
"* With a hierarchically-indexed object, so-called partial indexing is possible, enabling you to concisely select subsets of the data:",
"_____no_output_____"
]
],
[
[
"data['b']",
"_____no_output_____"
],
[
"data['a':'c']",
"_____no_output_____"
]
],
[
[
"* Selection is even possible in some cases from an “inner” level:",
"_____no_output_____"
]
],
[
[
"data[:, 1]",
"_____no_output_____"
]
],
[
[
"* Hierarchical indexing plays a critical role in reshaping data and group-based operations like forming a pivot table. For example, this data could be rearranged into a DataFrame using its unstack method:",
"_____no_output_____"
]
],
[
[
"dataDF = data.unstack()\ndataDF",
"_____no_output_____"
]
],
[
[
"* The inverse operation of unstack is stack:",
"_____no_output_____"
]
],
[
[
"dataDF.stack()",
"_____no_output_____"
]
],
[
[
"## Missing data",
"_____no_output_____"
],
[
"* The occurence of missing data is so prevalent that it pays to use tools like Pandas, which seamlessly integrates missing data handling so that it can be dealt with easily, and in the manner required by the analysis at hand.\n\n* Missing data are represented in `Series` and `DataFrame` objects by the `NaN` floating point value. However, `None` is also treated as missing, since it is commonly used as such in other contexts (NumPy).",
"_____no_output_____"
]
],
[
[
"weirdSeries = pd.Series([np.nan, None, 'string', 1])\nweirdSeries",
"_____no_output_____"
],
[
"weirdSeries.isnull()",
"_____no_output_____"
]
],
[
[
"* Missing values may be dropped or indexed out:",
"_____no_output_____"
]
],
[
[
"population2",
"_____no_output_____"
],
[
"population2.dropna()",
"_____no_output_____"
],
[
"population2[population2.notnull()]",
"_____no_output_____"
],
[
"dataDF",
"_____no_output_____"
]
],
[
[
"* By default, `dropna` drops entire rows in which one or more values are missing.",
"_____no_output_____"
]
],
[
[
"dataDF.dropna()",
"_____no_output_____"
]
],
[
[
"* This can be overridden by passing the `how='all'` argument, which only drops a row when every field is a missing value.",
"_____no_output_____"
]
],
[
[
"dataDF.dropna(how='all')",
"_____no_output_____"
]
],
[
[
"* This can be customized further by specifying how many values need to be present before a row is dropped via the `thresh` argument.",
"_____no_output_____"
]
],
[
[
"dataDF[2]['c'] = np.nan\ndataDF",
"_____no_output_____"
],
[
"dataDF.dropna(thresh=2)",
"_____no_output_____"
]
],
[
[
"* If we want to drop missing values column-wise instead of row-wise, we use `axis=1`.",
"_____no_output_____"
]
],
[
[
"dataDF[1]['d'] = np.random.randn(1)\ndataDF",
"_____no_output_____"
],
[
"dataDF.dropna(axis=1)",
"_____no_output_____"
]
],
[
[
"* Rather than omitting missing data from an analysis, in some cases it may be suitable to fill the missing value in, either with a default value (such as zero) or a value that is either imputed or carried forward/backward from similar data points. <br>\n<br>\n* We can do this programmatically in Pandas with the `fillna` argument.<br>",
"_____no_output_____"
]
],
[
[
"dataDF",
"_____no_output_____"
],
[
"dataDF.fillna(0)",
"_____no_output_____"
],
[
"dataDF.fillna({2: 1.5, 3:0.50})",
"_____no_output_____"
]
],
[
[
"* Notice that `fillna` by default returns a new object with the desired filling behavior, rather than changing the `Series` or `DataFrame` in place.",
"_____no_output_____"
]
],
[
[
"dataDF",
"_____no_output_____"
]
],
[
[
"* If you don't like this behaviour you can alter values in-place using `inplace=True`.",
"_____no_output_____"
]
],
[
[
"dataDF.fillna({2: 1.5, 3:0.50}, inplace=True)\ndataDF",
"_____no_output_____"
]
],
[
[
"* Missing values can also be interpolated, using any one of a variety of methods:",
"_____no_output_____"
]
],
[
[
"dataDF[2]['c'] = np.nan\ndataDF[3]['d'] = np.nan\ndataDF",
"_____no_output_____"
]
],
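[
[
"# A minimal illustration of interpolation on a made-up Series: linear interpolation fills the\n# interior gaps between known values.\npd.Series([1.0, np.nan, np.nan, 4.0]).interpolate()",
"_____no_output_____"
]
],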
[
[
"* We can also propagate non-null values forward or backward.",
"_____no_output_____"
]
],
[
[
"dataDF.fillna(method='ffill')",
"_____no_output_____"
],
[
"dataDF.fillna(dataDF.mean())",
"_____no_output_____"
]
],
[
[
"## Data summarization",
"_____no_output_____"
],
[
"* We often wish to summarize data in `Series` or `DataFrame` objects, so that they can more easily be understood or compared with similar data.<br>\n<br>\n* The NumPy package contains several functions that are useful here, but several summarization or reduction methods are built into Pandas data structures.<br>",
"_____no_output_____"
]
],
[
[
"marvelDF.sum()",
"_____no_output_____"
]
],
[
[
"* Clearly, `sum` is more meaningful for some columns than others.(Total Appearances)<br> ",
"_____no_output_____"
],
[
"* For methods like `mean` for which application to string variables is not just meaningless, but impossible, these columns are automatically exculded:",
"_____no_output_____"
]
],
[
[
"marvelDF.mean()",
"_____no_output_____"
]
],
[
[
"* The important difference between NumPy's functions and Pandas' methods is that Numpy have different functions for handling missing data like 'nansum' but Pandas use same functions.",
"_____no_output_____"
]
],
[
[
"dataDF",
"_____no_output_____"
],
[
"dataDF.mean()",
"_____no_output_____"
]
],
[
[
"* Sometimes we may not want to ignore missing values, and allow the `nan` to propagate.",
"_____no_output_____"
]
],
[
[
"dataDF.mean(skipna=False)",
"_____no_output_____"
]
],
[
[
"* A useful summarization that gives a quick snapshot of multiple statistics for a `Series` or `DataFrame` is `describe`:",
"_____no_output_____"
]
],
[
[
"dataDF.describe()",
"_____no_output_____"
]
],
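[
[
"# `describe` also summarizes non-numeric data; on a small made-up Series of labels it reports\n# the count, the number of unique values, the most frequent value and its frequency.\npd.Series(['Good', 'Bad', 'Good', 'Neutral']).describe()",
"_____no_output_____"
]
],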
[
[
"* `describe` can detect non-numeric data and sometimes yield useful information about it.",
"_____no_output_____"
],
[
"## Writing Data to Files",
"_____no_output_____"
],
[
"* Pandas can also export data to a variety of storage formats.<br>\n<br>\n* We will bring your attention to just a couple of these.",
"_____no_output_____"
]
],
[
[
"myDF = populationDF['2000']\nmyDF.to_csv(\"data/roundedPopulation2000.csv\")",
"_____no_output_____"
]
],
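[
[
"# The options described below, shown on a tiny made-up DataFrame; we write to an in-memory\n# buffer instead of a file so the sketch stays self-contained.\nfrom io import StringIO\nbuf = StringIO()\npd.DataFrame({'city': ['Istanbul', 'Ankara'], 'growth': [1.2, np.nan]}).to_csv(buf, sep=';', na_rep='NA', index=False)\nprint(buf.getvalue())",
"_____no_output_____"
]
],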
[
[
"* The `to_csv` method writes a `DataFrame` to a comma-separated values (csv) file.<br>\n<br>\n* You can specify custom delimiters (via `sep` argument), how missing values are written (via `na_rep` argument), whether the index is writen (via `index` argument), whether the header is included (via `header` argument), among other options.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d04d3f59a34031f62230cf00d84a55d318ab34a4 | 6,058 | ipynb | Jupyter Notebook | src/cnn_hao/main_mimic.ipynb | haozhu233/time_series_prediction | 9fc40ebd76d44217025aa29b63bd620163773902 | [
"MIT"
] | null | null | null | src/cnn_hao/main_mimic.ipynb | haozhu233/time_series_prediction | 9fc40ebd76d44217025aa29b63bd620163773902 | [
"MIT"
] | null | null | null | src/cnn_hao/main_mimic.ipynb | haozhu233/time_series_prediction | 9fc40ebd76d44217025aa29b63bd620163773902 | [
"MIT"
] | null | null | null | 55.577982 | 100 | 0.629416 | [
[
[
"from trainer import train_test_model",
"_____no_output_____"
],
[
"model, train_results, test_results = train_test_model('config_mimic.json')",
"Epoch 1 / 50, train_loss: 0.0102, train_auroc: 0.507, val_loss: 0.0026, val_auroc: 0.499\nEpoch 2 / 50, train_loss: 0.0099, train_auroc: 0.522, val_loss: 0.0026, val_auroc: 0.509\nEpoch 3 / 50, train_loss: 0.0098, train_auroc: 0.530, val_loss: 0.0026, val_auroc: 0.501\nEpoch 4 / 50, train_loss: 0.0098, train_auroc: 0.535, val_loss: 0.0026, val_auroc: 0.491\nEpoch 5 / 50, train_loss: 0.0098, train_auroc: 0.533, val_loss: 0.0026, val_auroc: 0.510\nEpoch 6 / 50, train_loss: 0.0098, train_auroc: 0.541, val_loss: 0.0026, val_auroc: 0.521\nEpoch 7 / 50, train_loss: 0.0098, train_auroc: 0.547, val_loss: 0.0026, val_auroc: 0.522\nEpoch 8 / 50, train_loss: 0.0098, train_auroc: 0.550, val_loss: 0.0026, val_auroc: 0.507\nEpoch 9 / 50, train_loss: 0.0097, train_auroc: 0.567, val_loss: 0.0026, val_auroc: 0.498\nEpoch 10 / 50, train_loss: 0.0097, train_auroc: 0.570, val_loss: 0.0026, val_auroc: 0.509\nEpoch 11 / 50, train_loss: 0.0097, train_auroc: 0.581, val_loss: 0.0026, val_auroc: 0.496\nEpoch 12 / 50, train_loss: 0.0097, train_auroc: 0.587, val_loss: 0.0026, val_auroc: 0.506\nEpoch 13 / 50, train_loss: 0.0096, train_auroc: 0.595, val_loss: 0.0026, val_auroc: 0.500\nEpoch 14 / 50, train_loss: 0.0096, train_auroc: 0.602, val_loss: 0.0026, val_auroc: 0.503\nEpoch 15 / 50, train_loss: 0.0096, train_auroc: 0.605, val_loss: 0.0026, val_auroc: 0.519\nEpoch 16 / 50, train_loss: 0.0095, train_auroc: 0.618, val_loss: 0.0026, val_auroc: 0.506\nEpoch 17 / 50, train_loss: 0.0095, train_auroc: 0.626, val_loss: 0.0026, val_auroc: 0.514\nEpoch 18 / 50, train_loss: 0.0094, train_auroc: 0.639, val_loss: 0.0026, val_auroc: 0.501\nEpoch 19 / 50, train_loss: 0.0094, train_auroc: 0.642, val_loss: 0.0026, val_auroc: 0.497\nEpoch 20 / 50, train_loss: 0.0094, train_auroc: 0.635, val_loss: 0.0027, val_auroc: 0.512\nEpoch 21 / 50, train_loss: 0.0094, train_auroc: 0.646, val_loss: 0.0027, val_auroc: 0.498\nEpoch 22 / 50, train_loss: 0.0094, train_auroc: 0.650, val_loss: 0.0026, val_auroc: 0.498\nEpoch 23 / 50, train_loss: 0.0093, train_auroc: 0.657, val_loss: 0.0026, val_auroc: 0.505\nEpoch 24 / 50, train_loss: 0.0093, train_auroc: 0.666, val_loss: 0.0026, val_auroc: 0.519\nEpoch 25 / 50, train_loss: 0.0093, train_auroc: 0.671, val_loss: 0.0027, val_auroc: 0.509\nEpoch 26 / 50, train_loss: 0.0090, train_auroc: 0.710, val_loss: 0.0027, val_auroc: 0.508\nEpoch 27 / 50, train_loss: 0.0089, train_auroc: 0.724, val_loss: 0.0027, val_auroc: 0.509\nEpoch 28 / 50, train_loss: 0.0089, train_auroc: 0.731, val_loss: 0.0028, val_auroc: 0.509\nEpoch 29 / 50, train_loss: 0.0088, train_auroc: 0.733, val_loss: 0.0028, val_auroc: 0.511\nEpoch 30 / 50, train_loss: 0.0088, train_auroc: 0.731, val_loss: 0.0028, val_auroc: 0.510\nEpoch 31 / 50, train_loss: 0.0088, train_auroc: 0.737, val_loss: 0.0028, val_auroc: 0.510\nEpoch 32 / 50, train_loss: 0.0088, train_auroc: 0.738, val_loss: 0.0028, val_auroc: 0.508\nEpoch 33 / 50, train_loss: 0.0088, train_auroc: 0.739, val_loss: 0.0028, val_auroc: 0.513\nEpoch 34 / 50, train_loss: 0.0087, train_auroc: 0.743, val_loss: 0.0028, val_auroc: 0.510\nEpoch 35 / 50, train_loss: 0.0087, train_auroc: 0.746, val_loss: 0.0029, val_auroc: 0.508\nEpoch 36 / 50, train_loss: 0.0087, train_auroc: 0.747, val_loss: 0.0029, val_auroc: 0.509\nEpoch 37 / 50, train_loss: 0.0087, train_auroc: 0.749, val_loss: 0.0029, val_auroc: 0.508\nEpoch 38 / 50, train_loss: 0.0087, train_auroc: 0.749, val_loss: 0.0029, val_auroc: 0.507\nEpoch 39 / 50, train_loss: 0.0086, train_auroc: 0.752, val_loss: 0.0029, val_auroc: 0.506\nEpoch 40 / 
50, train_loss: 0.0086, train_auroc: 0.752, val_loss: 0.0029, val_auroc: 0.506\nEpoch 41 / 50, train_loss: 0.0086, train_auroc: 0.754, val_loss: 0.0029, val_auroc: 0.506\nEpoch 42 / 50, train_loss: 0.0086, train_auroc: 0.757, val_loss: 0.0029, val_auroc: 0.501\nEpoch 43 / 50, train_loss: 0.0086, train_auroc: 0.757, val_loss: 0.0029, val_auroc: 0.502\nEpoch 44 / 50, train_loss: 0.0085, train_auroc: 0.761, val_loss: 0.0030, val_auroc: 0.503\nEpoch 45 / 50, train_loss: 0.0085, train_auroc: 0.761, val_loss: 0.0030, val_auroc: 0.504\nEpoch 46 / 50, train_loss: 0.0085, train_auroc: 0.762, val_loss: 0.0029, val_auroc: 0.504\nEpoch 47 / 50, train_loss: 0.0085, train_auroc: 0.764, val_loss: 0.0030, val_auroc: 0.503\nEpoch 48 / 50, train_loss: 0.0085, train_auroc: 0.764, val_loss: 0.0030, val_auroc: 0.505\nEpoch 49 / 50, train_loss: 0.0085, train_auroc: 0.765, val_loss: 0.0030, val_auroc: 0.505\nEpoch 50 / 50, train_loss: 0.0085, train_auroc: 0.768, val_loss: 0.0030, val_auroc: 0.502\nTest AUROC: 0.534\n"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
d04d4870e6ec5428a66b176a04f8c9106d4c1b56 | 27,743 | ipynb | Jupyter Notebook | pyqstrat/src_nb/trade_bars.ipynb | zouhx11/pyqstrat | 05f0d42ee661ae0a85b7534c15b5536103b17043 | [
"BSD-3-Clause"
] | null | null | null | pyqstrat/src_nb/trade_bars.ipynb | zouhx11/pyqstrat | 05f0d42ee661ae0a85b7534c15b5536103b17043 | [
"BSD-3-Clause"
] | null | null | null | pyqstrat/src_nb/trade_bars.ipynb | zouhx11/pyqstrat | 05f0d42ee661ae0a85b7534c15b5536103b17043 | [
"BSD-3-Clause"
] | null | null | null | 54.079922 | 157 | 0.51631 | [
[
[
"import pandas as pd\nimport numpy as np\nimport IPython.display as dsp\nfrom pyqstrat.pq_utils import zero_to_nan, get_empty_np_value, infer_frequency, resample_trade_bars, has_display, strtup2date\nfrom pyqstrat.plot import TradeBarSeries, TimeSeries, Subplot, Plot\n\nfrom typing import Optional, Sequence, Tuple, Union, Callable\n\n\ndef _sort_trade_bars_key(a: str) -> int:\n sorted_cols = ['timestamp', 'o', 'h', 'l', 'c', 'v', 'vwap']\n if a in sorted_cols:\n return sorted_cols.index(a)\n else:\n return len(sorted_cols)\n \n\ndef sort_trade_bars(columns: Sequence[str]) -> Sequence[str]:\n '''Given a list of column names, sort them in olhcv order'''\n columns = sorted(list(columns)) # Use stable sort to sort columns that we don't know about alphabetically\n return sorted(columns, key=_sort_trade_bars_key)\n \n\nclass TradeBars:\n '''Used to store OHLCV bars. You must at least supply timestamps and close prices. All other fields are optional.\n \n Attributes:\n timestamp: A numpy datetime array with the datetime for each bar. Must be monotonically increasing.\n c: A numpy float array with close prices for the bar.\n o: A numpy float array with open prices . Default None\n h: A numpy float array with high prices. Default None\n l: A numpy float array with high prices. Default None\n v: A numpy integer array with volume for the bar. Default None\n vwap: A numpy float array with the volume weighted average price for the bar. Default None\n '''\n def __init__(self, \n timestamps: np.ndarray, \n c: np.ndarray, \n o: Optional[np.ndarray] = None, \n h: Optional[np.ndarray] = None, \n l: Optional[np.ndarray] = None,\n v: Optional[np.ndarray] = None, \n vwap: Optional[np.ndarray] = None) -> None:\n '''Zeroes in o, h, l, c are set to nan'''\n assert(len(timestamps) > 1)\n assert(len(c) == len(timestamps))\n assert(o is None or len(o) == len(timestamps))\n assert(h is None or len(h) == len(timestamps))\n assert(l is None or len(l) == len(timestamps))\n assert(v is None or len(v) == len(timestamps))\n assert(vwap is None or len(vwap) == len(timestamps))\n \n if not np.all(np.diff(timestamps).astype(np.float) > 0): # check for monotonically increasing timestamps\n raise Exception('timestamps must be unique monotonically increasing')\n self.timestamps, self.o, self.h, self.l, self.c, self.v, self.vwap = timestamps, o, h, l, c, v, vwap\n \n for field in ['timestamps', 'h', 'l', 'c', 'v', 'vwap']:\n v = getattr(self, field)\n if isinstance(v, pd.Series):\n setattr(self, field, v.values)\n \n for field in ['o', 'h', 'l', 'c']:\n setattr(self, field, zero_to_nan(getattr(self, field)))\n \n self._set_valid_rows()\n \n def add_timestamps(self, timestamps: np.ndarray) -> None:\n '''\n Adds new timestamps to a market data object.\n \n Args:\n timestamps (np.array of np.datetime64): New timestamps to add. 
Does not have to be sorted or unique\n \n >>> timestamps = np.array(['2018-01-05', '2018-01-09', '2018-01-10'], dtype = 'M8[ns]')\n >>> c = np.array([8.1, 8.2, 8.3])\n >>> o = np.array([9, 10, 11])\n >>> trade_bar = TradeBars(timestamps, c, o)\n >>> new_timestamps = np.array(['2018-01-07', '2018-01-09'], dtype = 'M8[ns]')\n >>> trade_bar.add_timestamps(new_timestamps)\n >>> print(trade_bar.timestamps)\n ['2018-01-05T00:00:00.000000000' '2018-01-07T00:00:00.000000000'\n '2018-01-09T00:00:00.000000000' '2018-01-10T00:00:00.000000000']\n >>> np.set_printoptions(formatter = {'float': lambda x: f'{x:.4f}'}) # After numpy 1.13 positive floats don't have a leading space for sign\n >>> print(trade_bar.o, trade_bar.c)\n [9.0000 nan 10.0000 11.0000] [8.1000 nan 8.2000 8.3000]\n '''\n if timestamps is None or len(timestamps) == 0: return\n timestamps = np.unique(timestamps)\n new_timestamps = np.setdiff1d(timestamps, self.timestamps, assume_unique=True)\n all_timestamps = np.concatenate([self.timestamps, new_timestamps])\n col_list = ['o', 'h', 'l', 'c', 'vwap']\n sort_index = all_timestamps.argsort()\n for col in col_list:\n v = getattr(self, col)\n if v is None: continue\n dtype = getattr(self, col).dtype\n fill_value = get_empty_np_value(dtype)\n v = np.concatenate([v, np.full(len(new_timestamps), fill_value, dtype=dtype)])\n v = v[sort_index]\n setattr(self, col, v)\n self.timestamps = np.sort(all_timestamps)\n self._set_valid_rows\n \n def _get_fill_value(self, col_name: str) -> np.generic:\n dtype = getattr(self, col_name).dtype\n return get_empty_np_value(dtype)\n \n def _set_valid_rows(self) -> None:\n col_list = [col for col in [self.o, self.h, self.l, self.c, self.vwap] if col is not None]\n nans = np.any(np.isnan(col_list), axis=0)\n self.valid_rows = ~nans\n \n def valid_row(self, i: int) -> bool:\n '''Return True if the row with index i has no nans in it.'''\n return self.valid_rows[i]\n \n def resample(self, sampling_frequency: str) -> Optional['TradeBars']:\n '''\n Downsample the trade bars data into a new bar frequency\n \n Args:\n sampling_frequency: See sampling frequency in pandas\n '''\n if sampling_frequency is None:\n return self\n \n df = self.df()\n # Rename timestamps to timestamp\n df.index.name = 'timestamp'\n\n df = resample_trade_bars(df, sampling_frequency)\n o = df.o if 'o' in df.columns else None\n h = df.h if 'h' in df.columns else None\n _l = df.l if 'l' in df.columns else None\n v = df.v if 'v' in df.columns else None\n vwap = df.vwap if 'vwap' in df.columns else None\n \n trade_bar = TradeBars(df.timestamp, df.c, o, h, _l, v, vwap)\n \n trade_bar._set_valid_rows()\n \n return trade_bar\n \n def errors(self, display: bool = True) -> Optional[pd.DataFrame]:\n '''Returns a dataframe indicating any highs that are lower than opens, closes, lows or lows that are higher than other columns\n Also includes any ohlcv values that are negative\n '''\n df = self.df()\n errors_list = []\n if 'h' in df.columns:\n bad_highs = df[(df.h < df.c) | (df.h < df.o)]\n if len(bad_highs): \n bad_highs.insert(len(df.columns), 'error', 'bad high')\n errors_list.append(bad_highs)\n if 'l' in df.columns:\n bad_lows = df[(df.l > df.c) | (df.l > df.o)]\n if len(bad_lows): \n bad_lows.insert(len(df.columns), 'error', 'bad low')\n errors_list.append(bad_lows)\n\n neg_values_mask = (df.c < 0)\n for col in ['o', 'h', 'l', 'c', 'v', 'vwap']:\n if col in df.columns:\n neg_values_mask |= (df[col] < 0)\n neg_values = df[neg_values_mask]\n if len(neg_values): \n neg_values.insert(len(df.columns), 
'error', 'negative values')\n errors_list.append(neg_values)\n \n if not len(errors_list): return None\n \n df = pd.concat(errors_list)\n df = df[sort_trade_bars(df.columns)]\n \n if display: dsp.display(df)\n return df\n \n def warnings(self, warn_std: int = 10, display: bool = True) -> pd.DataFrame:\n '''Returns a dataframe indicating any values where the bar over bar change is more than warn_std standard deviations.\n \n Args:\n warn_std: Number of standard deviations to use as a threshold (default 10)\n display: Whether to print out the warning dataframe as well as returning it\n '''\n df = self.df()\n warnings_list = []\n\n for col in ['o', 'h', 'l', 'c', 'vwap']:\n if col in df.columns:\n ret = np.abs(df[col].pct_change())\n std = ret.std()\n mask = ret > warn_std * std\n df_tmp = df[mask]\n if len(df_tmp):\n double_mask = mask | mask.shift(-1) # Add the previous row so we know the two values computing a return\n df_tmp = df[double_mask]\n df_tmp.insert(len(df_tmp.columns), 'ret', ret[mask])\n df_tmp.insert(len(df_tmp.columns), 'warning', f'{col} ret > {warn_std} * std: {std:.5g}')\n warnings_list.append(df_tmp)\n\n if not len(warnings_list): return None\n df = pd.concat(warnings_list)\n df = df[sort_trade_bars(df.columns)]\n if display: dsp.display(df)\n return df\n \n def overview(self, display: bool = True) -> pd.DataFrame:\n '''Returns a dataframe showing basic information about the data, including count, number and percent missing, min, max\n \n Args:\n display: Whether to print out the warning dataframe as well as returning it\n '''\n df = self.df().reset_index()\n df_overview = pd.DataFrame({'count': len(df), \n 'num_missing': df.isnull().sum(), \n 'pct_missing': df.isnull().sum() / len(df), \n 'min': df.min(), \n 'max': df.max()})\n df_overview = df_overview.T\n df_overview = df_overview[sort_trade_bars(df_overview.columns)]\n if display: dsp.display(df_overview)\n return df_overview\n \n def time_distribution(self, \n frequency: str = '15 minutes', \n display: bool = True, \n plot: bool = True, \n figsize: Optional[Tuple[int, int]] = None) -> pd.DataFrame:\n '''\n Return a dataframe with the time distribution of the bars\n \n Args:\n frequency: The width of each bin (default \"15 minutes\"). 
You can use hours or days as well.\n display: Whether to display the data in addition to returning it.\n plot: Whether to plot the data in addition to returning it.\n figsize: If plot is set, optional figure size for the plot (default (20,8))\n '''\n group_col = None\n \n n = int(frequency.split(' ')[0])\n freq = frequency.split(' ')[1]\n \n df = self.df().reset_index()\n \n if freq == 'minutes' or freq == 'mins' or freq == 'min':\n group_col = [df.date.dt.hour, df.date.dt.minute // n * n]\n names = ['hour', 'minute']\n elif freq == 'hours' or freq == 'hrs' or freq == 'hr':\n group_col = [df.date.dt.weekday_name, df.date.dt.hour // n * n]\n names = ['weekday', 'hour']\n elif freq == 'weekdays' or freq == 'days' or freq == 'day':\n group_col = df.date.dt.weekday_name // n * n\n names = ['weekday']\n else:\n raise Exception(f'unknown time freq: {freq}')\n \n count = df.groupby(group_col)['c'].count()\n tdf = pd.DataFrame({'close_count': count, 'count_pct': count / df.c.count()})[['close_count', 'count_pct']]\n \n if 'v' in df.columns:\n vsum = df.groupby(group_col)['v'].sum()\n vdf = pd.DataFrame({'volume': vsum, 'volume_pct': vsum / df.v.sum()})[['volume', 'volume_pct']]\n tdf = pd.concat([vdf, tdf], axis=1)\n \n tdf.index.names = names\n \n if display:\n dsp.display(tdf)\n \n if plot:\n if not figsize: figsize = (20, 8)\n cols = ['close_count', 'volume'] if 'v' in df.columns else ['close_count']\n if not has_display():\n print('no display found, cannot plot time distribution')\n return tdf\n tdf[cols].plot(figsize=figsize, kind='bar', subplots=True, title='Time Distribution')\n \n return tdf\n \n def freq_str(self) -> str:\n \n freq = infer_frequency(self.timestamps)\n if freq < 1:\n freq_str = f'{round(freq * 24. * 60, 2)} minutes'\n else:\n freq_str = f'{freq} days'\n return freq_str\n \n def describe(self, \n warn_std: int = 10, \n time_distribution_frequency: str = '15 min', \n print_time_distribution: bool = False) -> None:\n '''\n Describe the bars. Shows an overview, errors and warnings for the bar data. This is a good function to use \n before running any backtests on a set of bar data.\n \n Args:\n warn_std: See warning function\n time_distribution_frequency: See time_distribution function\n print_time_distribution: Whether to print the time distribution in addition to plotting it.\n '''\n print(f'Inferred Frequency: {self.freq_str()}')\n self.overview()\n print('Errors:')\n self.errors()\n print('Warnings:')\n self.warnings(warn_std=warn_std)\n print('Time distribution:')\n self.time_distribution(display=print_time_distribution, frequency=time_distribution_frequency)\n \n def has_ohlc(self) -> bool:\n '''\n Returns True if we have all ohlc columns and none are empty\n '''\n return not (self.o is None or self.h is None or self.l is None or self.c is None)\n\n def plot(self,\n figsize: Tuple[int, int] = (15, 8),\n date_range: Optional[Union[Tuple[str, str], Tuple[np.datetime64, np.datetime64]]] = None,\n sampling_frequency: str = None,\n title: str = 'Price / Volume') -> None:\n '''\n Plot a candlestick or line plot depending on whether we have ohlc data or just close prices\n \n Args:\n figsize: Size of the figure (default (15,8))\n date_range: A tuple of strings or numpy datetimes for plotting a smaller sample of the data, e.g. (\"2018-01-01\", \"2018-01-06\")\n sampling_frequency: Downsample before plotting. 
See pandas frequency strings for possible values.\n title: Title of the graph, default \"Price / Volume\"\n '''\n if date_range and isinstance(date_range[0], str):\n date_range = strtup2date(date_range)\n data: Union[TradeBarSeries, TimeSeries]\n if self.has_ohlc():\n data = TradeBarSeries('price', self.timestamps, self.o, self.h, self.l, self.c, self.v, self.vwap)\n else:\n data = TimeSeries('price', self.timestamps, self.c)\n subplot = Subplot(data)\n plot = Plot([subplot], figsize=figsize, date_range=date_range, sampling_frequency=sampling_frequency, title=title)\n plot.draw()\n \n def df(self, \n start_date: Optional[np.datetime64] = None, \n end_date: Optional[np.datetime64] = None) -> pd.DataFrame:\n df = pd.DataFrame({'date': self.timestamps, 'c': self.c}).set_index('date')\n for tup in [('o', self.o), ('h', self.h), ('l', self.l), ('v', self.v), ('vwap', self.vwap)]:\n if tup[1] is not None: df.insert(0, tup[0], tup[1])\n if start_date: df = df[df.index.values >= start_date]\n if end_date: df = df[df.index.values <= end_date]\n return df\n \n \ndef roll_futures(fut_prices: pd.DataFrame, \n date_func: Callable[[pd.DataFrame], np.ndarray], \n condition_func: Callable[[pd.DataFrame], np.ndarray], \n expiries: pd.DataFrame = None,\n return_full_df: bool = False) -> pd.DataFrame:\n '''Construct a continuous futures dataframe with one row per datetime given rolling logic\n \n Args:\n fut_prices: A dataframe containing the columns 'date', 'series', and any other market data, \n for example, ohlcv data. Date can contain time for sub-daily bars. \n The series column must contain a different string name for each futures series, e.g. SEP2018, DEC2018, etc.\n date_func: A function that takes the future prices as an input and returns a numpy array of booleans\n True indicates that the future should be rolled on this date if the condition specified in condition_func is met.\n This function can assume that we have all the columns in the original market data object plus the same \n columns suffixed with _next for the potential series to roll over to.\n condition_func: A function that takes the future prices as input and returns a numpy array of booleans.\n True indicates that we should try to roll the future at that row.\n expiries: An optional dataframe with 2 columns, 'series' and 'expiry'. This should have one row per future series \n indicating that future's expiry date.\n If you don't pass this in, the function will assume that the expiry column is present in the original dataframe.\n return_full_df: If set, will return the datframe without removing extra timestamps so you can use your own logic for rolling, \n including the _next columns and the roll flag\n \n Returns:\n A pandas DataFrame with one row per date, which contains the columns in the original md DataFrame and the same columns suffixed with _next \n representing the series we want to roll to. There is also a column called roll_flag which is set to True whenever \n the date and roll condition functions are met.\n\n \n >>> fut_prices = pd.DataFrame({'timestamp': np.concatenate((np.arange(np.datetime64('2018-03-11'), np.datetime64('2018-03-16')),\n ... np.arange(np.datetime64('2018-03-11'), np.datetime64('2018-03-16')))),\n ... 'c': [10, 10.1, 10.2, 10.3, 10.4] + [10.35, 10.45, 10.55, 10.65, 10.75],\n ... 'v': [200, 200, 150, 100, 100] + [100, 50, 200, 250, 300],\n ... 
'series': ['MAR2018'] * 5 + ['JUN2018'] * 5})[['timestamp','series', 'c', 'v']]\n >>> expiries = pd.Series(np.array(['2018-03-15', '2018-06-15'], dtype = 'M8[D]'), index = ['MAR2018', 'JUN2018'], name = \"expiry\")\n >>> date_func = lambda fut_prices: fut_prices.expiry - fut_prices.timestamp <= np.timedelta64(3, 'D')\n >>> condition_func = lambda fut_prices: fut_prices.v_next > fut_prices.v\n >>> df = roll_futures(fut_prices, date_func, condition_func, expiries)\n >>> print(df[df.series == 'MAR2018'].timestamp.max() == np.datetime64('2018-03-14'))\n True\n >>> print(df[df.series == 'JUN2018'].timestamp.max() == np.datetime64('2018-03-15'))\n True\n '''\n if 'timestamp' not in fut_prices.columns or 'series' not in fut_prices.columns:\n raise Exception('timestamp or series not found in columns: {fut_prices.columns}')\n \n if expiries is not None:\n expiries = expiries.to_frame(name='expiry')\n fut_prices = pd.merge(fut_prices, expiries, left_on=['series'], right_index=True, how='left')\n else:\n if 'expiry' not in fut_prices.columns: raise Exception('expiry column must be present in market data if expiries argument is not specified')\n expiries = fut_prices[['series', 'expiry']].drop_duplicates().sort_values(by='expiry').set_index('s')\n\n expiries = pd.merge(expiries, expiries.shift(-1), left_index=True, right_index=True, how='left', suffixes=['', '_next'])\n\n orig_cols = [col for col in fut_prices.columns if col not in ['timestamp']]\n fut_prices1 = pd.merge(fut_prices, expiries[['expiry', 'expiry_next']], on=['expiry'], how='left')\n fut_prices = pd.merge(fut_prices1, fut_prices, left_on=['timestamp', 'expiry_next'],\n right_on=['timestamp', 'expiry'], how='left', suffixes=['', '_next'])\n\n fut_prices = fut_prices.sort_values(by=['expiry', 'timestamp'])\n\n roll_flag = date_func(fut_prices) & condition_func(fut_prices) \n\n df_roll = pd.DataFrame({'series': fut_prices.series, 'timestamp': fut_prices.timestamp, 'roll_flag': roll_flag})\n df_roll = df_roll[df_roll.roll_flag].groupby('series', as_index=False).first()\n fut_prices = pd.merge(fut_prices, df_roll, on=['series', 'timestamp'], how='left')\n fut_prices.roll_flag = fut_prices.roll_flag.fillna(False)\n \n cols = ['timestamp'] + orig_cols + [col + '_next' for col in orig_cols] + ['roll_flag']\n fut_prices = fut_prices[cols]\n \n if return_full_df: return fut_prices\n \n df_list = []\n for series, g in fut_prices.groupby('expiry'):\n roll_flag = g.roll_flag\n true_values = roll_flag[roll_flag]\n if len(true_values):\n first_true_index = true_values.index[0]\n roll_flag = roll_flag[first_true_index:]\n false_after_true_values = roll_flag[~roll_flag]\n if len(false_after_true_values):\n first_false_after_true_idx = false_after_true_values.index[0]\n g = g.loc[:first_false_after_true_idx]\n df_list.append(g)\n\n full_df = pd.concat(df_list)\n full_df = full_df.sort_values(by=['expiry', 'timestamp']).drop_duplicates(subset=['timestamp'])\n\n return full_df\n\n\ndef test_trade_bars() -> None:\n from datetime import datetime, timedelta\n np.random.seed(0)\n timestamps = np.arange(datetime(2018, 1, 1, 9, 0, 0), datetime(2018, 3, 1, 16, 0, 0), timedelta(minutes=5))\n timestamps = np.array([dt for dt in timestamps.astype(object) if dt.hour >= 9 and dt.hour <= 16]).astype('M8[m]')\n rets = np.random.normal(size=len(timestamps)) / 1000\n c_0 = 100\n c = np.round(c_0 * np.cumprod(1 + rets), 2)\n _l = np.round(c * (1. - np.abs(np.random.random(size=len(timestamps)) / 1000.)), 2) # PEP8 thinks l is hard to distinguish\n h = np.round(c * (1. 
+ np.abs(np.random.random(size=len(timestamps)) / 1000.)), 2)\n o = np.round(_l + (h - _l) * np.random.random(size=len(timestamps)), 2)\n v = np.abs(np.round(np.random.normal(size=len(timestamps)) * 1000))\n vwap = 0.5 * (_l + h)\n c[18] = np.nan\n _l[85] = 1000\n trade_bar = TradeBars(timestamps, c, o, h, _l, v, vwap)\n trade_bar.describe()\n trade_bar.plot(date_range=('2018-01-02', '2018-01-02 12:00'))\n\n\nif __name__ == \"__main__\":\n test_trade_bars()\n import doctest\n doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
d04d66e4798f2e63de140d634d81ab524c7f1bdb | 213,467 | ipynb | Jupyter Notebook | experiments/cnn_1/oracle.run2/trials/9/trial.ipynb | stevester94/csc500-notebooks | 4c1b04c537fe233a75bed82913d9d84985a89177 | [
"MIT"
] | null | null | null | experiments/cnn_1/oracle.run2/trials/9/trial.ipynb | stevester94/csc500-notebooks | 4c1b04c537fe233a75bed82913d9d84985a89177 | [
"MIT"
] | null | null | null | experiments/cnn_1/oracle.run2/trials/9/trial.ipynb | stevester94/csc500-notebooks | 4c1b04c537fe233a75bed82913d9d84985a89177 | [
"MIT"
] | null | null | null | 93.584831 | 74,908 | 0.795453 | [
[
[
"import os, json, sys, time, random\nimport numpy as np\nimport torch\nfrom easydict import EasyDict\nfrom math import floor\nfrom easydict import EasyDict\n\nfrom steves_utils.vanilla_train_eval_test_jig import Vanilla_Train_Eval_Test_Jig\n\nfrom steves_utils.torch_utils import get_dataset_metrics, independent_accuracy_assesment\nfrom steves_models.configurable_vanilla import Configurable_Vanilla\nfrom steves_utils.torch_sequential_builder import build_sequential\nfrom steves_utils.lazy_map import Lazy_Map\nfrom steves_utils.sequence_aggregator import Sequence_Aggregator\n\nfrom steves_utils.stratified_dataset.traditional_accessor import Traditional_Accessor_Factory\n\nfrom steves_utils.cnn_do_report import (\n get_loss_curve,\n get_results_table,\n get_parameters_table,\n get_domain_accuracies,\n)\n\nfrom steves_utils.torch_utils import (\n confusion_by_domain_over_dataloader,\n independent_accuracy_assesment\n)\n\nfrom steves_utils.utils_v2 import (\n per_domain_accuracy_from_confusion,\n get_datasets_base_path\n)\n\n# from steves_utils.ptn_do_report import TBD",
"_____no_output_____"
],
[
"required_parameters = {\n \"experiment_name\",\n \"lr\",\n \"device\",\n \"dataset_seed\",\n \"seed\",\n \"labels\",\n \"domains_target\",\n \"domains_source\",\n \"num_examples_per_domain_per_label_source\",\n \"num_examples_per_domain_per_label_target\",\n \"batch_size\",\n \"n_epoch\",\n \"patience\",\n \"criteria_for_best\",\n \"normalize_source\",\n \"normalize_target\",\n \"x_net\",\n \"NUM_LOGS_PER_EPOCH\",\n \"BEST_MODEL_PATH\",\n \"pickle_name_source\",\n \"pickle_name_target\",\n \"torch_default_dtype\",\n}",
"_____no_output_____"
],
[
"from steves_utils.ORACLE.utils_v2 import (\n ALL_SERIAL_NUMBERS,\n ALL_DISTANCES_FEET_NARROWED,\n)\n\nstandalone_parameters = {}\nstandalone_parameters[\"experiment_name\"] = \"MANUAL CORES CNN\"\nstandalone_parameters[\"lr\"] = 0.0001\nstandalone_parameters[\"device\"] = \"cuda\"\n\nstandalone_parameters[\"dataset_seed\"] = 1337\nstandalone_parameters[\"seed\"] = 1337\nstandalone_parameters[\"labels\"] = ALL_SERIAL_NUMBERS\n\nstandalone_parameters[\"domains_source\"] = [8,32,50]\nstandalone_parameters[\"domains_target\"] = [14,20,26,38,44,]\n\nstandalone_parameters[\"num_examples_per_domain_per_label_source\"]=-1\nstandalone_parameters[\"num_examples_per_domain_per_label_target\"]=-1\n\nstandalone_parameters[\"pickle_name_source\"] = \"oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl\"\nstandalone_parameters[\"pickle_name_target\"] = \"oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl\"\n\nstandalone_parameters[\"torch_default_dtype\"] = \"torch.float32\" \n\nstandalone_parameters[\"batch_size\"]=128\n\nstandalone_parameters[\"n_epoch\"] = 3\n\nstandalone_parameters[\"patience\"] = 10\n\nstandalone_parameters[\"criteria_for_best\"] = \"target_accuracy\"\nstandalone_parameters[\"normalize_source\"] = False\nstandalone_parameters[\"normalize_target\"] = False\n\nstandalone_parameters[\"x_net\"] = [\n {\"class\": \"nnReshape\", \"kargs\": {\"shape\":[-1, 1, 2, 256]}},\n {\"class\": \"Conv2d\", \"kargs\": { \"in_channels\":1, \"out_channels\":256, \"kernel_size\":(1,7), \"bias\":False, \"padding\":(0,3), },},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\":256}},\n\n {\"class\": \"Conv2d\", \"kargs\": { \"in_channels\":256, \"out_channels\":80, \"kernel_size\":(2,7), \"bias\":True, \"padding\":(0,3), },},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\":80}},\n {\"class\": \"Flatten\", \"kargs\": {}},\n\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 80*256, \"out_features\": 256}}, # 80 units per IQ pair\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm1d\", \"kargs\": {\"num_features\":256}},\n\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 256, \"out_features\": len(standalone_parameters[\"labels\"])}},\n]\n\nstandalone_parameters[\"NUM_LOGS_PER_EPOCH\"] = 10\nstandalone_parameters[\"BEST_MODEL_PATH\"] = \"./best_model.pth\"",
"_____no_output_____"
],
[
"# Parameters\nparameters = {\n \"experiment_name\": \"cnn_1:oracle.run2\",\n \"labels\": [\n \"3123D52\",\n \"3123D65\",\n \"3123D79\",\n \"3123D80\",\n \"3123D54\",\n \"3123D70\",\n \"3123D7B\",\n \"3123D89\",\n \"3123D58\",\n \"3123D76\",\n \"3123D7D\",\n \"3123EFE\",\n \"3123D64\",\n \"3123D78\",\n \"3123D7E\",\n \"3124E4A\",\n ],\n \"domains_source\": [8, 32, 50],\n \"domains_target\": [14, 20, 26, 38, 44],\n \"pickle_name_source\": \"oracle.Run2_10kExamples_stratified_ds.2022A.pkl\",\n \"pickle_name_target\": \"oracle.Run2_10kExamples_stratified_ds.2022A.pkl\",\n \"device\": \"cuda\",\n \"lr\": 0.0001,\n \"batch_size\": 128,\n \"normalize_source\": False,\n \"normalize_target\": False,\n \"num_examples_per_domain_per_label_source\": -1,\n \"num_examples_per_domain_per_label_target\": -1,\n \"torch_default_dtype\": \"torch.float32\",\n \"n_epoch\": 50,\n \"patience\": 3,\n \"criteria_for_best\": \"target_accuracy\",\n \"x_net\": [\n {\"class\": \"nnReshape\", \"kargs\": {\"shape\": [-1, 1, 2, 256]}},\n {\n \"class\": \"Conv2d\",\n \"kargs\": {\n \"in_channels\": 1,\n \"out_channels\": 256,\n \"kernel_size\": [1, 7],\n \"bias\": False,\n \"padding\": [0, 3],\n },\n },\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\": 256}},\n {\n \"class\": \"Conv2d\",\n \"kargs\": {\n \"in_channels\": 256,\n \"out_channels\": 80,\n \"kernel_size\": [2, 7],\n \"bias\": True,\n \"padding\": [0, 3],\n },\n },\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\": 80}},\n {\"class\": \"Flatten\", \"kargs\": {}},\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 20480, \"out_features\": 256}},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm1d\", \"kargs\": {\"num_features\": 256}},\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 256, \"out_features\": 16}},\n ],\n \"NUM_LOGS_PER_EPOCH\": 10,\n \"BEST_MODEL_PATH\": \"./best_model.pth\",\n \"dataset_seed\": 7,\n \"seed\": 7,\n}\n",
"_____no_output_____"
],
[
"# Set this to True if you want to run this template directly\nSTANDALONE = False\nif STANDALONE:\n print(\"parameters not injected, running with standalone_parameters\")\n parameters = standalone_parameters\n\nif not 'parameters' in locals() and not 'parameters' in globals():\n raise Exception(\"Parameter injection failed\")\n\n#Use an easy dict for all the parameters\np = EasyDict(parameters)\n\nsupplied_keys = set(p.keys())\n\nif supplied_keys != required_parameters:\n print(\"Parameters are incorrect\")\n if len(supplied_keys - required_parameters)>0: print(\"Shouldn't have:\", str(supplied_keys - required_parameters))\n if len(required_parameters - supplied_keys)>0: print(\"Need to have:\", str(required_parameters - supplied_keys))\n raise RuntimeError(\"Parameters are incorrect\")\n\n",
"_____no_output_____"
],
[
"###################################\n# Set the RNGs and make it all deterministic\n###################################\nnp.random.seed(p.seed)\nrandom.seed(p.seed)\ntorch.manual_seed(p.seed)\n\ntorch.use_deterministic_algorithms(True) ",
"_____no_output_____"
],
[
"torch.set_default_dtype(eval(p.torch_default_dtype))",
"_____no_output_____"
],
[
"###################################\n# Build the network(s)\n# Note: It's critical to do this AFTER setting the RNG\n###################################\nx_net = build_sequential(p.x_net)",
"_____no_output_____"
],
[
"start_time_secs = time.time()",
"_____no_output_____"
],
[
"def wrap_in_dataloader(p, ds):\n return torch.utils.data.DataLoader(\n ds,\n batch_size=p.batch_size,\n shuffle=True,\n num_workers=1,\n persistent_workers=True,\n prefetch_factor=50,\n pin_memory=True\n )\n\ntaf_source = Traditional_Accessor_Factory(\n labels=p.labels,\n domains=p.domains_source,\n num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source,\n pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name_source),\n seed=p.dataset_seed\n)\ntrain_original_source, val_original_source, test_original_source = \\\n taf_source.get_train(), taf_source.get_val(), taf_source.get_test()\n\n\ntaf_target = Traditional_Accessor_Factory(\n labels=p.labels,\n domains=p.domains_target,\n num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source,\n pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name_target),\n seed=p.dataset_seed\n)\ntrain_original_target, val_original_target, test_original_target = \\\n taf_target.get_train(), taf_target.get_val(), taf_target.get_test()\n\n\n# For CNN We only use X and Y. And we only train on the source.\n# Properly form the data using a transform lambda and Lazy_Map. Finally wrap them in a dataloader\n\ntransform_lambda = lambda ex: ex[:2] # Strip the tuple to just (x,y)\n\n\ntrain_processed_source = wrap_in_dataloader(\n p,\n Lazy_Map(train_original_source, transform_lambda)\n)\nval_processed_source = wrap_in_dataloader(\n p,\n Lazy_Map(val_original_source, transform_lambda)\n)\ntest_processed_source = wrap_in_dataloader(\n p,\n Lazy_Map(test_original_source, transform_lambda)\n)\n\ntrain_processed_target = wrap_in_dataloader(\n p,\n Lazy_Map(train_original_target, transform_lambda)\n)\nval_processed_target = wrap_in_dataloader(\n p,\n Lazy_Map(val_original_target, transform_lambda)\n)\ntest_processed_target = wrap_in_dataloader(\n p,\n Lazy_Map(test_original_target, transform_lambda)\n)\n\n\n\ndatasets = EasyDict({\n \"source\": {\n \"original\": {\"train\":train_original_source, \"val\":val_original_source, \"test\":test_original_source},\n \"processed\": {\"train\":train_processed_source, \"val\":val_processed_source, \"test\":test_processed_source}\n },\n \"target\": {\n \"original\": {\"train\":train_original_target, \"val\":val_original_target, \"test\":test_original_target},\n \"processed\": {\"train\":train_processed_target, \"val\":val_processed_target, \"test\":test_processed_target}\n },\n})",
"_____no_output_____"
],
[
"ep = next(iter(test_processed_target))\nep[0].dtype",
"_____no_output_____"
],
[
"model = Configurable_Vanilla(\n x_net=x_net,\n label_loss_object=torch.nn.NLLLoss(),\n learning_rate=p.lr\n)",
"_____no_output_____"
],
[
"jig = Vanilla_Train_Eval_Test_Jig(\n model=model,\n path_to_best_model=p.BEST_MODEL_PATH,\n device=p.device,\n label_loss_object=torch.nn.NLLLoss(),\n)\n\njig.train(\n train_iterable=datasets.source.processed.train,\n source_val_iterable=datasets.source.processed.val,\n target_val_iterable=datasets.target.processed.val,\n patience=p.patience,\n num_epochs=p.n_epoch,\n num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,\n criteria_for_best=p.criteria_for_best\n)",
"epoch: 1, [batch: 1 / 2625], examples_per_second: 548.1569, train_label_loss: 2.7749, \n"
],
[
"total_experiment_time_secs = time.time() - start_time_secs",
"_____no_output_____"
],
[
"source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)\ntarget_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)\n\nsource_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)\ntarget_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)\n\nhistory = jig.get_history()\n\ntotal_epochs_trained = len(history[\"epoch_indices\"])\n\nval_dl = wrap_in_dataloader(p, Sequence_Aggregator((datasets.source.original.val, datasets.target.original.val)))\n\nconfusion = confusion_by_domain_over_dataloader(model, p.device, val_dl, forward_uses_domain=False)\nper_domain_accuracy = per_domain_accuracy_from_confusion(confusion)\n\n# Add a key to per_domain_accuracy for if it was a source domain\nfor domain, accuracy in per_domain_accuracy.items():\n per_domain_accuracy[domain] = {\n \"accuracy\": accuracy,\n \"source?\": domain in p.domains_source\n }\n\n# Do an independent accuracy assesment JUST TO BE SURE!\n# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)\n# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)\n# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)\n# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)\n\n# assert(_source_test_label_accuracy == source_test_label_accuracy)\n# assert(_target_test_label_accuracy == target_test_label_accuracy)\n# assert(_source_val_label_accuracy == source_val_label_accuracy)\n# assert(_target_val_label_accuracy == target_val_label_accuracy)\n\n###################################\n# Write out the results\n###################################\n\nexperiment = {\n \"experiment_name\": p.experiment_name,\n \"parameters\": p,\n \"results\": {\n \"source_test_label_accuracy\": source_test_label_accuracy,\n \"source_test_label_loss\": source_test_label_loss,\n \"target_test_label_accuracy\": target_test_label_accuracy,\n \"target_test_label_loss\": target_test_label_loss,\n \"source_val_label_accuracy\": source_val_label_accuracy,\n \"source_val_label_loss\": source_val_label_loss,\n \"target_val_label_accuracy\": target_val_label_accuracy,\n \"target_val_label_loss\": target_val_label_loss,\n \"total_epochs_trained\": total_epochs_trained,\n \"total_experiment_time_secs\": total_experiment_time_secs,\n \"confusion\": confusion,\n \"per_domain_accuracy\": per_domain_accuracy,\n },\n \"history\": history,\n \"dataset_metrics\": get_dataset_metrics(datasets, \"cnn\"),\n}",
"_____no_output_____"
],
[
"get_loss_curve(experiment)",
"_____no_output_____"
],
[
"get_results_table(experiment)",
"_____no_output_____"
],
[
"get_domain_accuracies(experiment)",
"_____no_output_____"
],
[
"print(\"Source Test Label Accuracy:\", experiment[\"results\"][\"source_test_label_accuracy\"], \"Target Test Label Accuracy:\", experiment[\"results\"][\"target_test_label_accuracy\"])\nprint(\"Source Val Label Accuracy:\", experiment[\"results\"][\"source_val_label_accuracy\"], \"Target Val Label Accuracy:\", experiment[\"results\"][\"target_val_label_accuracy\"])",
"Source Test Label Accuracy: 0.8225555555555556 Target Test Label Accuracy: 0.087825\nSource Val Label Accuracy: 0.8238888888888889 Target Val Label Accuracy: 0.08761666666666666\n"
],
[
"json.dumps(experiment)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
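A note on the `x_net` parameter in the trial record above: each layer is given as a `{"class": ..., "kargs": ...}` dict and assembled by `build_sequential` from `steves_utils`, whose source is not part of this record. The following is a minimal, assumed sketch of that general pattern only, not the actual `steves_utils` implementation; the custom `nnReshape` behaviour is guessed from its `shape` argument.

```python
import torch.nn as nn

class Reshape(nn.Module):
    """Assumed stand-in for the custom 'nnReshape' entry in the x_net config."""
    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.reshape(*self.shape)

def build_sequential_sketch(layer_specs):
    """Map a list of {'class': name, 'kargs': kwargs} dicts onto an nn.Sequential."""
    layers = []
    for spec in layer_specs:
        if spec["class"] == "nnReshape":
            # custom layer, handled explicitly (assumed semantics)
            layers.append(Reshape(**spec["kargs"]))
        else:
            # plain torch.nn layer looked up by name
            layers.append(getattr(nn, spec["class"])(**spec["kargs"]))
    return nn.Sequential(*layers)
```

With the config shown in the record, such a builder would produce two Conv2d/ReLU/BatchNorm stages over the 2x256 I/Q frames, a Flatten, and two Linear layers ending in the 16-way label output.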
d04d7335542800ef0152ee7e3b8ade34a18610aa | 3,814 | ipynb | Jupyter Notebook | S6_transfer_function_equivalance.ipynb | torebutlin/PartIIA-3C6 | c586885e9876b3af22c9d3cab65933c9b1a23f48 | [
"MIT"
] | null | null | null | S6_transfer_function_equivalance.ipynb | torebutlin/PartIIA-3C6 | c586885e9876b3af22c9d3cab65933c9b1a23f48 | [
"MIT"
] | null | null | null | S6_transfer_function_equivalance.ipynb | torebutlin/PartIIA-3C6 | c586885e9876b3af22c9d3cab65933c9b1a23f48 | [
"MIT"
] | null | null | null | 22.975904 | 111 | 0.498689 | [
[
[
"# S6 Transfer Function Equivalence\n3C6 Section 6: equivalence of transfer function expressions\n\n## imports and definitions",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport scipy.linalg as la\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nmatplotlib.rcParams.update({'font.size': 12,'font.family':'serif'})\nimport os\nfrom IPython.display import HTML, display\nfrom ipywidgets import Output, widgets, Layout",
"_____no_output_____"
],
[
"%matplotlib notebook",
"_____no_output_____"
]
],
[
[
"## Setup properties",
"_____no_output_____"
]
],
[
[
"# setup parameters\nL = 1\nP = 1\nm = 1\nc = np.sqrt(P/m)\nx = 0.6*L\na = 0.2*L\n\nw1 = np.pi*c/L\nN = 20\n\n# Create axes\nw = np.linspace(0.01,(N+1)*w1,1000)\n\n# Direct approach\nif x<a:\n G1 = c/(w*P) * np.sin(w*(L-a)/c) * np.sin(w*x/c) / np.sin(w*L/c)\nelse:\n G1 = c/(w*P) * np.sin(w*a/c) * np.sin(w*(L-x)/c) / np.sin(w*L/c)",
"_____no_output_____"
],
[
"plt.figure(figsize=(9,5),dpi=100)\nyscale = np.percentile(20*np.log10(np.abs(G1)),[1,99]) # Get axis scaling to look ok for undamped case\nplt.ylim(yscale)\nplt.xlabel('Frequency')\nplt.ylabel('$20 \\log_{10}|G|$')\nplt.xlim([0,11*w1])\nplt.ylim([yscale[0]-10,yscale[1]+10])\n\np1 = plt.plot([],[],linestyle='--',linewidth=2,label='direct')\np1[0].set_data(w,20*np.log10(np.abs(G1)))\np2 = plt.plot([],[],linewidth=2,label='modal sum')\n\nG2 = 0\nn=0\nbutton = widgets.Button(description=\"Add another mode\",layout=Layout(width='95%'))\nbutton.button_style = 'primary'\ndisplay(button)\n\ndef next_plot(b):\n global G2,n\n n += 1\n G2 += 2/(m*L) * np.sin(n*np.pi*a/L) * np.sin(n*np.pi*x/L) / ((n*w1)**2 - w**2)\n \n p2[0].set_data(w,20*np.log10(np.abs(G2)))\n \n plt.title(\"number of modes = {}\".format(n))\n plt.legend(loc='lower left')\n if n >= 20:\n button.layout.visibility = 'hidden'\n \n\nbutton.on_click(next_plot)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
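For reference, the two quantities compared in `S6_transfer_function_equivalance.ipynb` above, written out from the notebook's own code (a taut-string example, judging by the tension `P`, mass parameter `m`, and wave speed $c=\sqrt{P/m}$, with $\omega_1=\pi c/L$), are the direct transfer function for forcing at $a$ and response at $x$, and its modal-sum form:

$$
G(\omega)=\frac{c}{\omega P}\,\frac{\sin(\omega a/c)\,\sin\!\big(\omega(L-x)/c\big)}{\sin(\omega L/c)}\qquad (x\ge a),
$$

$$
G(\omega)=\sum_{n=1}^{\infty}\frac{2}{mL}\,\frac{\sin(n\pi a/L)\,\sin(n\pi x/L)}{(n\omega_1)^{2}-\omega^{2}},
$$

with $a$ and $x$ interchanged in the first expression when $x<a$. The notebook's button widget adds one modal term per click, so the truncated sum can be seen converging to the direct curve.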
d04d7d14333aa6e7ee741b9cf6da3abb7f2a21b3 | 36,367 | ipynb | Jupyter Notebook | 9) domain_adaptation/5.domain_adaptation.ipynb | initOS/research-criticality-identification | 6af61986ec16bb6fdae2e22a6198e472ea4c5317 | [
"MIT"
] | null | null | null | 9) domain_adaptation/5.domain_adaptation.ipynb | initOS/research-criticality-identification | 6af61986ec16bb6fdae2e22a6198e472ea4c5317 | [
"MIT"
] | null | null | null | 9) domain_adaptation/5.domain_adaptation.ipynb | initOS/research-criticality-identification | 6af61986ec16bb6fdae2e22a6198e472ea4c5317 | [
"MIT"
] | null | null | null | 52.027182 | 13,396 | 0.699618 | [
[
[
"\\# Developer: Ali Hashaam ([email protected]) <br>\n\\# 5th March 2019 <br>\n\n\\# © 2019 initOS GmbH <br>\n\\# License MIT <br>\n\n\\# Library for TSVM and SelfLearning taken from https://github.com/tmadl/semisup-learn <br>\n\\# Library for lagrangean-S3VM taken from https://github.com/fbagattini/lagrangean-s3vm <br>",
"_____no_output_____"
]
],
[
[
"from sklearn.svm import SVC\nimport pandas as pd\nimport numpy as np\nfrom __future__ import division\nimport re\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom frameworks.SelfLearning import *\nfrom imblearn.over_sampling import SMOTE\nfrom collections import Counter\nfrom imblearn.under_sampling import RepeatedEditedNearestNeighbours\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.externals import joblib\nimport time\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report\nfrom methods.scikitTSVM import SKTSVM\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels",
"_____no_output_____"
],
[
"source_domain = pd.read_csv('github_preprocessed_data.csv')\ntarget_domain = pd.read_csv('mantis_data_for_domain_adaptation.csv')",
"_____no_output_____"
],
[
"source_domain.drop([\"Unnamed: 0\"], axis=1, inplace=True)\ntarget_domain.drop([\"Unnamed: 0\"], axis=1, inplace=True)\n\nsource_domain['text'] = source_domain['text'].fillna(\"\")\ntarget_domain['textual_data'] = target_domain['textual_data'].fillna(\"\")",
"_____no_output_____"
],
[
"print source_domain['type'].value_counts()",
"1 734\n0 430\nName: type, dtype: int64\n"
],
[
"print target_domain['type'].value_counts()",
"1.0 756\n0.0 92\nName: type, dtype: int64\n"
],
[
"unlabelled_index_target = target_domain[(target_domain['bug_or_not'].isnull())].index\nlabelled_index_target = target_domain[~(target_domain['bug_or_not'].isnull())].index",
"_____no_output_____"
],
[
"len(unlabelled_index_target), len(labelled_index_target)",
"_____no_output_____"
],
[
"target_domain_labeled = target_domain.loc[labelled_index_target]",
"_____no_output_____"
],
[
"len(target_domain_labeled), len(target_domain)",
"_____no_output_____"
],
[
"tfidf_vectorizer_source = TfidfVectorizer(max_df=0.95, min_df=2, max_features=500, stop_words='english')",
"_____no_output_____"
],
[
"source_domain_balanced = source_domain.groupby('type').apply(lambda x: x.sample(400))\nprint source_domain['type'].value_counts()",
"1 734\n0 430\nName: type, dtype: int64\n"
],
[
"source_domain_X = tfidf_vectorizer_source.fit_transform(source_domain_balanced['text'])\nsource_domain_Y = np.array(source_domain_balanced['type'])",
"_____no_output_____"
],
[
"stratified_shuffle_split = StratifiedShuffleSplit(n_splits=3, test_size=0.3, random_state=0)\nscores = []\niteration = 1\nfor train_index, test_index in stratified_shuffle_split.split(source_domain_X, source_domain_Y):\n X_train = source_domain_X[train_index].copy()\n Y_train = source_domain_Y[train_index].copy()\n X_test = source_domain_X[test_index].copy()\n Y_test = source_domain_Y[test_index].copy()\n clf = MultinomialNB()\n clf.fit(X_train, Y_train)\n print clf.score(X_test, Y_test.astype(float))\n y_pred = clf.predict(X_test)\n result = classification_report(Y_test.astype(float), y_pred.astype(float), output_dict=True)\n src = pd.DataFrame(result)\n src.transpose().to_csv('{}_{}_{}_latex_table_report.csv'.format('source_vs_source', '500', iteration))\n print src.transpose()\n iteration += 1",
"0.845833333333\n f1-score precision recall support\n0.0 0.841202 0.867257 0.816667 120.0\n1.0 0.850202 0.826772 0.875000 120.0\nmacro avg 0.845702 0.847014 0.845833 240.0\nmicro avg 0.845833 0.845833 0.845833 240.0\nweighted avg 0.845702 0.847014 0.845833 240.0\n0.85\n f1-score precision recall support\n0.0 0.848739 0.855932 0.841667 120.0\n1.0 0.851240 0.844262 0.858333 120.0\nmacro avg 0.849990 0.850097 0.850000 240.0\nmicro avg 0.850000 0.850000 0.850000 240.0\nweighted avg 0.849990 0.850097 0.850000 240.0\n0.895833333333\n f1-score precision recall support\n0.0 0.889868 0.943925 0.841667 120.0\n1.0 0.901186 0.857143 0.950000 120.0\nmacro avg 0.895527 0.900534 0.895833 240.0\nmicro avg 0.895833 0.895833 0.895833 240.0\nweighted avg 0.895527 0.900534 0.895833 240.0\n"
]
],
[
[
"# Baseline TL Source VS Target Supervised",
"_____no_output_____"
]
],
[
[
"source_domain_X = tfidf_vectorizer_source.fit_transform(source_domain['text'])\nsource_domain_Y = np.array(source_domain['type'])",
"_____no_output_____"
],
[
"for x in xrange(5):\n clf = MultinomialNB()\n clf.fit(source_domain_X, source_domain_Y)\n target_domain_labeled_balanced = target_domain_labeled.groupby('type').apply(lambda x: x.sample(90))\n target_domain_X = tfidf_vectorizer_source.transform(target_domain_labeled_balanced['textual_data'])\n target_domain_Y = np.array(target_domain_labeled_balanced['type'])\n print(\"members for classes {}\".format(\",\".join(\"(%s,%s)\" % tup for tup in sorted(Counter(target_domain_Y).items()))))\n score = clf.score(target_domain_X, target_domain_Y.astype(float))\n print \"Baseline TL Score: \"+ str(score)\n y_pred = clf.predict(target_domain_X)\n print(\"members for classes {}\".format(\",\".join(\"(%s,%s)\" % tup for tup in sorted(Counter(y_pred).items()))))\n result = classification_report(target_domain_Y.astype(float), y_pred.astype(float), output_dict=True)\n src = pd.DataFrame(result)\n print src\n src.transpose().to_csv('{}_{}_{}_latex_table_report.csv'.format('source_vs_target_supervised', '500', x))",
"members for classes (0.0,90),(1.0,90)\nBaseline TL Score: 0.566666666667\nmembers for classes (0,44),(1,136)\n 0.0 1.0 macro avg micro avg weighted avg\nf1-score 0.417910 0.654867 0.536389 0.566667 0.536389\nprecision 0.636364 0.544118 0.590241 0.566667 0.590241\nrecall 0.311111 0.822222 0.566667 0.566667 0.566667\nsupport 90.000000 90.000000 180.000000 180.000000 180.000000\nmembers for classes (0.0,90),(1.0,90)\nBaseline TL Score: 0.6\nmembers for classes (0,38),(1,142)\n 0.0 1.0 macro avg micro avg weighted avg\nf1-score 0.437500 0.689655 0.563578 0.6 0.563578\nprecision 0.736842 0.563380 0.650111 0.6 0.650111\nrecall 0.311111 0.888889 0.600000 0.6 0.600000\nsupport 90.000000 90.000000 180.000000 180.0 180.000000\nmembers for classes (0.0,90),(1.0,90)\nBaseline TL Score: 0.6\nmembers for classes (0,36),(1,144)\n 0.0 1.0 macro avg micro avg weighted avg\nf1-score 0.428571 0.692308 0.56044 0.6 0.56044\nprecision 0.750000 0.562500 0.65625 0.6 0.65625\nrecall 0.300000 0.900000 0.60000 0.6 0.60000\nsupport 90.000000 90.000000 180.00000 180.0 180.00000\nmembers for classes (0.0,90),(1.0,90)\nBaseline TL Score: 0.583333333333\nmembers for classes (0,41),(1,139)\n 0.0 1.0 macro avg micro avg weighted avg\nf1-score 0.427481 0.672489 0.549985 0.583333 0.549985\nprecision 0.682927 0.553957 0.618442 0.583333 0.618442\nrecall 0.311111 0.855556 0.583333 0.583333 0.583333\nsupport 90.000000 90.000000 180.000000 180.000000 180.000000\nmembers for classes (0.0,90),(1.0,90)\nBaseline TL Score: 0.633333333333\nmembers for classes (0,32),(1,148)\n 0.0 1.0 macro avg micro avg weighted avg\nf1-score 0.459016 0.722689 0.590853 0.633333 0.590853\nprecision 0.875000 0.581081 0.728041 0.633333 0.728041\nrecall 0.311111 0.955556 0.633333 0.633333 0.633333\nsupport 90.000000 90.000000 180.000000 180.000000 180.000000\n"
]
],
[
[
"# TL Source Semi-Supervised",
"_____no_output_____"
]
],
[
[
"target_domain_unlabeled = target_domain.loc[unlabelled_index_target, [\"textual_data\", \"type\"]].copy()\ntarget_domain_unlabeled[\"type\"] = -1\nsource_domain_df = source_domain[[\"text\", \"type\"]].copy()\nsource_domain_df.rename({\"text\":\"textual_data\", \"type\":\"type\"}, axis=1, inplace=1)\ndomain_adaptation_df = pd.concat([source_domain_df, target_domain_unlabeled])",
"_____no_output_____"
],
[
"len(domain_adaptation_df), len(source_domain_df), len(target_domain_unlabeled)",
"_____no_output_____"
],
[
"print domain_adaptation_df['type'].value_counts()\nprint target_domain_labeled_balanced['type'].value_counts()",
"-1 8367\n 1 734\n 0 430\nName: type, dtype: int64\n1.0 90\n0.0 90\nName: type, dtype: int64\n"
],
[
"from collections import Counter\n#print(\"members for classes {}\".format(\",\".join(\"(%s,%s)\" % tup for tup in sorted(Counter(Y).items()))))\ndef domain_adaptation(dom_a_df, target_domain_labeled, \n classifier, label_type, neg_class, classifier_name):\n dom_a_df.loc[dom_a_df['type']==0, 'type'] = neg_class\n dom_a_df.loc[dom_a_df['type']==1, 'type'] = 1\n \n target_domain_labeled.loc[target_domain_labeled['type']==0, 'type'] = neg_class\n target_domain_labeled.loc[target_domain_labeled['type']==1, 'type'] = 1\n \n tfidf_vectorizer_source = TfidfVectorizer(max_df=0.95, min_df=2, max_features=500, stop_words='english')\n source_X = tfidf_vectorizer_source.fit_transform(dom_a_df['textual_data']).toarray()\n source_Y = np.array(dom_a_df['type'])\n \n target_domain_X = tfidf_vectorizer_source.transform(target_domain_labeled['textual_data']).toarray()\n target_domain_Y = np.array(target_domain_labeled['type'])\n \n if label_type != 'int':\n source_Y = source_Y.astype(float)\n else:\n source_Y = source_Y.astype(int)\n \n classifier.fit(source_X, source_Y)\n score = classifier.score(target_domain_X, target_domain_Y.astype(int))\n joblib.dump(classifier, 'models/DA_{}.pkl'.format(classifier_name))\n joblib.dump(target_domain_X, 'models/X_test_DA_{}.pkl'.format(classifier_name))\n joblib.dump(target_domain_Y, 'models/Y_test_DA_{}.pkl'.format(classifier_name))\n print \"{} score: {}\".format(classifier_name, score)",
"_____no_output_____"
],
[
"sklearn_lr = LogisticRegression(solver='lbfgs')\ndomain_adaptation(domain_adaptation_df.copy(), target_domain_labeled_balanced.copy(), \n SelfLearningModel(sklearn_lr), 'float', 0, 'ST_LR')",
"ST_LR score: 0.5\n"
],
[
"domain_adaptation(domain_adaptation_df.copy(), target_domain_labeled_balanced.copy(), \n SKTSVM(), 'int', 0, 'TSVM')",
"TSVM score: 0.516666666667\n"
],
[
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=['Bug', 'Non-Bug'], yticklabels=['Bug', 'Non-Bug'],\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n #plt.savefig('Confusion_matrix(Phase_3).png', bbox_inches='tight')\n return ax",
"_____no_output_____"
],
[
"def get_results(classifier, data_type):\n dict_features = {}\n model = joblib.load('models/DA_{}.pkl'.format(classifier, 3))\n x_tst = joblib.load('models/X_test_DA_{}.pkl'.format(classifier, 3))\n y_tst = joblib.load('models/Y_test_DA_{}.pkl'.format(classifier, 3))\n acc = model.score(x_tst, y_tst.astype(data_type))\n print acc\n y_pred = model.predict(x_tst)\n print(\"members for classes {}\".format(\",\".join(\"(%s,%s)\" % tup for tup in sorted(Counter(y_tst).items()))))\n print(\"members for classes {}\".format(\",\".join(\"(%s,%s)\" % tup for tup in sorted(Counter(y_pred).items()))))\n result = classification_report(y_tst.astype(data_type), y_pred.astype(data_type), output_dict=True)\n result_df = pd.DataFrame(result)\n result_df.transpose().to_csv('DA_{}_latex_table_report.csv'.format(classifier))\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n plot_confusion_matrix(y_tst.astype(data_type), y_pred.astype(data_type), classes=[0, 1],\n title='Confusion matrix, without normalization')\n\n # Plot normalized confusion matrix\n #plot_confusion_matrix(y_tst.astype(data_type), y_pred.astype(data_type), classes=[0, 1], normalize=True,\n # title='Normalized confusion matrix')\n\n plt.show()\n print result_df.transpose()\n return result_df",
"_____no_output_____"
],
[
"st_results = get_results('ST_LR', float)",
"_____no_output_____"
],
[
"tsvm_results = get_results('TSVM', int)",
"0.516666666667\nmembers for classes (0.0,90),(1.0,90)\nmembers for classes (0.0,5),(1.0,175)\nConfusion matrix, without normalization\n[[ 4 86]\n [ 1 89]]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d04d83f48979df26d1ceac4a67fb2e14135e3d75 | 22,391 | ipynb | Jupyter Notebook | project_tfif.ipynb | lauraAriasFdez/Ciphers | da4856ee1275447d01118601819330775e5aba15 | [
"MIT"
] | null | null | null | project_tfif.ipynb | lauraAriasFdez/Ciphers | da4856ee1275447d01118601819330775e5aba15 | [
"MIT"
] | null | null | null | project_tfif.ipynb | lauraAriasFdez/Ciphers | da4856ee1275447d01118601819330775e5aba15 | [
"MIT"
] | null | null | null | 39.560071 | 233 | 0.522933 | [
[
[
"<a href=\"https://colab.research.google.com/github/lauraAriasFdez/Ciphers/blob/master/project_tfif.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"### 1. Connect To Google Drive + Get Data\n",
"_____no_output_____"
]
],
[
[
"# MAIN DIRECTORY STILL TO DO \nfrom google.colab import drive\ndrive.mount('/content/gdrive')",
"Mounted at /content/gdrive\n"
],
[
"data_file = \"/content/gdrive/MyDrive/CSCI4511W/project/sentiments.csv\"",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\n\ncols = ['sentiment','id','date','query_string','user','text']\nsms_data = pd.read_csv(data_file, encoding='latin-1',header=None,names=cols)\n\n# replace lables 0 = neg 1= pos\nsms_data.sentiment = sms_data.sentiment.replace({0: 0, 4: 1})\n\n\nlabels = sms_data[sms_data.columns[0]].to_numpy()\n",
"_____no_output_____"
]
],
[
[
"### Preprocess Data\n",
"_____no_output_____"
]
],
[
[
"import re\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n#We import English stop-words from the NLTK package and removed them if found in the sentence.\n#While removing stop-words, we perform stemming that is if the word is not a stop-word, it will be converted to its root form. This is called stemming.\n\n\"\"\"\nhttps://stackoverflow.com/questions/52026677/sentiment140-preprocessing\nhttps://www.analyticsvidhya.com/blog/2020/11/understanding-naive-bayes-svm-and-its-implementation-on-spam-sms/\n\n\n\"\"\"\ndef clean_data(content):\n stemming = PorterStemmer()\n\n for i in range (0,len(content)):\n\n ## print where in cleaning they are\n if (i%1000000==0):\n print(i ,\" already cleaned\")\n\n #remove @mentions\n tweet = re.sub(r'@[A-Za-z0-9]+',\"\",content[i]) \n #remove urls\n tweet = re.sub(r'https?:\\/\\/\\S+',\"\",tweet) \n\n #remove all unecessary charachters like punctuations\n tweet = re.sub('[^a-zA-Z]',repl = ' ',string = tweet)\n tweet.lower()\n tweet = tweet.split()\n\n ## steeeming and remove stop words\n tweet = [stemming.stem(word) for word in tweet if word not in set(stopwords.words('english'))]\n tweet = ' '.join(tweet)\n\n #cleaned Twwet\n content[i] = tweet\n return content\n",
"[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n"
]
],
[
[
"\nhttps://getpocket.com/read/3040941140\n\n Texthero is designed as a Pandas wrapper, so it makes it easier than ever to preprocess and analyze text based Pandas Series",
"_____no_output_____"
]
],
[
[
"!pip install texthero\nimport pandas as pd\nimport texthero as hero #config import cid, csec, ua",
"Collecting texthero\n Downloading texthero-1.1.0-py3-none-any.whl (24 kB)\nRequirement already satisfied: tqdm>=4.3 in /usr/local/lib/python3.7/dist-packages (from texthero) (4.63.0)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from texthero) (1.21.5)\nCollecting unidecode>=1.1.1\n Downloading Unidecode-1.3.4-py3-none-any.whl (235 kB)\n\u001b[K |████████████████████████████████| 235 kB 7.0 MB/s \n\u001b[?25hRequirement already satisfied: spacy<3.0.0 in /usr/local/lib/python3.7/dist-packages (from texthero) (2.2.4)\nRequirement already satisfied: pandas>=1.0.2 in /usr/local/lib/python3.7/dist-packages (from texthero) (1.3.5)\nRequirement already satisfied: matplotlib>=3.1.0 in /usr/local/lib/python3.7/dist-packages (from texthero) (3.2.2)\nCollecting nltk>=3.3\n Downloading nltk-3.7-py3-none-any.whl (1.5 MB)\n\u001b[K |████████████████████████████████| 1.5 MB 42.7 MB/s \n\u001b[?25hRequirement already satisfied: scikit-learn>=0.22 in /usr/local/lib/python3.7/dist-packages (from texthero) (1.0.2)\nRequirement already satisfied: plotly>=4.2.0 in /usr/local/lib/python3.7/dist-packages (from texthero) (5.5.0)\nRequirement already satisfied: wordcloud>=1.5.0 in /usr/local/lib/python3.7/dist-packages (from texthero) (1.5.0)\nRequirement already satisfied: gensim<4.0,>=3.6.0 in /usr/local/lib/python3.7/dist-packages (from texthero) (3.6.0)\nRequirement already satisfied: smart-open>=1.2.1 in /usr/local/lib/python3.7/dist-packages (from gensim<4.0,>=3.6.0->texthero) (5.2.1)\nRequirement already satisfied: six>=1.5.0 in /usr/local/lib/python3.7/dist-packages (from gensim<4.0,>=3.6.0->texthero) (1.15.0)\nRequirement already satisfied: scipy>=0.18.1 in /usr/local/lib/python3.7/dist-packages (from gensim<4.0,>=3.6.0->texthero) (1.4.1)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.1.0->texthero) (1.3.2)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.1.0->texthero) (3.0.7)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.1.0->texthero) (0.11.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.1.0->texthero) (2.8.2)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from nltk>=3.3->texthero) (7.1.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from nltk>=3.3->texthero) (1.1.0)\nCollecting regex>=2021.8.3\n Downloading regex-2022.3.15-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (749 kB)\n\u001b[K |████████████████████████████████| 749 kB 51.4 MB/s \n\u001b[?25hRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=1.0.2->texthero) (2018.9)\nRequirement already satisfied: tenacity>=6.2.0 in /usr/local/lib/python3.7/dist-packages (from plotly>=4.2.0->texthero) (8.0.1)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.22->texthero) (3.1.0)\nRequirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (3.0.6)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (57.4.0)\nRequirement already satisfied: blis<0.5.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from 
spacy<3.0.0->texthero) (0.4.1)\nRequirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (2.0.6)\nRequirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (1.0.6)\nRequirement already satisfied: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (1.0.0)\nRequirement already satisfied: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (1.0.5)\nRequirement already satisfied: thinc==7.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (7.4.0)\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (2.23.0)\nRequirement already satisfied: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (0.9.0)\nRequirement already satisfied: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0->texthero) (1.1.3)\nRequirement already satisfied: importlib-metadata>=0.20 in /usr/local/lib/python3.7/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy<3.0.0->texthero) (4.11.2)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20->catalogue<1.1.0,>=0.0.7->spacy<3.0.0->texthero) (3.7.0)\nRequirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20->catalogue<1.1.0,>=0.0.7->spacy<3.0.0->texthero) (3.10.0.2)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.0.0->texthero) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.0.0->texthero) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.0.0->texthero) (2021.10.8)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.0.0->texthero) (2.10)\nRequirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from wordcloud>=1.5.0->texthero) (7.1.2)\nInstalling collected packages: regex, unidecode, nltk, texthero\n Attempting uninstall: regex\n Found existing installation: regex 2019.12.20\n Uninstalling regex-2019.12.20:\n Successfully uninstalled regex-2019.12.20\n Attempting uninstall: nltk\n Found existing installation: nltk 3.2.5\n Uninstalling nltk-3.2.5:\n Successfully uninstalled nltk-3.2.5\nSuccessfully installed nltk-3.7 regex-2022.3.15 texthero-1.1.0 unidecode-1.3.4\n"
],
[
"\ncustom_cleaning = [\n \n #Replace not assigned values with empty space\n hero.preprocessing.fillna,\n hero.preprocessing.lowercase,\n hero.preprocessing.remove_digits,\n hero.preprocessing.remove_punctuation,\n hero.preprocessing.remove_diacritics,\n hero.preprocessing.remove_stopwords,\n hero.preprocessing.remove_whitespace,\n hero.preprocessing.stem\n\n\n]\n\ncontent = hero.clean(sms_data['text'], pipeline = custom_cleaning)\n",
"_____no_output_____"
],
[
"#content = content.to_numpy()",
"_____no_output_____"
]
],
[
[
"### TF-IDF Feature Extraction ",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf = TfidfVectorizer()\ntfidf_data = tfidf.fit_transform(content)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\ntfidf_x_train,tfidf_x_test,y_train,y_test = train_test_split(tfidf_data,labels,test_size = 0.3, stratify=labels,random_state=100)",
"_____no_output_____"
]
],
[
[
"### Multinomial Naive Bayes",
"_____no_output_____"
]
],
[
[
"from sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.metrics import f1_score\n\n# NAIVE BAYES + TLF \nprint(\"NAIVE BAYES + TLF______________________________________________________________\")\nclf_multinomialnb = MultinomialNB()\nclf_multinomialnb.fit(tfidf_x_train,y_train)\n\ny_pred = clf_multinomialnb.predict(tfidf_x_test)\nprint(classification_report(y_test,y_pred))\n\n#>>> f1_score(y_true, y_pred, average='weighted')\nf1_score(y_test,y_pred)",
"NAIVE BAYES + TLF______________________________________________________________\n precision recall f1-score support\n\n 0 0.74 0.79 0.76 240000\n 1 0.77 0.72 0.75 240000\n\n accuracy 0.76 480000\n macro avg 0.76 0.76 0.76 480000\nweighted avg 0.76 0.76 0.76 480000\n\n"
]
],
[
[
"### SVM",
"_____no_output_____"
]
],
[
[
"from sklearn.svm import LinearSVC\n\n# SVM + TLF \nprint(\"LINEAR SVM + TLF______________________________________________________________\")\nlinearsvc = LinearSVC()\nlinearsvc.fit(tfidf_x_train,y_train)\ny_pred = linearsvc.predict(tfidf_x_test)\n\nprint(classification_report(y_test,y_pred))\nf1_score(y_test,y_pred)",
"LINEAR SVM + TLF______________________________________________________________\n precision recall f1-score support\n\n 0 0.78 0.75 0.77 240000\n 1 0.76 0.79 0.77 240000\n\n accuracy 0.77 480000\n macro avg 0.77 0.77 0.77 480000\nweighted avg 0.77 0.77 0.77 480000\n\n"
]
],
[
[
"### Logistic Regression\n",
"_____no_output_____"
]
],
[
[
"#https://towardsdatascience.com/logistic-regression-using-python-sklearn-numpy-mnist-handwriting-recognition-matplotlib-a6b31e2b166a\n\nfrom sklearn.linear_model import LogisticRegression\nlogisticRegr = LogisticRegression()\nlogisticRegr.fit(tfidf_x_train,y_train)\n\ny_pred = logisticRegr.predict(tfidf_x_test)\n\nprint(classification_report(y_test,y_pred))\nf1_score(y_test,y_pred)",
"/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:818: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d04d843fabe8d05fad1baa385d3187e46d952fce | 89,379 | ipynb | Jupyter Notebook | doc/jupyter_execute/examples/models/chainer_mnist/chainer_mnist.ipynb | edshee/seldon-core | 78c10fbca16a5e2a0c25b9673aa3deb220070e26 | [
"Apache-2.0"
] | null | null | null | doc/jupyter_execute/examples/models/chainer_mnist/chainer_mnist.ipynb | edshee/seldon-core | 78c10fbca16a5e2a0c25b9673aa3deb220070e26 | [
"Apache-2.0"
] | null | null | null | doc/jupyter_execute/examples/models/chainer_mnist/chainer_mnist.ipynb | edshee/seldon-core | 78c10fbca16a5e2a0c25b9673aa3deb220070e26 | [
"Apache-2.0"
] | null | null | null | 63.389362 | 521 | 0.488112 | [
[
[
"# Chainer MNIST Model Deployment\n\n * Wrap a Chainer MNIST python model for use as a prediction microservice in seldon-core\n * Run locally on Docker to test\n * Deploy on seldon-core running on minikube\n \n## Dependencies\n\n * [Helm](https://github.com/kubernetes/helm)\n * [Minikube](https://github.com/kubernetes/minikube)\n * [S2I](https://github.com/openshift/source-to-image)\n\n```bash\npip install seldon-core\npip install chainer==6.2.0\n```\n\n## Train locally\n ",
"_____no_output_____"
]
],
[
[
"#!/usr/bin/env python\nimport argparse\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport chainerx\nfrom chainer import training\nfrom chainer.training import extensions\n\n\n# Network definition\nclass MLP(chainer.Chain):\n def __init__(self, n_units, n_out):\n super(MLP, self).__init__()\n with self.init_scope():\n # the size of the inputs to each layer will be inferred\n self.l1 = L.Linear(None, n_units) # n_in -> n_units\n self.l2 = L.Linear(None, n_units) # n_units -> n_units\n self.l3 = L.Linear(None, n_out) # n_units -> n_out\n\n def forward(self, x):\n h1 = F.relu(self.l1(x))\n h2 = F.relu(self.l2(h1))\n return self.l3(h2)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Chainer example: MNIST\")\n parser.add_argument(\n \"--batchsize\",\n \"-b\",\n type=int,\n default=100,\n help=\"Number of images in each mini-batch\",\n )\n parser.add_argument(\n \"--epoch\",\n \"-e\",\n type=int,\n default=20,\n help=\"Number of sweeps over the dataset to train\",\n )\n parser.add_argument(\n \"--frequency\", \"-f\", type=int, default=-1, help=\"Frequency of taking a snapshot\"\n )\n parser.add_argument(\n \"--device\",\n \"-d\",\n type=str,\n default=\"-1\",\n help=\"Device specifier. Either ChainerX device \"\n \"specifier or an integer. If non-negative integer, \"\n \"CuPy arrays with specified device id are used. If \"\n \"negative integer, NumPy arrays are used\",\n )\n parser.add_argument(\n \"--out\", \"-o\", default=\"result\", help=\"Directory to output the result\"\n )\n parser.add_argument(\n \"--resume\", \"-r\", type=str, help=\"Resume the training from snapshot\"\n )\n parser.add_argument(\"--unit\", \"-u\", type=int, default=1000, help=\"Number of units\")\n parser.add_argument(\n \"--noplot\",\n dest=\"plot\",\n action=\"store_false\",\n help=\"Disable PlotReport extension\",\n )\n group = parser.add_argument_group(\"deprecated arguments\")\n group.add_argument(\n \"--gpu\",\n \"-g\",\n dest=\"device\",\n type=int,\n nargs=\"?\",\n const=0,\n help=\"GPU ID (negative value indicates CPU)\",\n )\n args = parser.parse_args(args=[])\n\n device = chainer.get_device(args.device)\n\n print(\"Device: {}\".format(device))\n print(\"# unit: {}\".format(args.unit))\n print(\"# Minibatch-size: {}\".format(args.batchsize))\n print(\"# epoch: {}\".format(args.epoch))\n print(\"\")\n\n # Set up a neural network to train\n # Classifier reports softmax cross entropy loss and accuracy at every\n # iteration, which will be used by the PrintReport extension below.\n model = L.Classifier(MLP(args.unit, 10))\n model.to_device(device)\n device.use()\n\n # Setup an optimizer\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(model)\n\n # Load the MNIST dataset\n train, test = chainer.datasets.get_mnist()\n\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(\n test, args.batchsize, repeat=False, shuffle=False\n )\n\n # Set up a trainer\n updater = training.updaters.StandardUpdater(train_iter, optimizer, device=device)\n trainer = training.Trainer(updater, (args.epoch, \"epoch\"), out=args.out)\n\n # Evaluate the model with the test dataset for each epoch\n trainer.extend(extensions.Evaluator(test_iter, model, device=device))\n\n # Dump a computational graph from 'loss' variable at the first iteration\n # The \"main\" refers to the target link of the \"main\" optimizer.\n # TODO(niboshi): Temporarily disabled for chainerx. 
Fix it.\n if device.xp is not chainerx:\n trainer.extend(extensions.DumpGraph(\"main/loss\"))\n\n # Take a snapshot for each specified epoch\n frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)\n trainer.extend(extensions.snapshot(), trigger=(frequency, \"epoch\"))\n\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.LogReport())\n\n # Save two plot images to the result dir\n if args.plot and extensions.PlotReport.available():\n trainer.extend(\n extensions.PlotReport(\n [\"main/loss\", \"validation/main/loss\"], \"epoch\", file_name=\"loss.png\"\n )\n )\n trainer.extend(\n extensions.PlotReport(\n [\"main/accuracy\", \"validation/main/accuracy\"],\n \"epoch\",\n file_name=\"accuracy.png\",\n )\n )\n\n # Print selected entries of the log to stdout\n # Here \"main\" refers to the target link of the \"main\" optimizer again, and\n # \"validation\" refers to the default name of the Evaluator extension.\n # Entries other than 'epoch' are reported by the Classifier link, called by\n # either the updater or the evaluator.\n trainer.extend(\n extensions.PrintReport(\n [\n \"epoch\",\n \"main/loss\",\n \"validation/main/loss\",\n \"main/accuracy\",\n \"validation/main/accuracy\",\n \"elapsed_time\",\n ]\n )\n )\n\n # Print a progress bar to stdout\n trainer.extend(extensions.ProgressBar())\n\n if args.resume is not None:\n # Resume from a snapshot\n chainer.serializers.load_npz(args.resume, trainer)\n\n # Run the training\n trainer.run()\n\n\nif __name__ == \"__main__\":\n main()",
"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/chainer/_environment_check.py:41: UserWarning: Accelerate has been detected as a NumPy backend library.\nvecLib, which is a part of Accelerate, is known not to work correctly with Chainer.\nWe recommend using other BLAS libraries such as OpenBLAS.\nFor details of the issue, please see\nhttps://docs.chainer.org/en/stable/tips.html#mnist-example-does-not-converge-in-cpu-mode-on-mac-os-x.\n\nPlease be aware that Mac OS X is not an officially supported OS.\n\n ''') # NOQA\n"
]
],
[
[
"Wrap model using s2i",
"_____no_output_____"
]
],
[
[
"!s2i build . seldonio/seldon-core-s2i-python37-ubi8:1.7.0-dev chainer-mnist:0.1",
"---> Installing application source...\n---> Installing dependencies ...\nLooking in links: /whl\nCollecting chainer==6.2.0 (from -r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/2c/5a/86c50a0119a560a39d782c4cdd9b72927c090cc2e3f70336e01b19a5f97a/chainer-6.2.0.tar.gz (873kB)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (41.0.1)\nCollecting typing<=3.6.6 (from chainer==6.2.0->-r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/4a/bd/eee1157fc2d8514970b345d69cb9975dcd1e42cd7e61146ed841f6e68309/typing-3.6.6-py3-none-any.whl\nCollecting typing_extensions<=3.6.6 (from chainer==6.2.0->-r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/62/4f/392a1fa2873e646f5990eb6f956e662d8a235ab474450c72487745f67276/typing_extensions-3.6.6-py3-none-any.whl\nCollecting filelock (from chainer==6.2.0->-r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/93/83/71a2ee6158bb9f39a90c0dea1637f81d5eef866e188e1971a1b1ab01a35a/filelock-3.0.12-py3-none-any.whl\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (1.16.4)\nCollecting protobuf<3.8.0rc1,>=3.0.0 (from chainer==6.2.0->-r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/5a/aa/a858df367b464f5e9452e1c538aa47754d467023850c00b000287750fa77/protobuf-3.7.1-cp36-cp36m-manylinux1_x86_64.whl (1.2MB)\nRequirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (1.12.0)\nBuilding wheels for collected packages: chainer\nBuilding wheel for chainer (setup.py): started\nBuilding wheel for chainer (setup.py): finished with status 'done'\nStored in directory: /root/.cache/pip/wheels/2e/be/c5/6ee506abcaa4a53106f7d7671bbee8b4e5243bc562a9d32ad1\nSuccessfully built chainer\nInstalling collected packages: typing, typing-extensions, filelock, protobuf, chainer\nFound existing installation: protobuf 3.8.0\nUninstalling protobuf-3.8.0:\nSuccessfully uninstalled protobuf-3.8.0\nSuccessfully installed chainer-6.2.0 filelock-3.0.12 protobuf-3.7.1 typing-3.6.6 typing-extensions-3.6.6\nWARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nWARNING: You are using pip version 19.1, however version 19.2.2 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\nBuild completed successfully\n"
],
[
"!docker run --name \"mnist_predictor\" -d --rm -p 5000:5000 chainer-mnist:0.1",
"b03f58f82ca07e25261be34b75be4a0ffbbfa1ad736d3866790682bf0d8202a3\r\n"
]
],
[
[
"Send some random features that conform to the contract",
"_____no_output_____"
]
],
[
[
"!seldon-core-tester contract.json 0.0.0.0 5000 -p",
"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, 
(1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n----------------------------------------\nSENDING NEW REQUEST:\n\n[[0.997 0.039 0.778 0.59 0.526 0.591 0.659 0.423 0.404 0.302 0.322 0.453\n 0.54 0.852 0.268 0.564 0.163 0.032 0.934 0.317 0.395 0.122 0.056 0.729\n 0.106 0.443 0.334 0.784 0.646 0.296 0.524 0.855 0.503 0.727 0.326 0.491\n 0.385 0.042 0.82 0.715 0.972 0.699 0.431 0.618 0.096 0.849 0.224 0.187\n 0.145 0.357 0.187 0.779 0.009 0.775 0.775 0.584 0.897 0.674 0.01 0.775\n 0.095 0.081 0.089 0.351 0.985 0.878 0.906 0.396 0.499 0.646 0.127 0.966\n 0.087 0.668 0.314 0.853 0.55 0.345 0.95 0.792 0.797 0.037 0.18 0.592\n 0.941 0.662 0.101 0.388 0.902 0.868 0.505 0.824 0.8 0.855 0.568 0.368\n 0.605 0.224 0.214 0.582 0.365 0.44 0.389 0.922 0.028 0.142 0.525 0.843\n 0.706 0.61 0.215 0.962 0.334 0.273 0.365 0.075 0.929 0.693 0.382 0.76\n 0.75 0.403 0.344 0.218 0.831 0.431 0.469 0.527 0.755 0.048 0.407 0.953\n 0.468 0.186 0.589 0.839 0.513 0.307 0.251 0.738 0.173 0.185 0.499 0.797\n 0.264 0.149 0.547 0.699 0.935 0.071 0.145 0.853 0.884 0.195 0.944 0.775\n 0.523 0.627 0.729 0.826 0.894 0.117 0.935 0.363 0.03 0.16 0.435 0.579\n 0.954 0.487 0.133 0.348 0.12 0.741 0.203 0.103 0.334 0.009 0.898 0.597\n 0.375 0.241 0.27 0.094 0.819 0.737 0.147 0.715 0.138 0.801 0.427 0.602\n 0.336 0.796 0.691 0.415 0.329 0.155 0.17 0.152 0.237 0.957 0.298 0.837\n 0.982 0.805 0.972 0.125 0.916 0.101 0.054 0.347 0.566 0.232 0.885 0.864\n 0.049 0.205 0.361 0.767 0.099 0.634 0.359 0.975 0.56 0.289 0.49 0.359\n 0.901 0.39 0.197 0.985 0.141 0.232 0.336 0.932 0.923 0.032 0.126 0.51\n 0.571 0.743 0.831 0.999 0.972 0.649 0.527 0.909 0.071 0.539 0.676 0.851\n 0.104 0.103 0.392 0.641 0.838 0.333 0.453 0.573 0.199 0.924 0.588 0.955\n 0.866 0.085 0.985 0.803 0.386 0.713 0.056 0.972 0.489 0.623 0.108 0.904\n 0.746 0.986 0.824 0.996 0.161 0.738 0.24 0.153 0.935 0.782 0.393 0.098\n 0.449 0.24 0.621 0.293 0.569 0.196 0.893 0.605 0.608 0.114 0.383 0.038\n 0.573 0.373 0.474 0.006 0.292 0.738 0.943 0.65 0.553 0.684 0.3 0.587\n 0.183 0.521 0.211 0.074 0.696 0.672 0.206 0.694 0.129 0.81 0.415 0.56\n 0.994 0.686 0.807 0.514 0.215 0.096 0.295 0.233 0.625 0.663 0.794 0.16\n 0.837 0.194 0.07 0.939 0.965 0.142 0.66 0.152 0.249 0.995 0.892 0.265\n 0.865 0.742 0.19 0.03 0.42 0.807 0.15 0.163 0.529 0.23 0.59 0.676\n 0.121 0.474 0.329 0.383 0.534 0.093 0.861 0.058 0.019 0.212 0.296 0.947\n 0.879 0.445 0.357 0.021 0.551 0.362 0.653 0.258 0.146 0.453 0.373 0.448\n 0.339 0.974 0.266 0.656 0.036 0.698 0.651 0.91 0.438 0.767 0.716 0.267\n 0.871 0.781 0.13 0.912 0.13 0.332 0.647 0.31 0.171 0.323 0.703 0.197\n 0.918 0.803 0.43 0.103 0.606 0.955 0.733 0.902 0.139 0.471 0.994 0.393\n 0.95 0.485 0.782 0.213 0.994 0.206 0.938 0.019 0.429 0.135 0.811 0.209\n 0.991 0.93 0.878 0.742 0.859 0.397 0.128 0.087 0.447 0.392 0.61 0.18\n 0.087 0.641 0.31 0.033 0.211 0.431 0.051 0.639 0.461 0.466 0.171 0.736\n 0.727 0.183 0.542 0.416 0.524 0.251 0.513 0.087 0.395 0.164 0.25 0.384\n 0.705 0.683 0.827 0.188 0.163 0.325 0.256 0.904 0.161 0.334 0.639 0.728\n 0.267 0.463 0.373 0.111 0.585 0.794 0.972 0.281 0.984 0.564 0.671 0.868\n 0.741 0.638 0.702 0.778 0.667 0.372 0.818 0.49 0.102 0.403 0.187 0.283\n 0.492 0.937 0.643 
0.657 0.514 0.492 0.042 0.809 0.088 0.018 0.631 0.731\n 0.516 0.625 0.597 0.629 0.798 0.907 0.861 0.439 0.777 0.014 0.771 0.152\n 0.16 0.997 0.699 0.127 0.038 0.503 0.572 0.878 0.901 0.215 0.606 0.686\n 0.847 0.007 0.976 0.895 0.357 0.374 0.989 0.544 0.317 0.043 0.718 0.788\n 0.121 0.432 0.16 0.485 0.553 0.048 0.003 0.375 0.592 0.207 0.853 0.81\n 0.043 0.554 0.084 0.584 0.73 0.766 0.738 0.038 0.56 0.475 0.763 0.002\n 0.382 0.49 0.302 0.873 0.141 0.023 0.341 0.113 0.197 0.948 0.088 0.294\n 0.778 0.807 0.935 0.712 0.466 0.885 0.815 0.843 0.745 0.217 0.664 0.142\n 0.421 0.371 0.536 0.009 0.036 0.352 0.916 0.161 0.345 0.348 0.688 0.806\n 0.434 0.413 0.567 0.043 0.934 0.072 0.54 0.347 0.817 0.321 0.85 0.478\n 0.832 0.899 0.283 0.34 0.304 0.955 0.915 0.934 0.452 0.423 0.75 0.013\n 0.5 0.691 0.854 0.453 0.959 0.843 0.698 0.756 0.918 0.992 0.663 0.608\n 0.756 0.7 0.347 0.427 0.198 0.37 0.837 0.362 0.291 0.126 0.695 0.777\n 0.318 0.88 0.859 0.958 0.075 0.332 0.321 0.179 0.834 0.027 0.332 0.799\n 0.504 0.274 0.819 0.081 0.337 0.02 0.598 0.727 0.159 0.937 0.199 0.639\n 0.063 0.75 0.637 0.686 0.677 0.102 0.135 0.264 0.091 0.837 0.562 0.453\n 0.503 0.884 0.147 0.966 0.118 0.293 0.327 0.859 0.958 0.498 0.369 0.123\n 0.354 0.812 0.163 0.96 0.64 0.596 0.029 0.84 0.159 0.717 0.025 0.394\n 0.185 0.29 0.554 0.646 0.432 0.197 0.668 0.531 0.206 0.599 0.842 0.579\n 0.836 0.889 0.797 0.891 0.1 0.087 0.825 0.952 0.781 0.295 0.819 0.038\n 0.34 0.476 0.08 0.784 0.556 0.282 0.699 0.954 0.5 0.332 0.213 0.618\n 0.92 0.776 0.147 0.749 0.597 0.191 0.957 0.47 0.324 0.352 0.837 0.263\n 0.536 0.48 0.997 0.417 0.08 0.464 0.886 0.019 0.307 0.164 0.36 0.638\n 0.46 0.803 0.139 0.575]]\nTraceback (most recent call last):\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connection.py\", line 160, in _new_conn\n (self._dns_host, self.port), self.timeout, **extra_kw)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/util/connection.py\", line 80, in create_connection\n raise err\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/util/connection.py\", line 70, in create_connection\n sock.connect(sa)\nConnectionRefusedError: [Errno 61] Connection refused\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 355, in _make_request\n conn.request(method, url, **httplib_request_kw)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 1244, in request\n self._send_request(method, url, body, headers, encode_chunked)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 1290, in _send_request\n self.endheaders(body, encode_chunked=encode_chunked)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 1239, in endheaders\n self._send_output(message_body, encode_chunked=encode_chunked)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 1026, in _send_output\n self.send(msg)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/http/client.py\", line 966, in send\n self.connect()\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connection.py\", line 183, in connect\n conn 
= self._new_conn()\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connection.py\", line 169, in _new_conn\n self, \"Failed to establish a new connection: %s\" % e)\nurllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPConnection object at 0x1232a2050>: Failed to establish a new connection: [Errno 61] Connection refused\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/adapters.py\", line 449, in send\n timeout=timeout\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 641, in urlopen\n _stacktrace=sys.exc_info()[2])\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/urllib3/util/retry.py\", line 399, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='0.0.0.0', port=5000): Max retries exceeded with url: /predict (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x1232a2050>: Failed to establish a new connection: [Errno 61] Connection refused'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/bin/seldon-core-tester\", line 10, in <module>\n sys.exit(main())\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/seldon_core/microservice_tester.py\", line 258, in main\n run_predict(args)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/seldon_core/microservice_tester.py\", line 225, in run_predict\n response = sc.microservice(data=batch, transport=transport, method=\"predict\", payload_type=payload_type, names=feature_names)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/seldon_core/seldon_client.py\", line 395, in microservice\n return microservice_api_rest_seldon_message(**k)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/seldon_core/seldon_client.py\", line 534, in microservice_api_rest_seldon_message\n data={\"json\": json.dumps(payload)})\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/api.py\", line 116, in post\n return request('post', url, data=data, json=json, **kwargs)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/api.py\", line 60, in request\n return session.request(method=method, url=url, **kwargs)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/sessions.py\", line 533, in request\n resp = self.send(prep, **send_kwargs)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/sessions.py\", line 646, in send\n r = adapter.send(request, **kwargs)\n File \"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/requests/adapters.py\", line 516, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='0.0.0.0', port=5000): Max retries exceeded with url: /predict (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x1232a2050>: Failed to establish a new connection: [Errno 61] Connection refused'))\n"
],
[
"!docker rm mnist_predictor --force",
"Error: No such container: mnist_predictor\r\n"
]
],
[
[
"## Test using Minikube\n\n**Due to a [minikube/s2i issue](https://github.com/SeldonIO/seldon-core/issues/253) you will need [s2i >= 1.1.13](https://github.com/openshift/source-to-image/releases/tag/v1.1.13)**",
"_____no_output_____"
]
],
[
[
"!minikube start --memory 4096",
"😄 minikube v1.2.0 on darwin (amd64)\n🔥 Creating virtualbox VM (CPUs=2, Memory=4096MB, Disk=20000MB) ...\n🐳 Configuring environment for Kubernetes v1.15.0 on Docker 18.09.6\n🚜 Pulling images ...\n🚀 Launching Kubernetes ... \n⌛ Verifying: apiserver proxy etcd scheduler controller dns\n🏄 Done! kubectl is now configured to use \"minikube\"\n"
]
],
[
[
"## Setup Seldon Core\n\nUse the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Install Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core). Instructions [also online](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).",
"_____no_output_____"
]
],
[
[
"!eval $(minikube docker-env) && s2i build . seldonio/seldon-core-s2i-python37-ubi8:1.7.0-dev chainer-mnist:0.1",
"---> Installing application source...\n---> Installing dependencies ...\nLooking in links: /whl\nCollecting chainer==6.2.0 (from -r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/2c/5a/86c50a0119a560a39d782c4cdd9b72927c090cc2e3f70336e01b19a5f97a/chainer-6.2.0.tar.gz (873kB)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (41.0.1)\nCollecting typing<=3.6.6 (from chainer==6.2.0->-r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/4a/bd/eee1157fc2d8514970b345d69cb9975dcd1e42cd7e61146ed841f6e68309/typing-3.6.6-py3-none-any.whl\nCollecting typing_extensions<=3.6.6 (from chainer==6.2.0->-r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/62/4f/392a1fa2873e646f5990eb6f956e662d8a235ab474450c72487745f67276/typing_extensions-3.6.6-py3-none-any.whl\nCollecting filelock (from chainer==6.2.0->-r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/93/83/71a2ee6158bb9f39a90c0dea1637f81d5eef866e188e1971a1b1ab01a35a/filelock-3.0.12-py3-none-any.whl\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (1.16.4)\nCollecting protobuf<3.8.0rc1,>=3.0.0 (from chainer==6.2.0->-r requirements.txt (line 1))\n WARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nDownloading https://files.pythonhosted.org/packages/5a/aa/a858df367b464f5e9452e1c538aa47754d467023850c00b000287750fa77/protobuf-3.7.1-cp36-cp36m-manylinux1_x86_64.whl (1.2MB)\nRequirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/site-packages (from chainer==6.2.0->-r requirements.txt (line 1)) (1.12.0)\nBuilding wheels for collected packages: chainer\nBuilding wheel for chainer (setup.py): started\nBuilding wheel for chainer (setup.py): finished with status 'done'\nStored in directory: /root/.cache/pip/wheels/2e/be/c5/6ee506abcaa4a53106f7d7671bbee8b4e5243bc562a9d32ad1\nSuccessfully built chainer\nInstalling collected packages: typing, typing-extensions, filelock, protobuf, chainer\nFound existing installation: protobuf 3.8.0\nUninstalling protobuf-3.8.0:\nSuccessfully uninstalled protobuf-3.8.0\nSuccessfully installed chainer-6.2.0 filelock-3.0.12 protobuf-3.7.1 typing-3.6.6 typing-extensions-3.6.6\nWARNING: Url '/whl' is ignored. It is either a non-existing path or lacks a specific scheme.\nWARNING: You are using pip version 19.1, however version 19.2.2 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\nBuild completed successfully\n"
],
[
"!kubectl create -f chainer_mnist_deployment.json",
"seldondeployment.machinelearning.seldon.io/seldon-deployment-example created\r\n"
],
[
"!kubectl rollout status deploy/chainer-mnist-deployment-chainer-mnist-predictor-76478b2",
"Waiting for deployment \"chainer-mnist-deployment-chainer-mnist-predictor-76478b2\" rollout to finish: 0 of 1 updated replicas are available...\ndeployment \"chainer-mnist-deployment-chainer-mnist-predictor-76478b2\" successfully rolled out\n"
],
[
"!seldon-core-api-tester contract.json `minikube ip` `kubectl get svc ambassador -o jsonpath='{.spec.ports[0].nodePort}'` \\\n seldon-deployment-example --namespace default -p",
"/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, 
(1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/Users/dtaniwaki/.pyenv/versions/3.7.4/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n----------------------------------------\nSENDING NEW REQUEST:\n\n[[0.64 0.213 0.028 0.604 0.586 0.076 0.629 0.568 0.806 0.931 0.266 0.098\n 0.526 0.336 0.569 0.965 0.157 0.401 0.15 0.405 0.594 0.21 0.699 0.085\n 0.314 0.467 0.303 0.384 0.788 0.135 0.349 0.467 0.025 0.525 0.767 0.819\n 0.275 0.212 0.784 0.448 0.808 0.582 0.939 0.165 0.761 0.272 0.332 0.321\n 0.005 0.921 0.285 0.181 0.161 0.948 0.148 0.788 0.664 0.65 0.795 0.548\n 0.754 0.407 0.057 0.429 0.569 0.538 0.295 0.4 0.581 0.569 0.299 0.066\n 0.456 0.118 0.983 0.93 0.316 0.865 0.492 0.048 0.505 0.573 0.595 0.13\n 0.595 0.595 0.474 0.334 0.708 0.25 0.183 0.391 0.268 0.252 0.366 0.029\n 0.676 0.869 0.12 0.737 0.502 0.868 0.846 0.891 0.578 0.598 0.984 0.543\n 0.515 0.081 0.998 0.976 0.611 0.492 0.494 0.985 0.443 0.246 0.252 0.871\n 0.615 0.885 0.903 0.254 0.651 0.412 0.645 0.608 0.921 0.5 0.18 0.845\n 0.91 0.601 0.782 0.27 0.643 0.671 0.273 0.37 0.454 0.08 0.854 0.439\n 0.912 0.709 0.703 0.817 0.381 0.963 0.057 0.015 0.126 0.686 0.284 0.463\n 0.231 0.332 0.932 0.804 0.538 0.039 0.12 0.992 0.436 0.791 0.261 0.842\n 0.901 0.208 0.578 0.423 0.657 0.293 0.633 0.45 0.609 0.715 0.149 0.244\n 0.026 0.332 0.525 0.157 0.749 0.88 0.713 0.405 0.473 0.01 0.038 0.807\n 0.934 0.157 0.141 0.155 0.124 0.781 0.738 0.018 0.42 0.635 0.867 0.925\n 0.398 0.505 0.695 0.429 0.174 0.327 0.123 0.967 0.378 0.224 0.393 0.053\n 0.344 0.731 0.02 0.848 0.079 0.814 0.023 0.087 0.578 0.642 0.18 0.563\n 0.276 0.491 0.021 0.719 0.85 0.156 0.031 0.506 0.271 0.095 0.186 0.002\n 0.799 0.138 0.734 0.925 0.881 0.187 0.559 0.946 0.826 0.488 0.744 0.322\n 0.333 0.322 0.665 0.032 0.663 0.754 0.495 0.569 0.917 0.167 0.168 0.409\n 0.369 0.363 0.23 0.961 0.201 0.463 0.565 0.834 0.431 0.848 0.742 0.436\n 0.061 0.656 0.3 0.128 0.485 0.78 0.617 0.082 0.396 0.416 0.673 0.961\n 0.727 0.986 0.222 0.909 0.898 0.144 0.639 0.046 0.101 0.546 0.782 0.069\n 0.672 0.824 0.861 0.981 0.003 0.591 0.303 0.384 0.67 0.7 0.834 0.475\n 0.932 0.949 0.938 0.945 0.368 0.522 0.833 0.045 0.452 0.068 0.165 0.569\n 0.44 0.702 0.727 0.069 0.686 0.262 0.891 0.547 0.994 0.454 0.947 0.364\n 0.154 0.322 0.571 0.19 0.476 0.925 0.871 0.605 0.442 0.585 0.544 0.316\n 0.915 0.253 0.973 0.501 0.402 0.96 0.206 0.501 0.37 0.463 0.904 0.981\n 0.969 0.877 0.724 0.5 0.447 0.499 0.443 0.349 0.79 0.051 0.384 0.27\n 0.094 0.774 0.742 0.16 0.517 0.266 0.908 0.796 0.862 0.987 0.939 0.909\n 0.962 0.587 0.964 0.159 0.029 0.952 0.416 0.72 0.346 0.257 0.152 0.233\n 0.862 0.457 0.153 0.076 0.105 0.634 0.652 0.435 0.757 0.985 0.487 0.114\n 0.95 0.217 0.877 0.483 0.302 0.929 0.856 0.768 0.223 0.006 0.841 0.565\n 0.611 0.407 0.71 0.588 0.654 0.197 0.506 0.938 0.779 0.387 0.007 0.482\n 0.523 0.993 0.671 0.044 0.497 0.71 0.418 0.06 0.114 0.082 0.811 0.083\n 0.773 0.134 0.87 0.414 0.787 0.972 0.132 0.047 0.593 0.502 0.15 0.042\n 0.363 0.311 0.17 0.895 0.569 0.774 0.006 0.408 0.92 0.753 0.543 0.279\n 0.911 0.314 0.195 0.538 0.977 0.606 0.954 0.378 0.397 0.261 0.085 0.656\n 0.978 0.598 0.216 0.832 0.105 0.958 0.185 0.81 0.444 0.308 0.013 0.176\n 0.603 0.383 0.671 0.436 0.981 0.072 0.713 0.349 0.962 0.055 0.315 0.417\n 0.052 0.076 0.198 
0.786 0.397 0.757 0.145 0.539 0.671 0.583 0.42 0.575\n 0.563 0.286 0.788 0.481 0.403 0.85 0.864 0.945 0.427 0.511 0.268 0.091\n 0.049 0.611 0.137 0.58 0.281 0.057 0.453 0.461 0.895 0.701 0.662 0.599\n 0.967 0.562 0.295 0.6 0.742 0.909 0.69 0.383 0.553 0.078 0.949 0.109\n 0.771 0.083 0.712 0.514 0.549 0.403 0.575 0.494 0.31 0.307 0.091 0.874\n 0.591 0.315 0.199 0.372 0.131 0.905 0.32 0.284 0.516 0.055 0.832 0.042\n 0.927 0.667 0.273 0.426 0.054 0.799 0.356 0.564 0.223 0.772 0.79 0.628\n 0.893 0.512 0.523 0.518 0.48 0.869 0.49 0.416 0.775 0.864 0.921 0.968\n 0.109 0.812 0.943 0.042 0.179 0.943 0.324 0.079 0.017 0.226 0.848 0.803\n 0.873 0.834 0.696 0.582 0.125 0.042 0.917 0.909 0.491 0.5 0.101 0.779\n 0.65 0.424 0.94 0.582 0.706 0.935 0.286 0.057 0.544 0.198 0.893 0.537\n 0.405 0.91 0.908 0.297 0.288 0.368 0.654 0.347 0.002 0.677 0.32 0.691\n 0.17 0.133 0.586 0.857 0.001 0.639 0.223 0.164 0.689 0.97 0.913 0.947\n 0.962 0.44 0.201 0.343 0.493 0.662 0.728 0.295 0.445 0.739 0.764 0.955\n 0.206 0.298 0.996 0.835 0.983 0.033 0.801 0.284 0.621 0.941 0.293 0.865\n 0.158 0.788 0.681 0.613 0.705 0.753 0.006 0.175 0.414 0.299 0.116 0.67\n 0.66 0.845 0.905 0.369 0.11 0.841 0.717 0.348 0.537 0.116 0.024 0.575\n 0.211 0.427 0.84 0.447 0.056 0.427 0.39 0.424 0.48 0.738 0.698 0.377\n 0.143 0.242 0.877 0.238 0.188 0.786 0.965 0.112 0.952 0.679 0.916 0.13\n 0.882 0.353 0.433 0.608 0.297 0.558 0.663 0.646 0.185 0.91 0.131 0.217\n 0.549 0.759 0.087 0.96 0.11 0.613 0.643 0.218 0.126 0.535 0.751 0.097\n 0.681 0.782 0.367 0.197 0.05 0.742 0.623 0.763 0.625 0.317 0.364 0.879\n 0.445 0.751 0.87 0.727 0.879 0.035 0.412 0.907 0.895 0.923 0.373 0.22\n 0.21 0.176 0.182 0.821]]\n"
],
[
"!minikube delete",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
d04d9b7066c78b19c9078dc572a2b6fec4eddd78 | 11,418 | ipynb | Jupyter Notebook | src/NN_13100+15112.ipynb | VoyagerIII/DigiX-- | f753d994be30e2174cd152ba08722e5b030f1dcf | [
"MIT"
] | 2 | 2020-04-14T07:11:50.000Z | 2020-05-14T03:46:59.000Z | src/NN_13100+15112.ipynb | VoyagerIII/DigiX-Human-Age-group-Classification | f753d994be30e2174cd152ba08722e5b030f1dcf | [
"MIT"
] | null | null | null | src/NN_13100+15112.ipynb | VoyagerIII/DigiX-Human-Age-group-Classification | f753d994be30e2174cd152ba08722e5b030f1dcf | [
"MIT"
] | null | null | null | 29.053435 | 924 | 0.547381 | [
[
[
"# coding=utf-8\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"from keras.utils import np_utils\nfrom keras.models import Sequential,load_model,save_model\nfrom keras.layers import Dense, Dropout, Activation,LeakyReLU\nfrom keras.optimizers import SGD, Adam\nfrom keras.callbacks import EarlyStopping,ModelCheckpoint\nfrom keras import backend as K\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import roc_auc_score,accuracy_score\nfrom scipy import sparse\nimport gc\nfrom time import strftime, localtime",
"_____no_output_____"
],
[
"# 打印当前时间\ndef printTime():\n print(strftime(\"%Y-%m-%d %H:%M:%S\", localtime()))\n return",
"_____no_output_____"
],
[
"printTime()",
"_____no_output_____"
],
[
"csr_trainData0 = sparse.load_npz(r'../trainTestData/trainData13100.npz')\ncsr_trainData0.shape\n\ncsr_trainData1 = sparse.load_npz(r'../trainTestData/trainData15112.npz')\ncsr_trainData1.shape\n\ncsr_trainData = sparse.hstack((csr_trainData0,csr_trainData1),format='csr')\ndel csr_trainData0,csr_trainData1\ngc.collect()",
"_____no_output_____"
],
[
"age_train = pd.read_csv(r'../data/age_train.csv',header=None)\nlabel = age_train[1].values\nprint(label.shape)",
"_____no_output_____"
],
[
"import time\n\nseed = 7\nnp.random.seed(seed)",
"_____no_output_____"
],
[
"kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)",
"_____no_output_____"
],
[
"model_filePath = r'../model/model28212_NN_'\ncurrK = 0\nval_index_list, score = [], []\nval_probability = np.zeros((2010000,7))",
"_____no_output_____"
],
[
"printTime()\nfor train_index, val_index in kfold.split(csr_trainData,label):\n K.clear_session()\n trainData, trainLabel, valData, valLabel = csr_trainData[train_index,:], label[train_index], csr_trainData[val_index,:] , label[val_index] \n trainLabel,valLabel = np_utils.to_categorical(trainLabel,num_classes=7),np_utils.to_categorical(valLabel,num_classes=7)\n print('----------------------------------------------------------------------------------------------------------------------------------')\n print(currK,'split Done!\\n')\n \n # 全连接模型\n model = Sequential()\n model.add(Dense(4000, activation='tanh', input_shape=(csr_trainData.shape[1],)))\n model.add(Dense(2000, activation='relu'))\n model.add(Dense(1000, activation='sigmoid'))\n model.add(Dense(7, activation='softmax'))\n #损失函数使用交叉熵\n adam = Adam(lr=0.0003)\n model.compile(loss='categorical_crossentropy',\n optimizer = adam,\n metrics=['accuracy'])\n #模型训练\n batch_size = 1024\n epochs = 100\n early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=2)\n bestModel = ModelCheckpoint(model_filePath + str(currK) + r'.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n hist = model.fit(trainData, trainLabel,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n shuffle=True,\n validation_data=(valData,valLabel),\n callbacks=[early_stopping,bestModel],\n ) \n print('\\n',currK,'train Done!')\n printTime()\n \n K.clear_session()\n model = load_model(model_filePath + str(currK) + r'.h5')\n probability = model.predict(valData,batch_size=1024)\n val_probability[val_index,:] = probability\n \n score.append(np.max(hist.history['val_acc']))\n y_label = label[val_index]\n val_label = np.argmax(probability,axis=1) \n print(currK,'val_acc:',accuracy_score(val_label,y_label),'\\n\\n')\n \n currK += 1\n K.clear_session()\n del trainData, valData, trainLabel,valLabel,model\n print('----------------------------------------------------------------------------------------------------------------------------------')\nprint('mean val_acc:', np.mean(score))\nprintTime()",
"_____no_output_____"
],
[
"accuracy_score(np.argmax(val_probability,axis=1) ,label)",
"_____no_output_____"
],
[
"del csr_trainData",
"_____no_output_____"
],
[
"import gc \ngc.collect()",
"_____no_output_____"
]
],
[
[
"# 验证集",
"_____no_output_____"
]
],
[
[
"val_probability = pd.DataFrame(val_probability)\nprint(val_probability.shape)\nprint(val_probability.head())",
"_____no_output_____"
],
[
"val_probability.drop(labels=[0],axis=1,inplace=True)",
"_____no_output_____"
],
[
"val_probability.to_csv(r'../processed/val_probability_28212.csv',header=None,index=False)",
"_____no_output_____"
]
],
[
[
"# 测试集",
"_____no_output_____"
]
],
[
[
"import os",
"_____no_output_____"
],
[
"model_file = r'../model/model28212_NN_'",
"_____no_output_____"
],
[
"csr_testData0 = sparse.load_npz(r'../trainTestData/trainData13100.npz')\ncsr_testData0.shape\n\ncsr_testData1 = sparse.load_npz(r'../trainTestData/trainData15112.npz')\ncsr_testData1.shape\n\ncsr_testData = sparse.hstack((csr_testData0, csr_testData1),format='csr')\ndel csr_trainData0,csr_trainData1\ngc.collect()",
"_____no_output_____"
],
[
"age_test = pd.read_csv(r'../data/age_test.csv',header=None,usecols=[0])",
"_____no_output_____"
],
[
"printTime()\nproflag = True\nmodel_Num = 0\nfor i in list(range(10)):\n model = load_model(model_file + str(i) + '.h5')\n if proflag==True:\n probability = model.predict(csr_testData,batch_size=1024,verbose=1)\n proflag = False\n else:\n probability += model.predict(csr_testData,batch_size=1024,verbose=1)\n model_Num += 1\n print(model_Num)\n K.clear_session()\n del model\nprintTime()",
"_____no_output_____"
],
[
"model_Num",
"_____no_output_____"
],
[
"probability /= model_Num\nage = np.argmax(probability,axis=1)",
"_____no_output_____"
],
[
"age_test = pd.read_csv(r'../data/age_test.csv',header=None,usecols=[0])\nage_test = age_test.values\ntype(age_test)",
"_____no_output_____"
],
[
"print(probability.shape)\npro = np.column_stack((age_test,probability))\npro = pd.DataFrame(pro)\npro.drop(labels=[0,1],axis=1,inplace=True)\nprint(pro.shape)\npro.to_csv(r'../processed/test_probability_28212.csv',index=False,header=False)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |