hexsha
stringlengths 40
40
| size
int64 6
14.9M
| ext
stringclasses 1
value | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 6
260
| max_stars_repo_name
stringlengths 6
119
| max_stars_repo_head_hexsha
stringlengths 40
41
| max_stars_repo_licenses
list | max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 6
260
| max_issues_repo_name
stringlengths 6
119
| max_issues_repo_head_hexsha
stringlengths 40
41
| max_issues_repo_licenses
list | max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 6
260
| max_forks_repo_name
stringlengths 6
119
| max_forks_repo_head_hexsha
stringlengths 40
41
| max_forks_repo_licenses
list | max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | avg_line_length
float64 2
1.04M
| max_line_length
int64 2
11.2M
| alphanum_fraction
float64 0
1
| cells
list | cell_types
list | cell_type_groups
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec62a595fd6ef0e264eec1cd0ae6f1306a595555 | 240,805 | ipynb | Jupyter Notebook | Exam2_Answers.ipynb | Aidapinacho/AIDA_IntroToBioinformatics | b587226db0d196e9c5985f29beb83473c33f5c20 | [
"MIT"
]
| null | null | null | Exam2_Answers.ipynb | Aidapinacho/AIDA_IntroToBioinformatics | b587226db0d196e9c5985f29beb83473c33f5c20 | [
"MIT"
]
| null | null | null | Exam2_Answers.ipynb | Aidapinacho/AIDA_IntroToBioinformatics | b587226db0d196e9c5985f29beb83473c33f5c20 | [
"MIT"
]
| null | null | null | 54.174353 | 427 | 0.584996 | [
[
[
"**Exam 2 AIDA PINACHO** ..",
"_____no_output_____"
]
],
[
[
"%load_ext sql\n#%config SqlMagic.autocommit=False\n%sql mysql+pymysql://root:[email protected]:3306/mysql\n",
"The sql extension is already loaded. To reload it, use:\n %reload_ext sql\n"
]
],
[
[
"## Problem 1: Controls\n\nWrite a Python script that proves that the lines of data in Germplasm.tsv, and LocusGene are in the same sequence, based on the AGI Locus Code (ATxGxxxxxx). (hint: This will help you decide how to load the data into the database)",
"_____no_output_____"
]
],
[
[
"#First we need to import the files we are going to use and then open them and read their lines\n\nimport re\n\nlocus_file = open (\"LocusGene.tsv\", \"r\")\ngerm_file = open (\"Germplasm.tsv\", \"r\")\n\nlocus_file = locus_file.readlines()\ngerm_file = germ_file.readlines() \n\n#we can now check if the size of both files is the same\n\nlocus_size = len(locus_file)\ngerm_size = len(germ_file)\n\nif locus_size == germ_size:\n print(\"both files have the same size\")\nelse:\n print(\"the files don't have the same size\")\n \n\n#Now, we know that both files have the same size but we need to know if the AGI Locus code in both of them match. For this, we can create a loop in which it takes all the AGI LOcus Code from both files and copy them into lists in which we can check if the match or not.\n\nlocus = []\ngerm = []\n\nfor i in range (0,locus_size):\n matchObj = re.search(r'AT\\dG\\d+', locus_file[i])\n matchObj_2 = re.search(r'AT\\dG\\d+', germ_file[i])\n if matchObj:\n Locus_AGI = matchObj.group()\n locus.append(Locus_AGI) \n \n Germ_AGI = matchObj_2.group()\n germ.append(Germ_AGI)\n \n else:\n print(\"Done\")\n\nif locus == germ:\n print(\"Both files are the same\")\nelse:\n print (\"The files are not the same\")\n \nprint (locus)\nprint (\"\")\nprint(germ) ",
"both files have the same size\nDone\nBoth files are the same\n['AT1G01040', 'AT1G01060', 'AT1G01140', 'AT1G01220', 'AT2G03720', 'AT2G03800', 'AT2G04240', 'AT2G05210', 'AT3G02130', 'AT3G02140', 'AT3G02230', 'AT3G02260', 'AT3G02310', 'AT3G02680', 'AT3G02850', 'AT3G02870', 'AT3G03260', 'AT4G14790', 'AT4G15210', 'AT4G15560', 'AT4G15570', 'AT4G15802', 'AT4G15880', 'AT4G16420', 'AT4G16480', 'AT5G10480', 'AT5G10510', 'AT5G11110', 'AT5G11260', 'AT5G11510', 'AT5G12200', 'AT5G13290']\n\n['AT1G01040', 'AT1G01060', 'AT1G01140', 'AT1G01220', 'AT2G03720', 'AT2G03800', 'AT2G04240', 'AT2G05210', 'AT3G02130', 'AT3G02140', 'AT3G02230', 'AT3G02260', 'AT3G02310', 'AT3G02680', 'AT3G02850', 'AT3G02870', 'AT3G03260', 'AT4G14790', 'AT4G15210', 'AT4G15560', 'AT4G15570', 'AT4G15802', 'AT4G15880', 'AT4G16420', 'AT4G16480', 'AT5G10480', 'AT5G10510', 'AT5G11110', 'AT5G11260', 'AT5G11510', 'AT5G12200', 'AT5G13290']\n"
]
],
[
[
"## Problem 2: Design and create the database. \n* It should have two tables - one for each of the two data files.\n* The two tables should be linked in a 1:1 relationship\n* you may use either sqlMagic or pymysql to build the database\n",
"_____no_output_____"
]
],
[
[
"%load_ext sql\n#%config SqlMagic.autocommit=False\n%sql mysql+pymysql://root:[email protected]:3306/mysql\n",
"The sql extension is already loaded. To reload it, use:\n %reload_ext sql\n"
],
[
"%sql CREATE DATABASE Exam_2;",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n1 rows affected.\n"
],
[
"%sql show databases;",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n6 rows affected.\n"
],
[
"%sql use Exam_2;",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n0 rows affected.\n"
],
[
"%sql show tables;",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n0 rows affected.\n"
],
[
"#%sql drop table locus_table;\n#%sql drop table germ_table;\n#In case we need to start from the beginning\n\n%sql CREATE TABLE locus_table (id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, locus VARCHAR(20) NOT NULL, gene VARCHAR(30) NOT NULL, Protein_length INTEGER NOT NULL);\n%sql CREATE TABLE germ_table (id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, locus VARCHAR(20) NOT NULL, germplasm VARCHAR(30) NOT NULL, phenotype VARCHAR(500) NOT NULL, pubmed_link INTEGER NOT NULL);\n\n#As we want the two tables linked 1:1, we will use the same PRIMARY KEY as the conection being it the same for both of the tables\n\n%sql DESCRIBE locus_table;\n%sql DESCRIBE germ_table;",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n0 rows affected.\n * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n0 rows affected.\n * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n0 rows affected.\n * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n0 rows affected.\n * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n4 rows affected.\n * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n5 rows affected.\n"
],
[
"%sql SELECT * from locus_table;",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n0 rows affected.\n"
],
[
"%sql SELECT * from germ_table",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n0 rows affected.\n"
]
],
[
[
"## Problem 3: Fill the database\nUsing pymysql, create a Python script that reads the data from these files, and fills the database. There are a variety of strategies to accomplish this. I will give all strategies equal credit - do whichever one you are most confident with.",
"_____no_output_____"
]
],
[
[
"import pymysql.cursors\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='Exam_2',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\nconnection.autocommit(True)\n\ntry:\n with connection.cursor() as cursor:\n for i in range (1,len(locus_file)):\n locus_filled = locus_file[i].split('\\t')\n \n locus = locus_filled [0]\n gene = locus_filled[1]\n Protein_length = locus_filled[2]\n \n sql = \"INSERT INTO locus_table(locus, gene, Protein_length) values ('{}', '{}', '{}')\".format(locus, gene, Protein_length)\n cursor.execute(sql)\n \n \n germ_filled = germ_file[i].split('\\t')\n #in this table we begin with [1] because both tables have the column \"locus\" and it is the same in both\n germplasm = germ_filled [1]\n phenotype = germ_filled [2]\n pubmed_link = germ_filled [3]\n \n sql = \"INSERT INTO germ_table(locus, germplasm, phenotype, pubmed_link) values ('{}', '{}', '{}', '{}')\".format(locus, germplasm, phenotype, pubmed_link)\n cursor.execute(sql)\n \n connection.commit()\n \nfinally:\n print(\"Everything correct\")\n connection.close()\n\n \n#In this exercise, in order to read the files and fill the tables created, we can use pymysql to extract the data from the files using \"for\"and then tell the program to fill the tables with the data from the files using \"try-with\" giving the names of the columns and the format we need to fill\n\n",
"Everything correct\n"
],
[
"# in order to see if the tables are well filled\n%sql SELECT * from locus_table",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n160 rows affected.\n"
],
[
"%sql SELECT * from germ_table",
" * mysql+pymysql://root:***@127.0.0.1:3306/mysql\n128 rows affected.\n"
]
],
[
[
"## Problem 4: Create reports, written to a file\n\n1. Create a report that shows the full, joined, content of the two database tables (including a header line)\n\n2. Create a joined report that only includes the Genes SKOR and MAA3\n\n3. Create a report that counts the number of entries for each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx)\n\n4. Create a report that shows the average protein length for the genes on each Chromosome (AT1Gxxxxxx to AT5Gxxxxxxx)\n\nWhen creating reports 2 and 3, remember the \"Don't Repeat Yourself\" rule! \n\nAll reports should be written to **the same file**. You may name the file anything you wish.",
"_____no_output_____"
]
],
[
[
"import pymysql.cursors\nimport csv\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='Exam_2',\n charset='utf8mb4', \n cursorclass=pymysql.cursors.DictCursor)\n\nconnection.autocommit(True)\n\ntry:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM germ_table, locus_table WHERE germ_table.locus = locus_table.locus\"\n cursor.execute(sql)\n results = cursor.fetchall()\n with open ('AIDA_FINAL_REPORT.csv', 'w') as csvfile:\n answerwriter = csv.writer(csvfile, delimiter=\"\\t\", quotechar='\"')\n answerwriter.writerow([\"Problem 1\"])\n answerwriter.writerow([\"Locus\", \"Germplasm\", \"Phenotype\", \"pubmed_link\", \"Gene\", \"Protein_length\"])\n for line in results:\n answerwriter.writerow([line['locus'], line['germplasm'], line['phenotype'], line['pubmed_link'], line['gene'], line['Protein_length']])\n\n connection.commit() \n \nfinally:\n print(\"Problem 1 done\")\n connection.close()\n \n\nl = open('AIDA_FINAL_REPORT.csv', 'r') ## In order to see the content of the file every time we add some information. \nfor line in l.readlines():\n print(line)\n ",
"Problem 1 done\nProblem 1\n\nLocus\tGermplasm\tPhenotype\tpubmed_link\tGene\tProtein_length\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. 
No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G04240\txerico\tResistant to exogenous ABA. 
Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. 
These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot 
emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. 
Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green 
leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. 
Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo 
lethal.\t18799749\tPAS2\t301\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. 
Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. 
After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically 
significant.\t16891401\tLHY\t290\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. 
No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. 
These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their 
cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. 
Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content 
compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. 
Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo 
lethal.\t18799749\tPAS2\t301\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. 
Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. 
After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically 
significant.\t16891401\tLHY\t290\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. 
No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. 
These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their 
cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. 
Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content 
compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. 
Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo 
lethal.\t18799749\tPAS2\t301\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. 
Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. 
After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically 
significant.\t16891401\tLHY\t290\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. 
No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. 
These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their 
cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. 
Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content 
compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. 
Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo 
lethal.\t18799749\tPAS2\t301\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. 
Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. 
After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01040\tCS3828\tIncreased abundance of miRNA precursors.\t17369351\tDCL1\t332\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant.\t16891401\tLHY\t290\n\nAT1G01060\tlhy-101\tThe mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically 
significant.\t16891401\tLHY\t290\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01140\tSALK_058629\thypersensitive to low potassium media\t17486125\tCIPK9\t223\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT1G01220\tSALK_012400C\tfkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. 
No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected.\t18199744\tFKGP\t190\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03720\tSALK_042433\tMultiple straight hairs\t16367956\tMRH6\t189\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G03800\tgek1-1\tEthanol hypersensitivity.\t15215505\tGEK1\t196\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G04240\txerico\tResistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype.\t17933900\tXERICO\t256\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT2G05210\tpot1-1\tNo visible phenotype.\t17627276\tPOT1A\t221\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. 
These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02130\trpk2-2\tThe homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants.\t17419837\tRPK2\t284\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02140\tafp4-1\tDecreased germination on high concentrations of glucose and sorbitol.\t18484180\tTMAC2\t300\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their 
cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02230\trgp1-1\trgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls.\t21478444\tRGP1\t301\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02260\ttir3-1 RGLG1:rglg1 rglg2\tThe triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence.\t17586653\tBIG\t279\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02310\tsep2-1\tNon-described subtle phenotype.\t10821278\tSEP2\t175\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. 
Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02680\tatnbs1-1\tSignificantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions.\t17672843\tNBS1\t190\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G02870\tvtc4-1\tascorbate deficient\t16595667\tVTC4\t311\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT3G03260\thdg8-1\tNo visible phenotype.\t16778018\tHDG8\t194\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G14790\tpdd17\tDefective pollen development.\t19237690\tSUV3\t312\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15210\tbmy1-2\tPlants cold-shocked for 6h have an increased starch content 
compared to wildtype.\t16297066\tBAM5\t313\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15560\tcla1-1\tMutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves.\t10982425\tDXS\t219\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. 
Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15802\tAthspb-2\tEarly flowering, reduced fertility, aborted seeds.\t20388662\tHSBP\t254\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G15880\tesd4-2\tDecreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1.\t17513499\tESD4\t265\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16420\tprz1-1\tAltered response to auxin and cytokinin\t12747832\tADA2B\t279\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT4G16480\tatint4-2\tNo visible phenotype.\t16603666\tINT4\t284\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo lethal.\t18799749\tPAS2\t301\n\nAT5G10480\tpas2-3\tSegregates 25% embryo 
lethal.\t18799749\tPAS2\t301\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G10510\tplt3-1\tShort roots and shortened root meristem.\t17960244\tAIL6\t310\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11110\tkns2\tDefects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile.\t18779216\tSPS2\t232\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11260\thy5-101\tUnder FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. 
Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller.\t16891401\tHY5\t221\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G11510\tmyb3r4-1\tNo visible phenotype.\t17287251\tMYB3R-4\t336\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G12200\tpyd2-2\tThe pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source.\t19413687\tPYD2\t310\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. 
After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\nAT5G13290\tcrn-1\tIncreased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3.\t12345678\tCRN\t189\n\n"
],
[
"#in this case we follow the same estructure as in problem one but using WHERE + indexing the names of the genes we are interested in \n\nimport pymysql.cursors\nimport csv\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='Exam_2',\n charset='utf8mb4', \n cursorclass=pymysql.cursors.DictCursor)\n\nconnection.autocommit(True)\n\n\ntry:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM germ_table, locus_table WHERE (gene = 'SKOR' OR gene = 'MAA3') AND germ_table.locus = locus_table.locus\"\n cursor.execute(sql)\n results = cursor.fetchall()\n with open('AIDA_FINAL_REPORT.csv', 'w') as csvfile:\n answerwriter = csv.writer(csvfile, delimiter=\"\\t\", quotechar='\"')\n answerwriter.writerow([\"Problem 2\"])\n answerwriter.writerow([\"Locus\", \"Germplasm\", \"Phenotype\", \"pubmed_link\", \"Gene\", \"Protein_ength\"])\n for line in results:\n answerwriter.writerow([line['locus'], line['germplasm'], line['phenotype'], line['pubmed_link'], line['gene'], line['Protein_length']])\n \n connection.commit() \n \nfinally:\n print(\"Problem 2 done\")\n connection.close()\n \n\nl = open('AIDA_FINAL_REPORT.csv', 'r')\nfor line in l.readlines():\n print(line)",
"Problem 2 done\nProblem 2\n\nLocus\tGermplasm\tPhenotype\tpubmed_link\tGene\tProtein_ength\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. 
Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. 
Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. 
Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT3G02850\tCS3816\tThe skor-1 mutant is sensitive to toxic cations in addition to K+ depletion.\t17568770\tSKOR\t234\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\nAT4G15570\tmaa3\tHomozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT.\t18772186\tMAA3\t294\n\n"
],
[
"#In this problem we need to count the number of entries on each chromosome. we will follow the same estructure but using SELECT COUNT and using regular expresion in order to query for the chromosomes. \n# we can set i = 1 at the beginning and tell the program to catch all the answers while i <= 5 until i =1. In this way we are cerating a loop that reads the table looking for each chromosome and recording each time it finds it. \nimport csv\nimport pymysql.cursors\n\ni = 1\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='Exam_2',\n charset='utf8mb4', \n cursorclass=pymysql.cursors.DictCursor)\n\nconnection.autocommit(True)\n\ntry:\n with connection.cursor() as cursor:\n \n with open('AIDA_FINAL_REPORT.csv', 'w', newline='') as csvfile:\n answerwriter = csv.writer(csvfile, delimiter=\"\\t\", quotechar='\"')\n answerwriter.writerow([\"Problem 3\"])\n answerwriter.writerow([\"Number of entries for each chromosome\"])\n while i <= 5:\n sql = \"SELECT COUNT(locus) FROM germ_table WHERE locus REGEXP (%s)\"\n data = (\"^AT\"+str(i)+\"G.+\") \n cursor.execute(sql, data)\n results = cursor.fetchall()\n for line in results:\n answerwriter.writerow([\"AT\"+str(i)+\"G\", line['COUNT(locus)']])\n i += 1\n \n connection.commit() \n \nfinally:\n print(\"problem 3 done\")\n connection.close()\n",
"problem 3 done\n"
],
[
"\nl = open('AIDA_FINAL_REPORT.csv', 'r')\nfor line in l.readlines():\n print(line)",
"Problem 3\n\nNumber of entries for each chromosome\n\nAT1G\t16\n\nAT2G\t16\n\nAT3G\t36\n\nAT4G\t32\n\nAT5G\t28\n\n"
],
[
"# In the same way, we can repeat this structure of code in order to show the average protein length for the genes on each Chromosome\nimport csv\nimport pymysql.cursors\n\ni = 1\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='Exam_2',\n charset='utf8mb4', \n cursorclass=pymysql.cursors.DictCursor)\n\nconnection.autocommit(True)\n\ntry:\n with connection.cursor() as cursor:\n \n with open('AIDA_FINAL_REPORT.csv', 'w', newline='') as csvfile:\n answerwriter = csv.writer(csvfile, delimiter=\"\\t\", quotechar='\"')\n answerwriter.writerow([\"Problem 4\"])\n answerwriter.writerow([\"Average protein lenght for each chromosome\"])\n while i <= 5:\n sql = \"SELECT ROUND(AVG(Protein_length)) FROM locus_table WHERE locus REGEXP (%s)\"\n data = (\"^AT\"+str(i)+\"G.+\") \n cursor.execute(sql, data)\n results = cursor.fetchall()\n for line in results:\n answerwriter.writerow([\"AT\"+str(i)+\"G\", line['ROUND(AVG(Protein_length))']])\n i += 1\n \n connection.commit() \n \nfinally:\n print(\"problem 4 done\")\n connection.close()\n \n \n\nl = open('AIDA_FINAL_REPORT.csv', 'r')\nfor line in l.readlines():\n print(line)\n",
"problem 4 done\nProblem 4\n\nAverage protein lenght for each chromosome\n\nAT1G\t259\n\nAT2G\t216\n\nAT3G\t252\n\nAT4G\t278\n\nAT5G\t271\n\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec62b585c0a046bf5af02c5659b98391a1180003 | 23,830 | ipynb | Jupyter Notebook | advisor.ipynb | dmakhkamov/fpl-advisor | f37ddf240649288190dee94f02a5e6093b422af4 | [
"MIT"
]
| 13 | 2019-12-04T20:39:28.000Z | 2021-09-22T15:45:52.000Z | advisor.ipynb | dmakhkamov/fpl-advisor | f37ddf240649288190dee94f02a5e6093b422af4 | [
"MIT"
]
| 17 | 2019-11-19T22:31:59.000Z | 2021-07-24T09:44:14.000Z | advisor.ipynb | dmakhkamov/fpl-advisor | f37ddf240649288190dee94f02a5e6093b422af4 | [
"MIT"
]
| 4 | 2020-07-29T14:19:32.000Z | 2022-03-28T16:10:05.000Z | 33.945869 | 551 | 0.618044 | [
[
[
"# Fantasy Premier League (FPL) Advisor\n\n# Purpose\nThe purpose of this Jupyter notebook is to help with the selection of team members for the [Fantasy Premier League](https://fantasy.premierleague.com/) (FPL) by forecasting how many points players will earn. It uses the [fpl-data](https://github.com/177arc/fpl-data) generated estimate points and other stats. It provides:\n- a visual tool for analysing the performance of each player and understanding their potential to earn points\n- a optimiser to recommend a team with the maximum expected points to improve the performance of your current team\n- tools for selecting the best game weeks play your chips\n- visual tools to understand the re-liability of the data\n\nIf you are not familar with the Fantasy Permier League, you can watch this introduction:\n\n<a href=\"http://www.youtube.com/watch?v=SV_F-cL8fC0\" target=\"_blank\"><img src=\"http://img.youtube.com/vi/SV_F-cL8fC0/0.jpg\" \nalt=\"How to play FPL\" width=\"600\" height=\"400\"/></a>\n\n# Installation\nTo get started, run the following command to install all required dependencies.",
"_____no_output_____"
]
],
[
[
"#!pip install -q -r ./requirements.txt",
"_____no_output_____"
]
],
[
[
"# Import requirements\nHere we import all external and local modulues.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport os, sys\n\n# Load local modules\nsys.path.append(os.getcwd())\nfrom data import get_df, get_next_gw_counts, get_next_gw_name\n\npd.set_option('display.max_columns', 100)\n\n# Define type aliases\nDF = pd.DataFrame\nS = pd.Series",
"_____no_output_____"
]
],
[
[
"# Set variables\nThis section sets all important global variables.",
"_____no_output_____"
]
],
[
[
"CREDS_FILE = 'fpl_credentials.csv' # Location of file holding the FPL user name and password. These are only required for the personalised recommendations in the second half of this notebook.\nDATA_URL = 'https://s3.eu-west-2.amazonaws.com/fpl.177arc.net/v1/latest/'\nLAST_SEASON = '2020-21'\nCURRENT_SEASON = '2021-22'\nFIXTURES_LOOK_BACK = 38 # Limit of how many fixtures to look back for calculating rolling team stats\nPLAYER_FIXTURES_LOOK_BACK = 8 # Limit of how many fixture to look back for calcating rolling player stats",
"_____no_output_____"
]
],
[
[
"# Load pre-processed data\nThis section loads data sets generated by the [fpl-data](https://github.com/177arc/fpl-data) lambda function and made available via the S3 bucket specified in the `DATA_URL` variable.",
"_____no_output_____"
]
],
[
[
"gws = get_df(url=f'{DATA_URL}gws.csv', index='GW ID')\nteams = get_df(url=f'{DATA_URL}teams.csv', index='Team Code')\nplayers_ext = get_df(url=f'{DATA_URL}players_ext.csv', index='Player Code')\nplayer_teams = get_df(url=f'{DATA_URL}player_teams.csv', index='Player Code')\nplayers_gw_team_eps_ext = get_df(url=f'{DATA_URL}players_gw_team_eps_ext.csv', index=['Player Code', 'Season', 'Game Week'])\nplayer_gw_next_eps_ext = get_df(url=f'{DATA_URL}player_gw_next_eps_ext.csv', index=['Player Code'])\nteam_fixture_strength_ext = get_df(url=f'{DATA_URL}team_fixture_stats_ext.csv', index='Fixture Code')\ndd = get_df(url=f'{DATA_URL}data_dictionary.csv')",
"_____no_output_____"
]
],
[
[
"# Configure context\nThis section we configure important setting for this notebook including the data dictionary. The data dictionary contains default ordering of fields, for each field a description, default format and mapping of API field names to more readable ones. It is used to show data in a more user-friendly way.",
"_____no_output_____"
]
],
[
[
"from common import Context\nfrom datadict.jupyter import DataDict\n\nctx = Context()\nctx.fixtures_look_back = FIXTURES_LOOK_BACK\nctx.player_fixtures_look_back = PLAYER_FIXTURES_LOOK_BACK\nctx.last_season = LAST_SEASON\nctx.current_season = CURRENT_SEASON\nctx.dd = DataDict(data_dict=dd)\nctx.total_gws = gws.shape[0]\nctx.next_gw = gws[lambda df: df['Is Next GW?']].index.values[0]\nctx.def_next_gws = get_next_gw_name(min(ctx.total_gws-ctx.next_gw+1, 8), ctx)\nctx.next_gw_counts = get_next_gw_counts(ctx)",
"_____no_output_____"
]
],
[
[
"## Visualise players' cost vs their expected points\nThe chart below shows expected points and cost for each player. The expected points are calculated hourly using the [fpl-data](https://github.com/177arc/fpl-data) lambda function. Use filters to focus on a particular segment and click on a dot to view more details about the player.",
"_____no_output_____"
]
],
[
[
"from player_chart import show_eps_vs_cost\n\nshow_eps_vs_cost(player_gw_next_eps_ext, players_gw_team_eps_ext, teams, ctx)",
"_____no_output_____"
]
],
[
[
"# Get best team for wildcard or season start\nYou can use the code below to get the best team for a wildcard or at the start of the season. It uses the [PuLP linear optimiser](https://pythonhosted.org/PuLP/) to find the team combination within the current money available with the highest total expected points of the over the next game weeks.",
"_____no_output_____"
]
],
[
[
"from team import show_opt_team\n\nshow_opt_team(player_gw_next_eps_ext, def_budget=100, max_budget=115, ctx=ctx)",
"_____no_output_____"
]
],
[
[
"# Load user team data\nThis section loads the data of the user's team. \n\n**Note this requires your user credentials to be saved in fpl_credentials.csv in the same directory as this notebook. Use fpl_credentials_template.csv as template.** Alternatively, you can set the fpl_email and fpl_password variables below.",
"_____no_output_____"
]
],
[
[
"from fplpandas import FPLPandas\n\n# Enter your FPL credentials here.\nfpl_email = ''\nfpl_password = ''\n\nif not os.path.exists(CREDS_FILE):\n fpl_cred = {'email': fpl_email, 'password': fpl_password}\nelse:\n fpl_cred = pd.read_csv('fpl_credentials.csv').iloc[0].to_dict()\n \nassert len(fpl_cred['email']) > 0 and len(fpl_cred['password']) > 0, 'FPL credentials not set. Please provide your email and password.'\n\nfpl = FPLPandas(**fpl_cred)",
"_____no_output_____"
],
[
"try:\n user_team_raw, _, user_trans_info_raw = fpl.get_user_team()\nexcept aiohttp.ClientResponseError as e:\n if e.status == 404:\n print('Your team cannot be found. Have you created it? You can only optimise your team once you have created it.')\n else:\n print(e)",
"_____no_output_____"
],
[
"from data import get_players_id_code_map\n\nplayers_id_code_map = (players_ext\n [lambda df: df['Season'] == ctx.current_season]\n .pipe(get_players_id_code_map))\n\nuser_team = (user_team_raw\n .pipe(ctx.dd.remap, data_set='player')\n .assign(**{'In Team?': True})\n .assign(**{'Selling Price': lambda df: df['Selling Price']/10})\n .assign(**{'Purchase Price': lambda df: df['Purchase Price']/10})\n .assign(**{'Selected?': lambda df: df['Team Position'].map(lambda x: x <= 11)}) \n .rename_axis('Player ID')\n .reset_index()\n .merge(players_id_code_map, left_on='Player ID', right_index=True, suffixes=(None, None))\n .drop(columns='Player ID')\n .set_index('Player Code')\n )\n\nuser_trans_info = user_trans_info_raw.loc[0]",
"_____no_output_____"
]
],
[
[
"## Current team",
"_____no_output_____"
]
],
[
[
"from team import display_team\n\nplayer_user_team = (user_team\n .merge(player_gw_next_eps_ext, left_on='Player Code', right_on='Player Code', how='left', suffixes=(None, None)))\ndisplay_team(player_user_team, None, ctx)",
"_____no_output_____"
],
[
"total_budget = (user_trans_info['bank']/10+player_user_team['Selling Price'].sum())\ntotal_budget",
"_____no_output_____"
],
[
"show_eps_vs_cost(user_team.merge(player_gw_next_eps_ext, left_on='Player Code', right_on='Player Code', how='outer'), players_gw_team_eps_ext, teams, ctx)",
"_____no_output_____"
]
],
[
[
"# Recommend team selection and transfers\nUse this section to get a recommendation on what players to select to optimise the expected points of your team and to improve it by making transfers. You need to have provided your FPL credentials for this to work.\n\nIt uses the PuLP linear optimiser to find the team combination within the current budget available with the highest total expected points of the over the next five game weeks while taking your current team into account for a user defined number of transfers. Note that when executing more than one transfer on the FPL website, 4 points will be deducted from your balance for every transfer.\n\nIt uses the same PuLP linear optimiser to find the selection with the highest expected points for the next game week.",
"_____no_output_____"
]
],
[
[
"from team import show_opt_team\n\nplayer_team_eps_user = (user_team\n .merge(player_gw_next_eps_ext, left_on='Player Code', right_on='Player Code', how='right', suffixes=(None, None))\n .assign(**{'Current Cost': lambda df: df['Selling Price'].fillna(df['Current Cost'])}))\n\nshow_opt_team(player_team_eps_user, total_budget, ctx)",
"_____no_output_____"
]
],
[
[
"# Select a good week to play the free hit chip\nThe idea here is to use the expected points for each player to determine the expected points of the optimal team (selected players only) for each game week. The game week with the highest expected points is the best for a free hit. **Be aware that towards the end of the season, double game weeks get scheduled and therefore it is advisable to wait till early March.**",
"_____no_output_____"
]
],
[
[
"from common import log_progress\nfrom backtest import pred_free_hit_gw\n\nif ctx.next_gw > 1:\n free_hist_eps = DF()\n for gw in log_progress(range(ctx.next_gw, ctx.total_gws+1), name='Game Week'):\n free_hist_eps = free_hist_eps.append(\n pred_free_hit_gw(players_gw_team_eps_ext, player_teams, total_budget, gw, ctx), \n ignore_index=True)\n\n display(free_hist_eps\n .sort_values('Expected Points', ascending=False)\n [['Game Week', 'Expected Points']]\n .pipe(ctx.dd.display, descriptions=False, index=False, head=None))\nelse:\n print('This simulation relies on data that will only be available after game week 1 and will only become reliable later in the season.')",
"_____no_output_____"
]
],
[
[
"# Select a good week for playing the bench boost chip\nHere we use the expected points for each player to determine the expected points of the user team (incl. non-selected players) for each game week. The game week with the highest expected points is the best for a bench boost. **Be aware that towards the end of the season, double game weeks get scheduled and therefore it is advisable to wait till early March.**",
"_____no_output_____"
]
],
[
[
"from backtest import pred_bench_boost_gw\nfrom common import log_progress\n\nif ctx.next_gw > 1:\n player_user_team_eps = (user_team\n .merge(players_gw_team_eps_ext.reset_index(), left_on='Player Code', right_on='Player Code', how='right', suffixes=(None, None))\n .assign(**{'Current Cost': lambda df: df['Selling Price'].fillna(df['Current Cost'])}))\n\n bench_boost_eps = DF()\n\n for gw in log_progress(range(ctx.next_gw, ctx.total_gws+1), name='Game Week'):\n bench_boost_eps = bench_boost_eps.append(\n pred_bench_boost_gw(player_user_team_eps, player_teams, total_budget, gw, ctx), \n ignore_index=True)\n\n display(bench_boost_eps\n .sort_values('Expected Points', ascending=False)\n [['Game Week', 'Expected Points']]\n .pipe(ctx.dd.display, descriptions=False, index=False, head=None))\nelse:\n print('This simulation relies on data that will only be available after game week 1 and will only become reliable later in the season.')",
"_____no_output_____"
]
],
[
[
"# Compare Fixture Difficulty Rating (FDR) with Fixture Difficulty Factor (FDF)\nThis section provides a visual comparison between the official FPL Fixture Difficulty Rating (FDR) and the fixture difficulty factor (FDF) that is used to adjust the average expected points in this notebook.",
"_____no_output_____"
]
],
[
[
"from data import get_team_fixtures_by_gw\n\nfdr_by_team_gw = get_team_fixtures_by_gw(team_fixture_strength_ext, 'Team FDR', ctx)\nfdr_labels_by_team_gw = get_team_fixtures_by_gw(team_fixture_strength_ext, 'Label', ctx)",
"_____no_output_____"
],
[
"from fdr_chart import get_fdr_chart\n\nget_fdr_chart(fdr_by_team_gw, fdr_labels_by_team_gw, 'FDR', True).show()",
"_____no_output_____"
],
[
"goal_strength_by_team_gw = get_team_fixtures_by_gw(\n team_fixture_strength_ext.assign(**{'MDR': lambda df: 1/df['Rel Def Fixture Strength']}), 'MDR', ctx)\n\nget_fdr_chart(goal_strength_by_team_gw, fdr_labels_by_team_gw, 'Defensive Fixture Difficulty Factor').show()",
"_____no_output_____"
],
[
"goal_strength_by_team_gw = get_team_fixtures_by_gw(\n team_fixture_strength_ext.assign(**{'MDR': lambda df: 1/df['Rel Att Fixture Strength']}), 'MDR', ctx)\n\nget_fdr_chart(goal_strength_by_team_gw, fdr_labels_by_team_gw, 'Attacking Fixture Difficulty Factor').show()",
"_____no_output_____"
]
],
[
[
"# Back test expected points for all players",
"_____no_output_____"
]
],
[
[
"from backtest import get_gw_points_backtest\n\ngw_points_backtest = get_gw_points_backtest(players_gw_team_eps_ext, ctx)",
"_____no_output_____"
],
[
"gw_points_backtest[['Error', 'Error Simple']].mean()",
"_____no_output_____"
],
[
"import plotly.express as px\n\npx.line(gw_points_backtest, x='Season Game Week', y=['Avg Expected Points', 'Avg Fixture Total Points', 'Error']).show()",
"_____no_output_____"
]
],
[
[
"# Back test the expected points only for players in a team\nThe basic idea of testing the predictions is to look at each past game week, predict the expected points for the game week (both adjusted for relative team strengths and not adjusted), optimise the team based on the expected points and then calculate the total expected points for the optimised team (only for the selected player). For validation, we calculate the actual points of the players of the optimised team. We also calculate the points of the dream team, i.e. the total points of the team with highest actual points for each game week.",
"_____no_output_____"
]
],
[
[
"from backtest import back_test_gw\n\nif ctx.next_gw > 1:\n backtest_results = DF()\n for gw in log_progress(range(2, ctx.next_gw), name='Game Week'):\n backtest_results = backtest_results.append(back_test_gw(players_gw_team_eps_ext.reset_index(), gw, player_teams, ctx), ignore_index=True)\nelse:\n print('This simulation relies on data that will only be available after game week 1 and will only become reliable later in the season.')",
"_____no_output_____"
],
[
"import plotly.express as px\n\nif ctx.next_gw > 1:\n px.line(backtest_results, x='Game Week', y=['Actual Points Dream Team', 'Calc Actual Points', 'Calc Expected Points']).show()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec62c0a35f67be35128199f079a88f9eb8e51607 | 35,953 | ipynb | Jupyter Notebook | data-stories/incarceration-race/incarceration-race.ipynb | BohanMeng/storytelling-with-data | 291f8c4c3e1fd83e8057a773712d04febc6c21f6 | [
"MIT"
]
| 2 | 2020-03-30T05:15:56.000Z | 2022-03-21T16:24:56.000Z | data-stories/incarceration-race/incarceration-race.ipynb | BohanMeng/storytelling-with-data | 291f8c4c3e1fd83e8057a773712d04febc6c21f6 | [
"MIT"
]
| 2 | 2019-05-03T19:34:48.000Z | 2019-05-25T01:28:22.000Z | data-stories/incarceration-race/incarceration-race.ipynb | FanruiShao/storytelling-with-data | 55d5452a60ce2f16f398db014e4857b31f175f27 | [
"MIT"
]
| 1 | 2018-01-17T19:14:05.000Z | 2018-01-17T19:14:05.000Z | 123.975862 | 27,462 | 0.812144 | [
[
[
"import pandas as pd\nimport hypertools as hyp\n%matplotlib inline",
"_____no_output_____"
],
[
"fname = '07641-0001-Data.xpt'",
"_____no_output_____"
],
[
"data = pd.read_sas(fname)",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"hyp.plot(data, 'k.', model='SpectralEmbedding', );",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec630c1afbe5d9fc74c8ec3afd8cd736a30ffa84 | 5,137 | ipynb | Jupyter Notebook | mathstyle/PhotoWall.ipynb | suixinGit/myphotoshop | fdc322c1526d596b328a83cd0d4c2b13bf47693f | [
"Apache-2.0"
]
| null | null | null | mathstyle/PhotoWall.ipynb | suixinGit/myphotoshop | fdc322c1526d596b328a83cd0d4c2b13bf47693f | [
"Apache-2.0"
]
| null | null | null | mathstyle/PhotoWall.ipynb | suixinGit/myphotoshop | fdc322c1526d596b328a83cd0d4c2b13bf47693f | [
"Apache-2.0"
]
| null | null | null | 29.693642 | 135 | 0.541756 | [
[
[
"'''将图片像素化'''\n#输入一幅图片,要求黑底白字\n#图像放大至短边\n#矩阵化\n#填图,纵边同一化\n#\nimport cv2\nimport numpy as np \nimport os\nimport sys\nimport ssl\nimport urllib.request\nimport json\nimport string\n#文件选择对话框对话框依赖模块\nimport time\nimport json\nimport random\n#python生成GUID\nimport uuid\nimport copy\nimport glob as gb",
"_____no_output_____"
],
[
"def Get_File_list(imgFoldName):\n filelist = os.listdir(imgFoldName)\n fileNum = len(filelist)\n return filelist,fileNum",
"_____no_output_____"
],
[
"def Get_Image_list(imgFoldName,format_pic):\n img_path_list = gb.glob(imgFoldName + '/*.' + format_pic) \n imgNum = len(img_path_list)\n return img_path_list,imgNum",
"_____no_output_____"
],
[
"def GetFiledImage(image_height,image_width,image_nchanels,imglist,pix_height):\n imagex_start = 0\n imagey_start = 0\n field_image = np.zeros((image_height,image_width,image_nchanels), np.uint8)\n image_num = len(imglist)\n isaddable = False\n if(image_height > pix_height and image_width > 0):\n isaddable = True\n while isaddable:\n image_index = random.randint(0,(image_num-1))\n image_file = imglist[image_index]\n image = cv2.imread(image_file)\n height_img = img.shape[0]\n width_img = img.shape[1]\n pix_width = round(width_img*pix_height/height_img)\n pix_img = cv2.resize(image,(pix_width,pix_height),interpolation=cv2.INTER_AREA)\n if((imagex_start+pix_width)<image_width):\n #print(imagex_start,imagey_start)\n field_image[imagey_start:(imagey_start+pix_height),imagex_start:(imagex_start+pix_width)] = copy.deepcopy(pix_img)\n imagex_start = imagex_start+pix_width\n else:\n imagex_start = 0\n if((imagey_start+2*pix_height)< image_height):\n imagey_start = imagey_start+pix_height\n else:\n #print(isaddable)\n isaddable = False\n cv2.imwrite('Image.jpg',field_image)\n return field_image",
"_____no_output_____"
],
[
"def ImageMerge(img,img_mask):\n height_img_mask = img_mask.shape[0]\n width_img_mask = img_mask.shape[1]\n for i in range(height_img_mask):\n for j in range(width_img_mask):\n if(img_mask[i,j] < 100):\n #print(i,j)\n img[i,j,:] = 0\n #print(img)\n #img[i,j] = 0 # 这里可以处理每个像素点\n cv2.imwrite('result.jpg',img)\n ",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n imgFoldName = '/home/zhangjie/CarRec/20180706/201807061'\n img_format = 'jpg'\n pix_height = 49\n pix_width = 49\n image_height = 1000\n image_width = 1000\n image_nchanels = 3\n pix_height = 12\n \n img = cv2.imread('/home/zhangjie/CarRec/20180706/201807061/201807060850_85b50e90-80b6-11e8-bbe5-000c297a242a.jpg')\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #转换了灰度化\n retval, im_at_fixed = cv2.threshold(img_gray, 150, 255, cv2.THRESH_BINARY) \n height_mash = im_at_fixed.shape[0]\n width_mash = im_at_fixed.shape[1]\n\n #将阈值设置为50,阈值类型为cv2.THRESH_BINARY,则灰度在大于50的像素其值将设置为255,其它像素设置为0\n imglist,imgnum = Get_Image_list(imgFoldName,img_format)\n field_image = GetFiledImage(height_mash,width_mash,3,imglist,pix_height)\n ImageMerge(field_image,im_at_fixed)\n ",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec630ea9912a3bc12ff0e3a9a96580576113419b | 486,418 | ipynb | Jupyter Notebook | GAN_nn_agg.ipynb | yjl705/CS236G_herbalMedicineClassification | 0bbfc1ccdb9336baf2909660130a07237a58576c | [
"MIT"
]
| null | null | null | GAN_nn_agg.ipynb | yjl705/CS236G_herbalMedicineClassification | 0bbfc1ccdb9336baf2909660130a07237a58576c | [
"MIT"
]
| null | null | null | GAN_nn_agg.ipynb | yjl705/CS236G_herbalMedicineClassification | 0bbfc1ccdb9336baf2909660130a07237a58576c | [
"MIT"
]
| null | null | null | 123.362414 | 76,324 | 0.811759 | [
[
[
"import numpy as np\nfrom cvxopt import matrix, solvers\nsolvers.options['show_progress'] = False\nimport os\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport tqdm\nimport warnings\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.io import loadmat\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.model_selection import train_test_split\nimport torchmetrics\nwarnings.filterwarnings(\"ignore\", category=UserWarning)",
"_____no_output_____"
],
[
"#GAN model\nclass LinearGeneratorA(nn.Module):\n\n def __init__(self, input_dimA, output_dim, dim):\n super(LinearGeneratorA, self).__init__()\n self.layers = nn.Sequential(\n nn.Linear(input_dimA, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, dim),\n nn.LeakyReLU(),\n #nn.BatchNorm1d(dim),\n nn.Linear(dim, output_dim)) # Real-value range\n\n def forward(self, x):\n return self.layers(x)\n\n\nclass LinearGeneratorB(nn.Module):\n\n def __init__(self, input_dimB, output_dim, dim):\n super(LinearGeneratorB, self).__init__()\n self.layers = nn.Sequential(\n nn.Linear(input_dimB, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, dim),\n nn.LeakyReLU(),\n #nn.BatchNorm1d(dim),\n nn.Linear(dim, output_dim)) # Real-value range\n\n def forward(self, x):\n return self.layers(x)\n\nclass BiGANDiscriminatorA(nn.Module):\n def __init__(self, latent_dim, dim):\n super(BiGANDiscriminatorA, self).__init__()\n\n self.layers = nn.Sequential(\n nn.Linear(latent_dim, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, 1),\n nn.Sigmoid())\n\n def forward(self, x):\n return self.layers(x)\n\n\nclass BiGANDiscriminatorB(nn.Module):\n def __init__(self, latent_dim, dim):\n super(BiGANDiscriminatorB, self).__init__()\n\n self.layers = nn.Sequential(\n nn.Linear(latent_dim, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, dim),\n nn.LeakyReLU(),\n nn.Linear(dim, 1),\n nn.Sigmoid()) # To probability\n\n def forward(self, x): # The noise has to be implicitly added into the generator\n return self.layers(x)",
"_____no_output_____"
],
[
"input_dimA = 128\ninput_dimB = 128\noutput_dimA = 128\noutput_dimB = 128\ndim = 150 #Hidden layer neurons in the generator and discriminator\n\nepochs = 2000\nlr = 1e-3\nlr2 = 1e-4\nlr3 = 1e-3\nbeta = 1 #Balance the reconstruction loss and discriminator loss (fake)\nalpha = 1 #Balance the generator loss and discriminator loss\nEPS = 1e-6\nmomentum = 0.9\nbatch_size = 128\nnoise =0.2",
"_____no_output_____"
],
[
"# ------- Define Model ------- #\n\nencoderA= LinearGeneratorA(input_dimA,output_dimB, dim) #From A to B\ndiscriminatorA = BiGANDiscriminatorA(output_dimA, dim)\n\nencoderB= LinearGeneratorB(input_dimB,output_dimA, dim) # From B to A\ndiscriminatorB = BiGANDiscriminatorB(output_dimB, dim)\n",
"_____no_output_____"
],
[
"# ------- Optimizer ------- #\n\n#opt_g = optim.Adam(list(encoderA.parameters()), lr= lr)\nopt_g = optim.Adam(list(encoderA.parameters()) + list(encoderB.parameters()), lr= lr2) # Question: different learning rate?\nopt_d = optim.Adam(list(discriminatorA.parameters())+list(discriminatorB.parameters()), lr= lr)\nopt_e = optim.Adam(list(encoderA.parameters()) + list(encoderB.parameters()),lr= lr) #Question: 1 or 2 sets of generator parameters\n\nloss = nn.BCELoss()\n",
"_____no_output_____"
],
[
"# Input\nsource =loadmat('/Users/yjl/Downloads/Stanford/CS 236G/Aggregated_feature/Source_BZ_DG_GG.mat')\ntarget =loadmat('/Users/yjl/Downloads/Stanford/CS 236G/Aggregated_feature/Target_BZ_DG_GG.mat')\n\nXs = np.row_stack((source['BZ'],source['DG'],source['GG']))\nassert Xs.shape[0] == 150\nXt = np.row_stack((target['BZ'],target['DG'],target['GG']))\nassert Xt.shape[0] == 480\n\n#normalize data\n#from sklearn.preprocessing import normalize\n#for i in range(0, 128, 16):\n# Xs[:,i:i+16] = normalize(Xs[:,i:i+16]) \n# Xt[:,i:i+16] = normalize(Xt[:,i:i+16]) \n\n#Target domain train test split\ntrain_idx = np.concatenate((np.arange(0,100),np.arange(160,260),np.arange(320,420))).reshape(-1,)\ntest_idx = np.concatenate((np.arange(100,160),np.arange(260,320),np.arange(420,480))).reshape(-1,)\n\n\n\n# Labels\ny_target=np.hstack((0*np.ones(160),1*np.ones(160),2*np.ones(160))).astype(np.int64)\ny_source=np.hstack((0*np.ones(50),1*np.ones(50),2*np.ones(50))).astype(np.int64)",
"_____no_output_____"
],
[
"print(Xs.shape)\nprint(Xt.shape)\nprint(y_source.shape)\nprint(y_target.shape)",
"(150, 128)\n(480, 128)\n(150,)\n(480,)\n"
],
[
"#X and Y, need to change to our data\nXA_train = Xs\nXB_train = Xt[train_idx,:]\nassert XB_train.shape[0] == 300\nXA_test = Xs\nXB_test = Xt[test_idx,:]\nassert XB_test.shape[0] == 180\ny_valid = y_target[train_idx]\nassert y_valid.shape[0] == 300\ny_test = y_target[test_idx]\nassert y_test.shape[0] == 180\n#print(y_test)",
"_____no_output_____"
],
[
"# ------- Variables ------- #\n\nscr_train = Variable(torch.from_numpy(XA_train).float())\ntgt_train = Variable(torch.from_numpy(XB_train).float())\nscr_test = Variable(torch.from_numpy(XA_test).float())\ntgt_test = Variable(torch.from_numpy(XB_test).float())\n\ns_train = torch.tensor(scr_train)\ns_train_tensor = torch.utils.data.TensorDataset(s_train)\nsource_trainloader = torch.utils.data.DataLoader(dataset=s_train_tensor, batch_size=len(s_train_tensor), shuffle=False)\n\nt_train = torch.tensor(tgt_train)\nt_train_tensor = torch.utils.data.TensorDataset(t_train)\ntarget_trainloader = torch.utils.data.DataLoader(dataset=t_train_tensor, batch_size=batch_size, shuffle=False)\n\ns_test = torch.tensor(scr_test)\ns_test_tensor = torch.utils.data.TensorDataset(s_test)\nsource_testloader = torch.utils.data.DataLoader(dataset=s_test_tensor, batch_size=len(s_test_tensor), shuffle=False)\n\nt_test = torch.tensor(tgt_test)\nt_test_tensor = torch.utils.data.TensorDataset(t_test)\ntarget_testloader = torch.utils.data.DataLoader(dataset=t_test_tensor, batch_size=batch_size, shuffle=False)\n\nloss_recorder = []\ngloss_recorder = []\ndloss_recorder = []\nglossrec_recorder = []\n\ntrue_acc_A_mean = []\nfake_acc_A_mean = []\ntrue_acc_B_mean = []\nfake_acc_B_mean = []\n\n",
"_____no_output_____"
],
[
"#Train GAN model\npbar = tqdm.tqdm(range(epochs))\nfor e in pbar:\n\n # No need to perform train-test partition\n # XA_train, XA_test, yA_train, yA_test = train_test_split(XA, yA, test_size=0.2, random_state= 0)\n # XB_train, XB_test, yB_train, yB_test = train_test_split(XB, yB, test_size=0.2, random_state= 0)\n\n true_acc = []\n fake_acc = []\n true_acc_B = []\n fake_acc_B = []\n for (datas, datat) in zip(source_trainloader, target_trainloader):\n src_data = datas[0]\n tgt_data = datat[0]\n\n #For the Wasserstein GAN\n #if src_data.size()[0] != batch_size:\n # continue\n\n #if tgt_data.size()[0] != batch_size:\n # continue\n\n encoderA.train()\n encoderB.train()\n discriminatorA.train()\n discriminatorB.train()\n opt_d.zero_grad()\n opt_g.zero_grad()\n opt_e.zero_grad()\n\n validA_target = torch.ones((src_data.size()[0], 1))\n fakeA_target = torch.zeros((tgt_data.size()[0], 1)) # The fake samples converted from the target domain\n\n validB_target = torch.ones((tgt_data.size()[0], 1))\n fakeB_target = torch.zeros((src_data.size()[0], 1)) # The fake samples converted from the source domain\n\n src_gen_A = encoderA(src_data*torch.from_numpy(np.random.binomial(size=src_data.size(), n=1, p=1-noise)))# From A to B (HM->MMA)\n tgt_gen_B = encoderB(tgt_data*torch.from_numpy(np.random.binomial(size=tgt_data.size(), n=1, p=1-noise))) # From B to A (MMA->HM)\n\n tgt_gen_BA = encoderA(tgt_gen_B*torch.from_numpy(np.random.binomial(size=tgt_data.size(), n=1, p=1-noise))) # Recovery: target->source->target (MMA->HM->MMA)\n src_gen_AB = encoderB(src_gen_A*torch.from_numpy(np.random.binomial(size=src_data.size(), n=1, p=1-noise))) # Recovery: source->target->source (HM->MMA->HM)\n\n # loss_gA = torch.mean(torch.square(src_gen_A - tgt_gen_BA)) #Reconstruction loss\n # loss_gB = torch.mean(torch.square(tgt_gen_B - src_gen_AB)) #Reconstruction loss\n\n loss_gA_rec = torch.mean(torch.square(src_data - src_gen_AB)) # Reconstruction loss\n loss_gB_rec = torch.mean(torch.square(tgt_data 
- tgt_gen_BA)) # Reconstruction loss\n\n ### The following Four Sentences have been changed ###\n # discriminator_loss_real_A = discriminatorA(src_data, tgt_gen_B)\n # discriminator_loss_fake_A = discriminatorA(tgt_gen_B, src_gen_AB) #Here is weird.\n #\n # discriminator_loss_real_B = discriminatorB(tgt_data, src_gen_A)\n # discriminator_loss_fake_B = discriminatorB(src_gen_A, tgt_gen_BA) #Here is weird.\n\n discriminator_loss_real_A = discriminatorA(src_data)\n discriminator_loss_fake_A = discriminatorA(tgt_gen_B)\n\n discriminator_loss_real_B = discriminatorB(tgt_data)\n discriminator_loss_fake_B = discriminatorB(src_gen_A)\n\n true_acc.append(np.mean(discriminator_loss_real_A.detach().numpy()>0.5))\n fake_acc.append(np.mean(discriminator_loss_fake_A.detach().numpy()<0.5))\n true_acc_B.append(np.mean(discriminator_loss_real_B.detach().numpy() > 0.5))\n fake_acc_B.append(np.mean(discriminator_loss_fake_B.detach().numpy() < 0.5))\n\n # loss_dA = loss(discriminator_loss_real_A, validA_target) + loss(discriminator_loss_fake_A, fakeA_target)\n # loss_dB = loss(discriminator_loss_real_B, validB_target) + loss(discriminator_loss_fake_B, fakeB_target)\n # For the nonsaturating loss\n loss_dA = -torch.mean(torch.log(discriminator_loss_real_A+EPS)) - torch.mean(torch.log(1-discriminator_loss_fake_A+EPS))\n loss_dB = -torch.mean(torch.log(discriminator_loss_real_B+EPS)) - torch.mean(torch.log(1-discriminator_loss_fake_B+EPS))\n\n #lossG = (loss_gA_rec + loss_gB_rec)/alpha - beta*(loss(discriminator_loss_fake_A, fakeA_target) + loss(discriminator_loss_fake_B, fakeB_target))\n #lossGfake = -(loss(discriminator_loss_fake_A, fakeA_target) + loss(discriminator_loss_fake_B, fakeB_target))\n lossGfake = -(torch.mean(torch.log(discriminator_loss_fake_A+EPS))+torch.mean(torch.log(discriminator_loss_fake_B+EPS)))\n lossD = loss_dA + loss_dB\n lossGrec = (loss_gA_rec + loss_gB_rec)\n total_loss = lossGrec/alpha + beta*lossGfake + lossD\n\n #total_loss.backward()\n 
lossD.backward(retain_graph=True)\n lossGrec.backward(retain_graph=True)\n lossGfake.backward()\n opt_d.step()\n opt_g.step()\n opt_e.step()\n loss_recorder.append(total_loss)\n gloss_recorder.append(lossGfake)\n glossrec_recorder.append(lossGrec)\n dloss_recorder.append(lossD)\n true_acc_A_mean.append(np.mean(true_acc))\n fake_acc_A_mean.append(np.mean(fake_acc))\n true_acc_B_mean.append(np.mean(true_acc_B))\n fake_acc_B_mean.append(np.mean(fake_acc_B))\n pbar.set_description('Bi-GAN Total Loss: %.2e, G-Rec Loss: %.2e, G-Fake Loss: %.2e, D Loss: %.2e, A True ACC: %.2f, A Fake ACC: %.2f , B True ACC: %.2f, B Fake ACC: %.2f' % (total_loss.item(), lossGrec.item(), lossGfake.item(), lossD.item(), np.mean(true_acc), np.mean(fake_acc), np.mean(true_acc_B), np.mean(fake_acc_B)))",
"Bi-GAN Total Loss: 7.59e+00, G-Rec Loss: 4.81e+00, G-Fake Loss: 1.38e+00, D Loss: 1.39e+00, A True ACC: 1.00, A Fake ACC: 0.36 , B True ACC: 1.00, B Fake ACC: 0.06: 100%|██████████| 2000/2000 [00:57<00:00, 34.56it/s]\n"
],
[
"# Save models\nDir_models = \"/Users/yjl/Downloads/Stanford/CS 236G/GAN models\"\nos.chdir(Dir_models)\nspecs = 'Noisy'+ str(noise)+'_Bi-GAN_HM2MMA_standardization_five layers_nonsaturating_separateGDE_dim' + str(dim) +'_epoch'+str(epochs) + '_G initial_lr' + str(lr) + '_D initial_lr' + str(lr3)+ '_Grec initial_lr' + str(lr2)+ '_beta' + str(beta) + '_alpha' + str(alpha)\ntorch.save({\n 'epoch': epochs,\n 'encoderA_state_dict': encoderA.state_dict(),\n 'encoderB_state_dict': encoderB.state_dict(),\n 'discriminatorA_state_dict': discriminatorA.state_dict(),\n 'discriminatorB_state_dict': discriminatorB.state_dict(),\n 'opt_g_state_dict': opt_g.state_dict(),\n 'opt_d_state_dict': opt_d.state_dict(),\n 'opt_e_state_dict': opt_e.state_dict(),\n 'loss': loss_recorder[-1]}, specs + '.pt')\nprint('Model saved to disk!')",
"Model saved to disk!\n"
],
[
"#plot GAN result\nos.chdir(Dir_models)\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(9,6))\nplt.plot(loss_recorder,'r')\nplt.plot(glossrec_recorder,'c:',linewidth=3)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.grid()\nplt.legend(['Total Loss','Reconstruction Loss'],fontsize=18)\nplt.title('Total Loss and Reconstruction Loss')\nplt.savefig(specs+'loss.pdf',bbox_inches='tight')\nplt.show()\nplt.figure(figsize=(9,6))\nplt.plot(gloss_recorder,'b')\nplt.plot(dloss_recorder,'g:',linewidth=3)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.grid()\nplt.legend(['Generator Faking Loss','Discriminator Loss'],fontsize=18)\nplt.title('GD Loss')\nplt.savefig(specs+'gdloss.pdf',bbox_inches='tight')\nplt.show()\nplt.figure(figsize=(9,6))\nplt.plot(true_acc_A_mean,'r:',linewidth=3)\nplt.plot(fake_acc_A_mean,'b')\nplt.plot(true_acc_B_mean,'g:',linewidth=3)\nplt.plot(fake_acc_B_mean,'c')\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.grid()\nplt.legend(['Real source Classification Accuracy','Fake source Classification Accuracy','Real target Classification Accuracy','Fake target Classification Accuracy'],fontsize=18)\nplt.title('Classification Accuracy')\nplt.savefig(specs+'accuracy.pdf',bbox_inches='tight')\nplt.show()\n",
"_____no_output_____"
],
[
"#save data\nData_generated_dir = \"/Users/yjl/Downloads/Stanford/CS 236G/Data_gen\"\nfor datas in target_testloader:\n tgt_data = datas[0]\n tgt_gen_B = encoderB(tgt_data).detach().numpy()\n os.chdir(Data_generated_dir)\n np.save(specs+'.npy',tgt_gen_B)",
"_____no_output_____"
],
[
"# NN\nclass NN(nn.Module):\n def __init__(self, layers, dropout):\n \n super().__init__()\n \n fcs = []\n for i in range(len(layers) - 2):\n fcs.append(nn.Linear(layers[i], layers[i+1]))\n fcs.append(nn.ReLU())\n fcs.append(nn.Dropout(dropout))\n fcs.append(nn.Linear(layers[-2], layers[-1]))\n self.fc = nn.Sequential(*fcs)\n\n \n def forward(self, data):\n \n # data = [batch size, input_dim]\n \n return self.fc(data)",
"_____no_output_____"
],
[
"INPUT_DIM = 128\nOUTPUT_DIM = 3\nDROPOUT = 0.3\nLAYERS = [INPUT_DIM, 64, 32, 16, OUTPUT_DIM]\n\nmodel_nn = NN(LAYERS, DROPOUT)\nmodel_nn.float()\nopt_nn = optim.Adam(model_nn.parameters(),lr=1e-4)\n\ncriterion = nn.CrossEntropyLoss()\n\nmodel_nn_GAN = NN(LAYERS, DROPOUT)\nmodel_nn_GAN.float()\nopt_nn_GAN = optim.Adam(model_nn_GAN.parameters(),lr=1e-4)\n\ncriterion_GAN = nn.CrossEntropyLoss()",
"_____no_output_____"
],
[
"def train(model, train_dataloader, optimizer, criterion):\n epoch_loss = 0\n epoch_acc = 0\n epoch_prec = 0\n epoch_recall = 0\n epoch_f1 = 0\n batches = len(train_dataloader)\n \n model.train()\n \n for _, batch in enumerate(train_dataloader):\n x, y = batch\n \n optimizer.zero_grad()\n \n predictions = model(x)\n \n loss = criterion(predictions, y)\n \n predictions = torch.argmax(torch.softmax(predictions, 1), dim=1)\n \n acc = torchmetrics.functional.accuracy(predictions, y)\n prec, recall = torchmetrics.functional.precision_recall(predictions, y, num_classes=3, average='macro')\n f1 = torchmetrics.functional.f1(predictions, y, num_classes=3, average='macro')\n \n loss.backward(retain_graph=True)\n \n optimizer.step()\n \n epoch_loss += loss.item()\n epoch_acc += acc.item()\n epoch_prec += prec.item()\n epoch_recall += recall.item()\n epoch_f1 += f1.item()\n\n return epoch_loss / batches, epoch_acc / batches, epoch_prec / batches, epoch_recall / batches, epoch_f1 / batches\n\n",
"_____no_output_____"
],
[
"def evaluate(model, dataloader, criterion):\n epoch_loss = 0\n epoch_acc = 0\n epoch_prec = 0\n epoch_recall = 0\n epoch_f1 = 0\n batches = len(dataloader)\n \n model.eval()\n \n with torch.no_grad():\n \n for _, batch in enumerate(dataloader):\n x, y = batch\n\n predictions = model(x)\n \n loss = criterion(predictions, y)\n \n predictions = torch.argmax(torch.softmax(predictions, 1), dim=1)\n \n acc = torchmetrics.functional.accuracy(predictions, y)\n prec, recall = torchmetrics.functional.precision_recall(predictions, y, num_classes=3, average='macro')\n f1 = torchmetrics.functional.f1(predictions, y, num_classes=3, average='macro')\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n epoch_prec += prec.item()\n epoch_recall += recall.item()\n epoch_f1 += f1.item()\n \n return epoch_loss / batches, epoch_acc / batches, epoch_prec / batches, epoch_recall / batches, epoch_f1 / batches",
"_____no_output_____"
],
[
"import time\n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\n",
"_____no_output_____"
],
[
"class HerbalData(Dataset):\n def __init__(self, x_train, y_train):\n assert len(x_train) == len(y_train)\n self.x = x_train\n self.y = y_train\n \n def __len__(self):\n return self.x.shape[0]\n \n def __getitem__(self, idx):\n return self.x[idx], self.y[idx]\n",
"_____no_output_____"
],
[
"#initiate training\nx_train = torch.tensor(scr_train)\nx_train_GAN = encoderA(x_train*torch.from_numpy(np.random.binomial(size=x_train.size(), n=1, p=1-noise)))\nx_valid = t_train\nx_test = t_test\n\ntrain_set = HerbalData(x_train, torch.from_numpy(y_source))\ntrain_GAN_set = HerbalData(x_train_GAN, torch.from_numpy(y_source))\nvalid_set = HerbalData(x_valid, torch.from_numpy(y_valid))\ntest_set = HerbalData(x_test, torch.from_numpy(y_test))\n\ntrain_dataloader = DataLoader(train_set, batch_size=150, shuffle=True)\ntrain_GAN_dataloader = DataLoader(train_GAN_set, batch_size=150, shuffle=True)\nvalid_dataloader = DataLoader(valid_set, batch_size=len(valid_set), shuffle=True)\ntest_dataloader = DataLoader(test_set, batch_size=len(test_set), shuffle=True)\n\n\nprint(\"Train Set:\", x_train.shape)\nprint(\"Valid Set:\", x_valid.shape)\nprint(\"Target Train Set:\", x_test.shape)\n",
"Train Set: torch.Size([150, 128])\nValid Set: torch.Size([300, 128])\nTarget Train Set: torch.Size([180, 128])\n"
],
[
"N_EPOCHS = 150\n\ntrain_loss = np.zeros(N_EPOCHS)\ntrain_acc = np.zeros(N_EPOCHS)\ntrain_prec = np.zeros(N_EPOCHS)\ntrain_recall = np.zeros(N_EPOCHS)\ntrain_f1 = np.zeros(N_EPOCHS)\n\ntrain_GAN_loss = np.zeros(N_EPOCHS)\ntrain_GAN_acc = np.zeros(N_EPOCHS)\ntrain_GAN_prec = np.zeros(N_EPOCHS)\ntrain_GAN_recall = np.zeros(N_EPOCHS)\ntrain_GAN_f1 = np.zeros(N_EPOCHS)\n\nvalid_loss = np.zeros(N_EPOCHS)\nvalid_acc = np.zeros(N_EPOCHS)\nvalid_prec = np.zeros(N_EPOCHS)\nvalid_recall = np.zeros(N_EPOCHS)\nvalid_f1 = np.zeros(N_EPOCHS)\n\nvalid_GAN_loss = np.zeros(N_EPOCHS)\nvalid_GAN_acc = np.zeros(N_EPOCHS)\nvalid_GAN_prec = np.zeros(N_EPOCHS)\nvalid_GAN_recall = np.zeros(N_EPOCHS)\nvalid_GAN_f1 = np.zeros(N_EPOCHS)\n\n\n\n\nbest_valid_loss = float('inf')\nbest_valid_GAN_loss = float('inf')\n\n\n#start train DNN\nfor i in range(N_EPOCHS):\n\n start_time = time.time()\n \n train_loss[i], train_acc[i], train_prec[i], train_recall[i], train_f1[i] = train(model_nn, train_dataloader, opt_nn, criterion)\n valid_loss[i], valid_acc[i], valid_prec[i], valid_recall[i], valid_f1[i] = evaluate(model_nn, valid_dataloader, criterion)\n train_GAN_loss[i], train_GAN_acc[i], train_GAN_prec[i], train_GAN_recall[i], train_GAN_f1[i] = train(model_nn_GAN, train_GAN_dataloader, opt_nn_GAN, criterion_GAN)\n valid_GAN_loss[i], valid_GAN_acc[i], valid_GAN_prec[i], valid_GAN_recall[i], valid_GAN_f1[i] = evaluate(model_nn_GAN, valid_dataloader, criterion_GAN)\n \n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n \n if valid_loss[i] < best_valid_loss:\n best_valid_loss = valid_loss[i]\n torch.save(model_nn.state_dict(), 'nn-agg.pt')\n if valid_GAN_loss[i] < best_valid_GAN_loss:\n best_valid_GAN_loss = valid_GAN_loss[i]\n torch.save(model_nn.state_dict(), 'nn-agg-GAN.pt')\n \n print(f'Epoch: {i+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss[i]:.3f} | Train Acc: {train_acc[i]*100:.2f}%')\n print(f'\\t Val. 
Loss: {valid_loss[i]:.3f} | Val. Acc: {valid_acc[i]*100:.2f}%')\n print(f'\\tTrain GAN Loss: {train_GAN_loss[i]:.3f} | Train GAN Acc: {train_GAN_acc[i]*100:.2f}%')\n print(f'\\t Val. GAN Loss: {valid_GAN_loss[i]:.3f} | Val. GAN Acc: {valid_GAN_acc[i]*100:.2f}%')\n\n",
"Epoch: 01 | Epoch Time: 0m 0s\n\tTrain Loss: 1.307 | Train Acc: 32.67%\n\t Val. Loss: 1.135 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.445 | Train GAN Acc: 38.00%\n\t Val. GAN Loss: 1.024 | Val. GAN Acc: 38.33%\nEpoch: 02 | Epoch Time: 0m 0s\n\tTrain Loss: 1.320 | Train Acc: 26.67%\n\t Val. Loss: 1.136 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.394 | Train GAN Acc: 36.67%\n\t Val. GAN Loss: 1.029 | Val. GAN Acc: 38.33%\nEpoch: 03 | Epoch Time: 0m 0s\n\tTrain Loss: 1.428 | Train Acc: 25.33%\n\t Val. Loss: 1.135 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.551 | Train GAN Acc: 26.00%\n\t Val. GAN Loss: 1.034 | Val. GAN Acc: 41.67%\nEpoch: 04 | Epoch Time: 0m 0s\n\tTrain Loss: 1.300 | Train Acc: 28.67%\n\t Val. Loss: 1.134 | Val. Acc: 7.67%\n\tTrain GAN Loss: 1.328 | Train GAN Acc: 36.67%\n\t Val. GAN Loss: 1.039 | Val. GAN Acc: 41.33%\nEpoch: 05 | Epoch Time: 0m 0s\n\tTrain Loss: 1.186 | Train Acc: 36.00%\n\t Val. Loss: 1.133 | Val. Acc: 7.67%\n\tTrain GAN Loss: 1.412 | Train GAN Acc: 32.00%\n\t Val. GAN Loss: 1.044 | Val. GAN Acc: 41.00%\nEpoch: 06 | Epoch Time: 0m 0s\n\tTrain Loss: 1.209 | Train Acc: 36.67%\n\t Val. Loss: 1.132 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.304 | Train GAN Acc: 38.00%\n\t Val. GAN Loss: 1.049 | Val. GAN Acc: 37.33%\nEpoch: 07 | Epoch Time: 0m 0s\n\tTrain Loss: 1.201 | Train Acc: 36.00%\n\t Val. Loss: 1.132 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.216 | Train GAN Acc: 42.00%\n\t Val. GAN Loss: 1.054 | Val. GAN Acc: 33.67%\nEpoch: 08 | Epoch Time: 0m 0s\n\tTrain Loss: 1.284 | Train Acc: 30.00%\n\t Val. Loss: 1.131 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.274 | Train GAN Acc: 36.00%\n\t Val. GAN Loss: 1.059 | Val. GAN Acc: 29.67%\nEpoch: 09 | Epoch Time: 0m 0s\n\tTrain Loss: 1.229 | Train Acc: 30.67%\n\t Val. Loss: 1.130 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.423 | Train GAN Acc: 31.33%\n\t Val. GAN Loss: 1.063 | Val. GAN Acc: 29.00%\nEpoch: 10 | Epoch Time: 0m 0s\n\tTrain Loss: 1.263 | Train Acc: 34.67%\n\t Val. Loss: 1.129 | Val. 
Acc: 8.00%\n\tTrain GAN Loss: 1.211 | Train GAN Acc: 32.00%\n\t Val. GAN Loss: 1.067 | Val. GAN Acc: 29.00%\nEpoch: 11 | Epoch Time: 0m 0s\n\tTrain Loss: 1.347 | Train Acc: 29.33%\n\t Val. Loss: 1.128 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.288 | Train GAN Acc: 32.67%\n\t Val. GAN Loss: 1.069 | Val. GAN Acc: 29.33%\nEpoch: 12 | Epoch Time: 0m 0s\n\tTrain Loss: 1.163 | Train Acc: 37.33%\n\t Val. Loss: 1.128 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.248 | Train GAN Acc: 42.00%\n\t Val. GAN Loss: 1.072 | Val. GAN Acc: 29.67%\nEpoch: 13 | Epoch Time: 0m 0s\n\tTrain Loss: 1.221 | Train Acc: 33.33%\n\t Val. Loss: 1.127 | Val. Acc: 8.00%\n\tTrain GAN Loss: 1.316 | Train GAN Acc: 32.67%\n\t Val. GAN Loss: 1.074 | Val. GAN Acc: 30.00%\nEpoch: 14 | Epoch Time: 0m 0s\n\tTrain Loss: 1.194 | Train Acc: 33.33%\n\t Val. Loss: 1.126 | Val. Acc: 8.33%\n\tTrain GAN Loss: 1.141 | Train GAN Acc: 37.33%\n\t Val. GAN Loss: 1.076 | Val. GAN Acc: 30.00%\nEpoch: 15 | Epoch Time: 0m 0s\n\tTrain Loss: 1.222 | Train Acc: 31.33%\n\t Val. Loss: 1.125 | Val. Acc: 8.33%\n\tTrain GAN Loss: 1.300 | Train GAN Acc: 33.33%\n\t Val. GAN Loss: 1.078 | Val. GAN Acc: 31.00%\nEpoch: 16 | Epoch Time: 0m 0s\n\tTrain Loss: 1.197 | Train Acc: 33.33%\n\t Val. Loss: 1.125 | Val. Acc: 8.33%\n\tTrain GAN Loss: 1.296 | Train GAN Acc: 32.00%\n\t Val. GAN Loss: 1.080 | Val. GAN Acc: 32.00%\nEpoch: 17 | Epoch Time: 0m 0s\n\tTrain Loss: 1.290 | Train Acc: 28.67%\n\t Val. Loss: 1.124 | Val. Acc: 8.67%\n\tTrain GAN Loss: 1.197 | Train GAN Acc: 34.67%\n\t Val. GAN Loss: 1.082 | Val. GAN Acc: 32.67%\nEpoch: 18 | Epoch Time: 0m 0s\n\tTrain Loss: 1.208 | Train Acc: 32.67%\n\t Val. Loss: 1.123 | Val. Acc: 8.67%\n\tTrain GAN Loss: 1.187 | Train GAN Acc: 39.33%\n\t Val. GAN Loss: 1.084 | Val. GAN Acc: 34.33%\nEpoch: 19 | Epoch Time: 0m 0s\n\tTrain Loss: 1.165 | Train Acc: 33.33%\n\t Val. Loss: 1.123 | Val. Acc: 9.00%\n\tTrain GAN Loss: 1.363 | Train GAN Acc: 31.33%\n\t Val. GAN Loss: 1.086 | Val. 
GAN Acc: 35.33%\nEpoch: 20 | Epoch Time: 0m 0s\n\tTrain Loss: 1.199 | Train Acc: 31.33%\n\t Val. Loss: 1.122 | Val. Acc: 9.00%\n\tTrain GAN Loss: 1.286 | Train GAN Acc: 35.33%\n\t Val. GAN Loss: 1.088 | Val. GAN Acc: 35.33%\nEpoch: 21 | Epoch Time: 0m 0s\n\tTrain Loss: 1.227 | Train Acc: 30.67%\n\t Val. Loss: 1.121 | Val. Acc: 11.00%\n\tTrain GAN Loss: 1.249 | Train GAN Acc: 35.33%\n\t Val. GAN Loss: 1.089 | Val. GAN Acc: 35.33%\nEpoch: 22 | Epoch Time: 0m 0s\n\tTrain Loss: 1.176 | Train Acc: 40.00%\n\t Val. Loss: 1.120 | Val. Acc: 11.00%\n\tTrain GAN Loss: 1.161 | Train GAN Acc: 38.67%\n\t Val. GAN Loss: 1.091 | Val. GAN Acc: 35.00%\nEpoch: 23 | Epoch Time: 0m 0s\n\tTrain Loss: 1.214 | Train Acc: 33.33%\n\t Val. Loss: 1.119 | Val. Acc: 11.33%\n\tTrain GAN Loss: 1.159 | Train GAN Acc: 38.00%\n\t Val. GAN Loss: 1.093 | Val. GAN Acc: 35.00%\nEpoch: 24 | Epoch Time: 0m 0s\n\tTrain Loss: 1.208 | Train Acc: 34.00%\n\t Val. Loss: 1.118 | Val. Acc: 11.33%\n\tTrain GAN Loss: 1.262 | Train GAN Acc: 28.00%\n\t Val. GAN Loss: 1.095 | Val. GAN Acc: 34.00%\nEpoch: 25 | Epoch Time: 0m 0s\n\tTrain Loss: 1.218 | Train Acc: 30.67%\n\t Val. Loss: 1.117 | Val. Acc: 12.33%\n\tTrain GAN Loss: 1.235 | Train GAN Acc: 34.00%\n\t Val. GAN Loss: 1.097 | Val. GAN Acc: 33.00%\nEpoch: 26 | Epoch Time: 0m 0s\n\tTrain Loss: 1.175 | Train Acc: 40.67%\n\t Val. Loss: 1.116 | Val. Acc: 13.67%\n\tTrain GAN Loss: 1.170 | Train GAN Acc: 34.67%\n\t Val. GAN Loss: 1.099 | Val. GAN Acc: 31.00%\nEpoch: 27 | Epoch Time: 0m 0s\n\tTrain Loss: 1.153 | Train Acc: 38.00%\n\t Val. Loss: 1.115 | Val. Acc: 14.67%\n\tTrain GAN Loss: 1.252 | Train GAN Acc: 30.00%\n\t Val. GAN Loss: 1.100 | Val. GAN Acc: 29.33%\nEpoch: 28 | Epoch Time: 0m 0s\n\tTrain Loss: 1.217 | Train Acc: 30.67%\n\t Val. Loss: 1.113 | Val. Acc: 15.67%\n\tTrain GAN Loss: 1.172 | Train GAN Acc: 42.00%\n\t Val. GAN Loss: 1.102 | Val. GAN Acc: 28.33%\nEpoch: 29 | Epoch Time: 0m 0s\n\tTrain Loss: 1.238 | Train Acc: 28.67%\n\t Val. Loss: 1.112 | Val. 
Acc: 16.67%\n\tTrain GAN Loss: 1.168 | Train GAN Acc: 32.67%\n\t Val. GAN Loss: 1.104 | Val. GAN Acc: 29.33%\nEpoch: 30 | Epoch Time: 0m 0s\n\tTrain Loss: 1.171 | Train Acc: 34.67%\n\t Val. Loss: 1.111 | Val. Acc: 17.33%\n\tTrain GAN Loss: 1.217 | Train GAN Acc: 38.00%\n\t Val. GAN Loss: 1.106 | Val. GAN Acc: 30.33%\nEpoch: 31 | Epoch Time: 0m 0s\n\tTrain Loss: 1.198 | Train Acc: 31.33%\n\t Val. Loss: 1.110 | Val. Acc: 19.00%\n\tTrain GAN Loss: 1.220 | Train GAN Acc: 32.67%\n\t Val. GAN Loss: 1.108 | Val. GAN Acc: 30.67%\nEpoch: 32 | Epoch Time: 0m 0s\n\tTrain Loss: 1.169 | Train Acc: 34.67%\n\t Val. Loss: 1.110 | Val. Acc: 21.00%\n\tTrain GAN Loss: 1.191 | Train GAN Acc: 33.33%\n\t Val. GAN Loss: 1.109 | Val. GAN Acc: 30.33%\nEpoch: 33 | Epoch Time: 0m 0s\n\tTrain Loss: 1.193 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 23.67%\n\tTrain GAN Loss: 1.177 | Train GAN Acc: 35.33%\n\t Val. GAN Loss: 1.111 | Val. GAN Acc: 30.67%\nEpoch: 34 | Epoch Time: 0m 0s\n\tTrain Loss: 1.200 | Train Acc: 29.33%\n\t Val. Loss: 1.108 | Val. Acc: 24.67%\n\tTrain GAN Loss: 1.217 | Train GAN Acc: 36.00%\n\t Val. GAN Loss: 1.112 | Val. GAN Acc: 30.00%\nEpoch: 35 | Epoch Time: 0m 0s\n\tTrain Loss: 1.126 | Train Acc: 36.00%\n\t Val. Loss: 1.108 | Val. Acc: 26.67%\n\tTrain GAN Loss: 1.191 | Train GAN Acc: 33.33%\n\t Val. GAN Loss: 1.114 | Val. GAN Acc: 29.67%\nEpoch: 36 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 35.33%\n\t Val. Loss: 1.107 | Val. Acc: 28.67%\n\tTrain GAN Loss: 1.091 | Train GAN Acc: 43.33%\n\t Val. GAN Loss: 1.115 | Val. GAN Acc: 29.00%\nEpoch: 37 | Epoch Time: 0m 0s\n\tTrain Loss: 1.158 | Train Acc: 36.67%\n\t Val. Loss: 1.107 | Val. Acc: 30.00%\n\tTrain GAN Loss: 1.152 | Train GAN Acc: 40.00%\n\t Val. GAN Loss: 1.117 | Val. GAN Acc: 29.67%\nEpoch: 38 | Epoch Time: 0m 0s\n\tTrain Loss: 1.162 | Train Acc: 36.00%\n\t Val. Loss: 1.107 | Val. Acc: 30.33%\n\tTrain GAN Loss: 1.149 | Train GAN Acc: 32.00%\n\t Val. GAN Loss: 1.118 | Val. 
GAN Acc: 30.33%\nEpoch: 39 | Epoch Time: 0m 0s\n\tTrain Loss: 1.092 | Train Acc: 42.67%\n\t Val. Loss: 1.106 | Val. Acc: 30.67%\n\tTrain GAN Loss: 1.218 | Train GAN Acc: 30.67%\n\t Val. GAN Loss: 1.119 | Val. GAN Acc: 29.67%\nEpoch: 40 | Epoch Time: 0m 0s\n\tTrain Loss: 1.194 | Train Acc: 32.67%\n\t Val. Loss: 1.106 | Val. Acc: 31.67%\n\tTrain GAN Loss: 1.084 | Train GAN Acc: 40.67%\n\t Val. GAN Loss: 1.121 | Val. GAN Acc: 30.00%\nEpoch: 41 | Epoch Time: 0m 0s\n\tTrain Loss: 1.135 | Train Acc: 40.00%\n\t Val. Loss: 1.106 | Val. Acc: 32.33%\n\tTrain GAN Loss: 1.117 | Train GAN Acc: 36.00%\n\t Val. GAN Loss: 1.122 | Val. GAN Acc: 29.33%\nEpoch: 42 | Epoch Time: 0m 0s\n\tTrain Loss: 1.138 | Train Acc: 35.33%\n\t Val. Loss: 1.106 | Val. Acc: 32.33%\n\tTrain GAN Loss: 1.111 | Train GAN Acc: 40.67%\n\t Val. GAN Loss: 1.123 | Val. GAN Acc: 29.00%\nEpoch: 43 | Epoch Time: 0m 0s\n\tTrain Loss: 1.106 | Train Acc: 41.33%\n\t Val. Loss: 1.106 | Val. Acc: 32.33%\n\tTrain GAN Loss: 1.126 | Train GAN Acc: 35.33%\n\t Val. GAN Loss: 1.124 | Val. GAN Acc: 28.67%\nEpoch: 44 | Epoch Time: 0m 0s\n\tTrain Loss: 1.236 | Train Acc: 28.00%\n\t Val. Loss: 1.106 | Val. Acc: 32.33%\n\tTrain GAN Loss: 1.115 | Train GAN Acc: 39.33%\n\t Val. GAN Loss: 1.125 | Val. GAN Acc: 29.33%\nEpoch: 45 | Epoch Time: 0m 0s\n\tTrain Loss: 1.185 | Train Acc: 30.67%\n\t Val. Loss: 1.106 | Val. Acc: 32.33%\n\tTrain GAN Loss: 1.164 | Train GAN Acc: 37.33%\n\t Val. GAN Loss: 1.126 | Val. GAN Acc: 29.00%\n"
],
[
"#test\nmodel_nn.load_state_dict(torch.load('nn-agg.pt'))\n\ntest_loss, test_acc, test_prec, test_recall, test_f1 = evaluate(model_nn, test_dataloader, criterion)\nprint(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% | Test Prec: {test_prec*100:.2f}% | Test Recall: {test_recall*100:.2f}% | Test F1: {test_f1*100:.2f}%')\n\nmodel_nn_GAN.load_state_dict(torch.load('nn-agg-GAN.pt'))\n\ntest_GAN_loss, test_GAN_acc, test_GAN_prec, test_GAN_recall, test_GAN_f1 = evaluate(model_nn_GAN, test_dataloader, criterion_GAN)\nprint(f'Test GAN Loss: {test_GAN_loss:.3f} | Test GAN Acc: {test_GAN_acc*100:.2f}% | Test GAN Prec: {test_GAN_prec*100:.2f}% | Test GAN Recall: {test_GAN_recall*100:.2f}% | Test GAN F1: {test_GAN_f1*100:.2f}%')\n",
"Test Loss: 1.107 | Test Acc: 36.67% | Test Prec: 33.99% | Test Recall: 36.67% | Test F1: 23.19%\nTest GAN Loss: 1.141 | Test GAN Acc: 9.44% | Test GAN Prec: 38.91% | Test GAN Recall: 9.44% | Test GAN F1: 7.79%\n"
]
],
[
[
"# Bootstrapping",
"_____no_output_____"
]
],
[
[
"#bootstrapping\nboot = 100\nresults = []\ntest_GAN_loss_list, test_GAN_acc_list, test_GAN_prec_list, test_GAN_recall_list, test_GAN_f1_list = [],[],[],[],[]\n\nfor i in range(boot):\n np.random.seed(i)\n boot_idx = np.random.randint(0,150, size=(150,))\n #print(boot_idx)\n Xs_boot = Xs[boot_idx,:]\n Xs_boot = encoderA(torch.from_numpy(Xs_boot).float()*torch.from_numpy(np.random.binomial(size=x_train.size(), n=1, p=1-noise)))\n Ys_boot = y_source[boot_idx]\n \n train_GAN_set_boot = HerbalData(Xs_boot, torch.from_numpy(Ys_boot))\n \n \n #train_GAN_set_boot = train_GAN_set[boot_idx]\n train_GAN_dataloader_boot = DataLoader(train_GAN_set_boot, batch_size=128, shuffle=True)\n test_GAN_loss, test_GAN_acc, test_GAN_prec, test_GAN_recall, test_GAN_f1 = evaluate(model_nn_GAN, train_GAN_dataloader_boot, criterion_GAN)\n #print(test_GAN_acc)\n \n test_GAN_loss_list.append(test_GAN_loss)\n test_GAN_acc_list.append(test_GAN_acc) \n test_GAN_prec_list.append(test_GAN_prec) \n test_GAN_recall_list.append(test_GAN_recall) \n test_GAN_f1_list.append(test_GAN_f1)\n",
"_____no_output_____"
],
[
"#bootstrapping painting\n#print(test_GAN_acc_list)\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(9,6))\nplt.plot(test_GAN_acc_list,'r')\n#plt.plot(glossrec_recorder,'c:',linewidth=3)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.grid()\nplt.legend(['Total Loss','Generator Faking Loss','Discriminator Loss','Generator Reconstruction Loss'],fontsize=18)\nplt.title('Total Loss and Reconstruction Loss')\nplt.savefig(specs+'loss.pdf',bbox_inches='tight')\nplt.show()",
"_____no_output_____"
],
[
"from numpy import mean\nprint(\"bootstrapping test acc with GAN:\", mean(test_GAN_acc_list))",
"bootstrapping test acc with GAN: 0.33050426267087457\n"
],
[
"from sklearn.metrics import confusion_matrix as cfm\ndef predict(model, x):\n x = torch.from_numpy(x)\n with torch.no_grad():\n return torch.argmax(torch.softmax(model(x), 1), dim=1).detach().numpy()",
"_____no_output_____"
],
[
"def confusion_matrix_plot(y_pred, y_true):\n cm = cfm(y_pred, y_true, normalize='true')\n #print(cm)\n normalize = True\n cmap = 'RdPu'\n classes = [0, 1, 2]\n title = 'cofusion matrix'\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax = ax)\n ax.set(xticks = np.arange(cm.shape[1]), yticks = np.arange(cm.shape[0]), xticklabels = classes, yticklabels = classes, ylabel = 'True label', xlabel = 'Predicted label', title = title)\n plt.setp(ax.get_xticklabels(), rotation=45, ha = 'right', rotation_mode = 'anchor')\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt), ha = 'center', va = 'center', color = 'white' if cm[i,j] > thresh else 'black')\n fig.tight_layout()\n\ndef metric_epoch(train_loss, valid_loss, train_f1, valid_f1):\n x = range(0, len(train_loss))\n plt.figure(figsize=(14,3))\n grid = plt.GridSpec(3, 2, wspace=0.5, hspace=0.5)\n plt.subplot(grid[:,0])\n plt.plot(x, train_f1, color=\"r\", marker='o',markersize='1.5',markeredgecolor='r',markeredgewidth = 1.5, label = 'Train F1 score')\n plt.plot(x, valid_f1, color=\"b\", marker='o',markersize='1.5',markeredgecolor='b',markeredgewidth = 1.5, label = 'Valid F1 score')\n plt.legend()\n plt.title('F1 score vs epoches')\n plt.xlabel('epoches')\n plt.ylabel('F1 score')\n plt.subplot(grid[:,1])\n plt.plot(x, train_loss, color=\"red\", marker='o',markersize='1.5',markeredgecolor='r',markeredgewidth = 1.5, label = 'Train Loss')\n plt.plot(x, valid_loss, color=\"blue\", marker='o',markersize='1.5',markeredgecolor='b',markeredgewidth = 1.5, label = 'Valid Loss')\n plt.legend()\n plt.title('Loss vs epoches')\n plt.xlabel('epoches')\n plt.ylabel('Loss')\n plt.show()\n",
"_____no_output_____"
],
[
"y_test_prediction = predict(model_nn, x_test.numpy())\n#print(y_target_train)\nconfusion_matrix_plot(y_test_prediction, y_test)\nprint(y_test_prediction)",
"[1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 1 2 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1\n 1 1 1 0 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]\n"
],
[
"y_test_prediction_GAN = predict(model_nn_GAN, x_test.numpy())\n#print(y_target_train)\nconfusion_matrix_plot(y_test_prediction_GAN, y_test)\nprint(y_test_prediction_GAN)",
"[1 1 1 2 1 1 1 1 1 1 1 1 1 1 2 1 0 2 2 1 2 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 2 2 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 2 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2\n 2 1 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 2 2 2 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]\n"
],
[
"metric_epoch(train_loss, valid_loss, train_f1, valid_f1)",
"_____no_output_____"
],
[
"metric_epoch(train_GAN_loss, valid_GAN_loss, train_GAN_f1, valid_GAN_f1)",
"_____no_output_____"
]
],
[
[
"# DRCA",
"_____no_output_____"
]
],
[
[
"class DRCA():\n '''\n The DRCA Class\n '''\n def __init__(self, n_components = 2,alpha = None, mode = 'raw'):\n '''\n The function to initialize the DRCA class\n :param n_components: The intended dimensionality of projection hyperplane smaller than the initial dimensionality\n :param alpha: weighting factor for target domain data within class scatter\n :param mode: the mode of DRCA:\n 'raw': consider source domain data (S) and target domain data (T) as two groups\n 'number': consider type-specific source domain data and target domain data based on the average number of cases in S and T\n 'mean': equal weights for each class\n '''\n self.mode = mode\n self.Sw_s = None\n self.Sw_t = None\n self.mu_s = None\n self.mu_t = None\n self.alpha = alpha\n self.D_tilde = n_components\n pass\n\n def fit(self, Xs, Xt, Ys=None, Yt = None):\n '''\n This function fit the DRCA model with the data and labels given by users\n :param Xs: the feature matrix of shape (Ns, D) in source domain, np.array\n :param Xt: the feature matrix of shape (Nt, D) in target domain, np.array\n :param Ys: the label of the data of shape (Ns,) in source domain, np.array, int\n :param Yt: the label of the data of shape (Nt,) in target domain, np.array, int\n '''\n ### --- Summarize statistics --- ###\n if self.mode != 'raw':\n Ys = Ys.reshape(-1,) #we need to use Y and make sure the Y is the intended form\n Yt = Yt.reshape(-1,)\n Ns = Xs.shape[0]\n Nt = Xt.shape[0]\n D = Xs.shape[1]\n \n ### --- Within-domain scatter --- ###\n self.mu_s = np.mean(Xs,axis=0,keepdims=True) #1*D\n self.mu_t = np.mean(Xt,axis=0,keepdims=True)\n self.Sw_s = (Xs - self.mu_s).T @ (Xs - self.mu_s) #D*D\n self.Sw_t = (Xt - self.mu_t).T @ (Xt - self.mu_t) #D*D\n if self.alpha == None:\n self.alpha = Ns/Nt\n self.nominator = self.Sw_s + self.Sw_t * self.alpha\n \n ### --- Eliminate sensor drifts --- ###\n if self.mode == 'raw': #S and T as two entities\n self.denominator = (self.mu_s - self.mu_t).T @ (self.mu_s-self.mu_t) #D*D\n 
elif self.mode == 'number': #Focus on the same classes appeared in target domain\n            classes = np.unique(Yt) #Assume that the target domain classes are fewer\n            Kt = classes.shape[0]\n            self.denominator = np.zeros((D,D)) #accumulated with += below, so must start at zero (np.empty held garbage)\n            for i in range(Kt):\n                Ns_i = np.sum(Ys==classes[i]) #class-i sample counts (was np.mean and indexed the int Kt)\n                Nt_i = np.sum(Yt==classes[i])\n                N = 0.5*(Ns_i+Nt_i) #was self.Ns/self.Nt, which are never defined\n                mu_s_matrix = np.mean(Xs[Ys==classes[i],:],axis=0,keepdims=True)\n                mu_t_matrix = np.mean(Xt[Yt==classes[i],:],axis=0,keepdims=True)\n                Sb_matrix = (mu_s_matrix-mu_t_matrix).T @ (mu_s_matrix-mu_t_matrix) #locals, not self attributes\n                self.denominator += N * Sb_matrix #fixed 'denomiator' typo so the accumulator is actually updated\n        elif self.mode == 'mean': #Equal weights for every class\n            classes = np.unique(Yt) #Assume that the target domain classes are fewer\n            Kt = classes.shape[0]\n            self.denominator = np.zeros((D,D))\n            for i in range(Kt):\n                mu_s_matrix = np.mean(Xs[Ys==classes[i],:],axis=0,keepdims=True)#1*D\n                mu_t_matrix = np.mean(Xt[Yt==classes[i],:],axis=0,keepdims=True)#1*D\n                Sb_matrix = (mu_s_matrix-mu_t_matrix).T @ (mu_s_matrix-mu_t_matrix)\n                self.denominator += Sb_matrix#D*D\n        \n        \n        eigenValues, eigenVectors = np.linalg.eig(np.linalg.pinv(self.denominator) @ self.nominator) #D*D\n\n        idx = np.abs(eigenValues).argsort()[::-1] \n        self.eigenValues = eigenValues[idx]\n        self.eigenVectors = eigenVectors[:,idx]\n        self.W = self.eigenVectors[:,0:self.D_tilde]#shape=(D,D_tilde)\n        pass \n    \n    def transform(self, X):\n        '''\n        This function use the fitted SRLDA model\n        :param X: the data in np.array of shape (N,D) that needs to be projected to the lower dimension\n        :return: X_tilde: the projected data in the lower dimensional space in np.array of shape (N, D_tilde)\n        '''\n        return np.matmul(X,self.W) #goal: (N,D_tilde) (D_tilde*D)@(D*N).T (N*D)(D*D_tilde)\n        pass\n    \n    def fit_transform(self, Xs, Xt, Ys=None, Yt = None):\n        '''\n        :param Xs: the feature matrix of shape (Ns, D) in source domain, np.array\n        :param Xt: the feature matrix of shape (Nt, D) in target domain, np.array\n        :param Ys: the label of the data of shape (Ns,) in source domain, 
np.array, int\n :param Yt: the label of the data of shape (Nt,) in target domain, np.array, int '''\n \n self.fit(Xs, Xt, Ys, Yt)\n return np.real(self.transform(Xs)),np.real(self.transform(Xt)) #N * D_tilde\n pass",
"_____no_output_____"
],
[
"# With DRCA\ndrca=DRCA(n_components=50, alpha=0.01)#n_components and alpha value are hyperparameters\nXs_drca, Xt_drca = drca.fit_transform(Xs,Xt)\n\n#Target domain train test split\ntrain_idx = np.concatenate((np.arange(0,100),np.arange(160,260),np.arange(320,420))).reshape(-1,)\ntest_idx = np.concatenate((np.arange(100,160),np.arange(260,320),np.arange(420,480))).reshape(-1,)\n\n# Labels\ny_target=np.hstack((0*np.ones(160),1*np.ones(160),2*np.ones(160))).astype(np.int64)\ny_source=np.hstack((0*np.ones(50),1*np.ones(50),2*np.ones(50))).astype(np.int64)\n\n#X and Y, need to change to our data\nXA_drca_train = Xs_drca\nXB_drca_train = Xt_drca[train_idx,:]\nassert XB_drca_train.shape[0] == 300\nXA_drca_test = Xs_drca\nXB_drca_test = Xt_drca[test_idx,:]\nassert XB_drca_test.shape[0] == 180\ny_valid = y_target[train_idx]\nassert y_valid.shape[0] == 300\ny_test = y_target[test_idx]\nassert y_test.shape[0] == 180\n\nscr_drca_train = Variable(torch.from_numpy(XA_drca_train).float())\ntgt_drca_train = Variable(torch.from_numpy(XB_drca_train).float())\ntgt_drca_test = Variable(torch.from_numpy(XB_drca_test).float())\n\n\nx_drca_train = torch.tensor(scr_drca_train)\nx_drca_valid = torch.tensor(tgt_drca_train) \nx_drca_test = torch.tensor(tgt_drca_test)\n\ntrain_drca_set = HerbalData(x_drca_train, torch.from_numpy(y_source))\nvalid_drca_set = HerbalData(x_drca_valid, torch.from_numpy(y_valid))\ntest_drca_set = HerbalData(x_drca_test, torch.from_numpy(y_test))\n\ntrain_drca_dataloader = DataLoader(train_drca_set, batch_size=150, shuffle=True)\nvalid_drca_dataloader = DataLoader(valid_drca_set, batch_size=len(valid_drca_set), shuffle=True)\ntest_drca_dataloader = DataLoader(test_drca_set, batch_size=len(test_drca_set), shuffle=True)\n\n\nprint(\"Train Set:\", x_drca_train.shape)\nprint(\"Valid Set:\", x_drca_valid.shape)\nprint(\"Target Train Set:\", x_drca_test.shape)\n\n\n\n",
"Train Set: torch.Size([150, 50])\nValid Set: torch.Size([300, 50])\nTarget Train Set: torch.Size([180, 50])\n"
],
[
"print(Xs_drca.shape)\nINPUT_DIM = Xs_drca.shape[1]\nOUTPUT_DIM = 3\nDROPOUT = 0.3\nLAYERS = [INPUT_DIM, 64, 32, 16, OUTPUT_DIM]\n\nmodel_drca_nn = NN(LAYERS, DROPOUT)\nmodel_drca_nn.float()\nopt_drca_nn = optim.Adam(model_drca_nn.parameters(),lr=1e-4)\n\ncriterion_drca = nn.CrossEntropyLoss()",
"(150, 50)\n"
],
[
"N_EPOCHS = 150\n\ntrain_drca_loss = np.zeros(N_EPOCHS)\ntrain_drca_acc = np.zeros(N_EPOCHS)\ntrain_drca_prec = np.zeros(N_EPOCHS)\ntrain_drca_recall = np.zeros(N_EPOCHS)\ntrain_drca_f1 = np.zeros(N_EPOCHS)\n\nvalid_drca_loss = np.zeros(N_EPOCHS)\nvalid_drca_acc = np.zeros(N_EPOCHS)\nvalid_drca_prec = np.zeros(N_EPOCHS)\nvalid_drca_recall = np.zeros(N_EPOCHS)\nvalid_drca_f1 = np.zeros(N_EPOCHS)\n\n\n\n\nbest_valid_drca_loss = float('inf')\n\nfor i in range(N_EPOCHS):\n\n    start_time = time.time()\n    \n    train_drca_loss[i], train_drca_acc[i], train_drca_prec[i], train_drca_recall[i], train_drca_f1[i] = train(model_drca_nn, train_drca_dataloader, opt_drca_nn, criterion_drca)\n    valid_drca_loss[i], valid_drca_acc[i], valid_drca_prec[i], valid_drca_recall[i], valid_drca_f1[i] = evaluate(model_drca_nn, valid_drca_dataloader, criterion_drca)\n    \n    \n    end_time = time.time()\n\n    epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n    \n    if valid_drca_loss[i] < best_valid_drca_loss:\n        best_valid_drca_loss = valid_drca_loss[i]  # fixed 'best_valid_ldrca_oss' typo so the tracker actually updates\n        torch.save(model_drca_nn.state_dict(), 'nn-agg-drca.pt')  # save the DRCA model, not the unrelated model_nn\n    \n    print(f'Epoch: {i+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n    print(f'\\tTrain Loss: {train_drca_loss[i]:.3f} | Train Acc: {train_drca_acc[i]*100:.2f}%')\n    print(f'\\t Val. Loss: {valid_drca_loss[i]:.3f} | Val. Acc: {valid_drca_acc[i]*100:.2f}%')\n",
"Epoch: 01 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 32.67%\n\t Val. Loss: 1.099 | Val. Acc: 31.33%\nEpoch: 02 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 29.33%\n\t Val. Loss: 1.099 | Val. Acc: 31.00%\nEpoch: 03 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 22.67%\n\t Val. Loss: 1.099 | Val. Acc: 28.00%\nEpoch: 04 | Epoch Time: 0m 0s\n\tTrain Loss: 1.101 | Train Acc: 25.33%\n\t Val. Loss: 1.099 | Val. Acc: 28.00%\nEpoch: 05 | Epoch Time: 0m 0s\n\tTrain Loss: 1.101 | Train Acc: 28.00%\n\t Val. Loss: 1.098 | Val. Acc: 28.00%\nEpoch: 06 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 32.00%\n\t Val. Loss: 1.098 | Val. Acc: 28.67%\nEpoch: 07 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 36.67%\n\t Val. Loss: 1.098 | Val. Acc: 29.00%\nEpoch: 08 | Epoch Time: 0m 0s\n\tTrain Loss: 1.103 | Train Acc: 26.00%\n\t Val. Loss: 1.098 | Val. Acc: 29.33%\nEpoch: 09 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 30.00%\n\t Val. Loss: 1.098 | Val. Acc: 28.67%\nEpoch: 10 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 32.67%\n\t Val. Loss: 1.098 | Val. Acc: 28.67%\nEpoch: 11 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 25.33%\n\t Val. Loss: 1.098 | Val. Acc: 29.00%\nEpoch: 12 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 32.67%\n\t Val. Loss: 1.098 | Val. Acc: 29.67%\nEpoch: 13 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 34.67%\n\t Val. Loss: 1.098 | Val. Acc: 30.67%\nEpoch: 14 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 35.33%\n\t Val. Loss: 1.098 | Val. Acc: 31.00%\nEpoch: 15 | Epoch Time: 0m 0s\n\tTrain Loss: 1.101 | Train Acc: 28.67%\n\t Val. Loss: 1.098 | Val. Acc: 30.67%\nEpoch: 16 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 24.67%\n\t Val. Loss: 1.098 | Val. Acc: 30.33%\nEpoch: 17 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 28.00%\n\t Val. Loss: 1.098 | Val. Acc: 30.33%\nEpoch: 18 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 30.00%\n\t Val. 
Loss: 1.098 | Val. Acc: 30.67%\nEpoch: 19 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 31.33%\n\t Val. Loss: 1.098 | Val. Acc: 30.33%\nEpoch: 20 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 26.00%\n\t Val. Loss: 1.098 | Val. Acc: 30.67%\nEpoch: 21 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 26.67%\n\t Val. Loss: 1.098 | Val. Acc: 31.00%\nEpoch: 22 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 32.00%\n\t Val. Loss: 1.098 | Val. Acc: 31.00%\nEpoch: 23 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 28.67%\n\t Val. Loss: 1.098 | Val. Acc: 31.33%\nEpoch: 24 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 27.33%\n\t Val. Loss: 1.098 | Val. Acc: 31.67%\nEpoch: 25 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 30.67%\n\t Val. Loss: 1.098 | Val. Acc: 31.67%\nEpoch: 26 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 28.00%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 27 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 30.67%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 28 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 31.33%\n\t Val. Loss: 1.097 | Val. Acc: 31.33%\nEpoch: 29 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 32.00%\n\t Val. Loss: 1.097 | Val. Acc: 31.33%\nEpoch: 30 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 32.67%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 31 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 29.33%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 32 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 28.67%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 33 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 37.33%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 34 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 31.33%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 35 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 30.67%\n\t Val. Loss: 1.097 | Val. 
Acc: 31.67%\nEpoch: 36 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 36.00%\n\t Val. Loss: 1.097 | Val. Acc: 32.00%\nEpoch: 37 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 32.67%\n\t Val. Loss: 1.097 | Val. Acc: 32.33%\nEpoch: 38 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 30.00%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 39 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 30.00%\n\t Val. Loss: 1.097 | Val. Acc: 31.67%\nEpoch: 40 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 31.33%\n\t Val. Loss: 1.097 | Val. Acc: 31.33%\nEpoch: 41 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 31.33%\n\t Val. Loss: 1.097 | Val. Acc: 31.33%\nEpoch: 42 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 29.33%\n\t Val. Loss: 1.096 | Val. Acc: 32.33%\nEpoch: 43 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 32.67%\n\t Val. Loss: 1.096 | Val. Acc: 32.67%\nEpoch: 44 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 32.67%\n\t Val. Loss: 1.096 | Val. Acc: 33.00%\nEpoch: 45 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 38.00%\n\t Val. Loss: 1.096 | Val. Acc: 33.00%\nEpoch: 46 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 32.67%\n\t Val. Loss: 1.096 | Val. Acc: 33.00%\nEpoch: 47 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 32.00%\n\t Val. Loss: 1.096 | Val. Acc: 33.67%\nEpoch: 48 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 34.67%\n\t Val. Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 49 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 30.67%\n\t Val. Loss: 1.096 | Val. Acc: 33.67%\nEpoch: 50 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 34.67%\n\t Val. Loss: 1.096 | Val. Acc: 33.67%\nEpoch: 51 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 32.00%\n\t Val. Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 52 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 30.00%\n\t Val. Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 53 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 32.67%\n\t Val. 
Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 54 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 31.33%\n\t Val. Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 55 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 36.00%\n\t Val. Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 56 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 36.67%\n\t Val. Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 57 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 35.33%\n\t Val. Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 58 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 34.00%\n\t Val. Loss: 1.096 | Val. Acc: 34.00%\nEpoch: 59 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 26.00%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 60 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 29.33%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 61 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 28.67%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 62 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 30.67%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 63 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 37.33%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 64 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 34.00%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 65 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 33.33%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 66 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 36.00%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 67 | Epoch Time: 0m 0s\n\tTrain Loss: 1.094 | Train Acc: 38.00%\n\t Val. Loss: 1.095 | Val. Acc: 34.33%\nEpoch: 68 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 35.33%\n\t Val. Loss: 1.095 | Val. Acc: 34.67%\nEpoch: 69 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 32.00%\n\t Val. Loss: 1.095 | Val. Acc: 34.00%\nEpoch: 70 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 36.00%\n\t Val. Loss: 1.095 | Val. 
Acc: 33.67%\nEpoch: 71 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 35.33%\n\t Val. Loss: 1.095 | Val. Acc: 33.67%\nEpoch: 72 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 40.00%\n\t Val. Loss: 1.095 | Val. Acc: 33.67%\nEpoch: 73 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 39.33%\n\t Val. Loss: 1.095 | Val. Acc: 33.33%\nEpoch: 74 | Epoch Time: 0m 0s\n\tTrain Loss: 1.095 | Train Acc: 32.67%\n\t Val. Loss: 1.095 | Val. Acc: 33.33%\nEpoch: 75 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 39.33%\n\t Val. Loss: 1.094 | Val. Acc: 33.33%\nEpoch: 76 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 32.00%\n\t Val. Loss: 1.094 | Val. Acc: 33.33%\nEpoch: 77 | Epoch Time: 0m 0s\n\tTrain Loss: 1.094 | Train Acc: 32.00%\n\t Val. Loss: 1.094 | Val. Acc: 33.33%\n"
],
[
"#test\n#model_drca_nn.load_state_dict(torch.load('nn-agg-drca.pt'))\n\ntest_drca_loss, test_drca_acc, test_drca_prec, test_drca_recall, test_drca_f1 = evaluate(model_drca_nn, test_drca_dataloader, criterion_drca)\nprint(f'Test Loss: {test_drca_loss:.3f} | Test Acc: {test_drca_acc*100:.2f}% | Test Prec: {test_drca_prec*100:.2f}% | Test Recall: {test_drca_recall*100:.2f}% | Test F1: {test_drca_f1*100:.2f}%')\n",
"Test Loss: 1.093 | Test Acc: 30.00% | Test Prec: 50.50% | Test Recall: 30.00% | Test F1: 25.28%\n"
]
],
[
[
"# GAN_DRCA",
"_____no_output_____"
]
],
[
[
"# With DRCA\ndrca=DRCA(n_components=50, alpha=0.01)#n_components and alpha value are hyperparameters\nXs_GAN = encoderA(torch.tensor(scr_train)*torch.from_numpy(np.random.binomial(size=scr_train.size(), n=1, p=1-noise)))\n\nXs_drca, Xt_drca = drca.fit_transform(Xs_GAN.detach().numpy(),Xt)\n\n#Target domain train test split\ntrain_idx = np.concatenate((np.arange(0,100),np.arange(160,260),np.arange(320,420))).reshape(-1,)\ntest_idx = np.concatenate((np.arange(100,160),np.arange(260,320),np.arange(420,480))).reshape(-1,)\n\n# Labels\ny_target=np.hstack((0*np.ones(160),1*np.ones(160),2*np.ones(160))).astype(np.int64)\ny_source=np.hstack((0*np.ones(50),1*np.ones(50),2*np.ones(50))).astype(np.int64)\n\n#X and Y, need to change to our data\nXA_drca_train = Xs_drca\nXB_drca_train = Xt_drca[train_idx,:]\nassert XB_drca_train.shape[0] == 300\nXA_drca_test = Xs_drca\nXB_drca_test = Xt_drca[test_idx,:]\nassert XB_drca_test.shape[0] == 180\ny_valid = y_target[train_idx]\nassert y_valid.shape[0] == 300\ny_test = y_target[test_idx]\nassert y_test.shape[0] == 180\n\nscr_drca_train = Variable(torch.from_numpy(XA_drca_train).float())\ntgt_drca_train = Variable(torch.from_numpy(XB_drca_train).float())\ntgt_drca_test = Variable(torch.from_numpy(XB_drca_test).float())\n\n\nx_drca_train = torch.tensor(scr_drca_train)\nx_drca_valid = torch.tensor(tgt_drca_train) \nx_drca_test = torch.tensor(tgt_drca_test)\n\ntrain_drca_set = HerbalData(x_drca_train, torch.from_numpy(y_source))\nvalid_drca_set = HerbalData(x_drca_valid, torch.from_numpy(y_valid))\ntest_drca_set = HerbalData(x_drca_test, torch.from_numpy(y_test))\n\ntrain_drca_dataloader = DataLoader(train_drca_set, batch_size=150, shuffle=True)\nvalid_drca_dataloader = DataLoader(valid_drca_set, batch_size=len(valid_drca_set), shuffle=True)\ntest_drca_dataloader = DataLoader(test_drca_set, batch_size=len(test_drca_set), shuffle=True)\n\n\nprint(\"Train Set:\", x_drca_train.shape)\nprint(\"Valid Set:\", x_drca_valid.shape)\nprint(\"Target 
Train Set:\", x_drca_test.shape)\n",
"Train Set: torch.Size([150, 50])\nValid Set: torch.Size([300, 50])\nTarget Train Set: torch.Size([180, 50])\n"
],
[
"print(Xs_drca.shape)\nINPUT_DIM = Xs_drca.shape[1]\nOUTPUT_DIM = 3\nDROPOUT = 0.3\nLAYERS = [INPUT_DIM, 64, 32, 16, OUTPUT_DIM]\n\nmodel_drca_nn = NN(LAYERS, DROPOUT)\nmodel_drca_nn.float()\nopt_drca_nn = optim.Adam(model_drca_nn.parameters(),lr=1e-4)\n\ncriterion_drca = nn.CrossEntropyLoss()",
"(150, 50)\n"
],
[
"N_EPOCHS = 150\n\ntrain_drca_loss = np.zeros(N_EPOCHS)\ntrain_drca_acc = np.zeros(N_EPOCHS)\ntrain_drca_prec = np.zeros(N_EPOCHS)\ntrain_drca_recall = np.zeros(N_EPOCHS)\ntrain_drca_f1 = np.zeros(N_EPOCHS)\n\nvalid_drca_loss = np.zeros(N_EPOCHS)\nvalid_drca_acc = np.zeros(N_EPOCHS)\nvalid_drca_prec = np.zeros(N_EPOCHS)\nvalid_drca_recall = np.zeros(N_EPOCHS)\nvalid_drca_f1 = np.zeros(N_EPOCHS)\n\n\n\n\nbest_valid_drca_loss = float('inf')\n\nfor i in range(N_EPOCHS):\n\n    start_time = time.time()\n    \n    train_drca_loss[i], train_drca_acc[i], train_drca_prec[i], train_drca_recall[i], train_drca_f1[i] = train(model_drca_nn, train_drca_dataloader, opt_drca_nn, criterion_drca)\n    valid_drca_loss[i], valid_drca_acc[i], valid_drca_prec[i], valid_drca_recall[i], valid_drca_f1[i] = evaluate(model_drca_nn, valid_drca_dataloader, criterion_drca)\n    \n    \n    end_time = time.time()\n\n    epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n    \n    if valid_drca_loss[i] < best_valid_drca_loss:\n        best_valid_drca_loss = valid_drca_loss[i]  # fixed 'best_valid_ldrca_oss' typo so the tracker actually updates\n        torch.save(model_drca_nn.state_dict(), 'nn-agg-drca.pt')  # save the DRCA model, not the unrelated model_nn\n    \n    print(f'Epoch: {i+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n    print(f'\\tTrain Loss: {train_drca_loss[i]:.3f} | Train Acc: {train_drca_acc[i]*100:.2f}%')\n    print(f'\\t Val. Loss: {valid_drca_loss[i]:.3f} | Val. Acc: {valid_drca_acc[i]*100:.2f}%')",
"Epoch: 01 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 36.00%\n\t Val. Loss: 1.101 | Val. Acc: 34.00%\nEpoch: 02 | Epoch Time: 0m 0s\n\tTrain Loss: 1.104 | Train Acc: 31.33%\n\t Val. Loss: 1.100 | Val. Acc: 34.33%\nEpoch: 03 | Epoch Time: 0m 0s\n\tTrain Loss: 1.101 | Train Acc: 37.33%\n\t Val. Loss: 1.100 | Val. Acc: 35.33%\nEpoch: 04 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 27.33%\n\t Val. Loss: 1.100 | Val. Acc: 35.33%\nEpoch: 05 | Epoch Time: 0m 0s\n\tTrain Loss: 1.105 | Train Acc: 29.33%\n\t Val. Loss: 1.100 | Val. Acc: 35.00%\nEpoch: 06 | Epoch Time: 0m 0s\n\tTrain Loss: 1.105 | Train Acc: 33.33%\n\t Val. Loss: 1.099 | Val. Acc: 35.00%\nEpoch: 07 | Epoch Time: 0m 0s\n\tTrain Loss: 1.101 | Train Acc: 34.67%\n\t Val. Loss: 1.099 | Val. Acc: 35.33%\nEpoch: 08 | Epoch Time: 0m 0s\n\tTrain Loss: 1.101 | Train Acc: 37.33%\n\t Val. Loss: 1.099 | Val. Acc: 35.33%\nEpoch: 09 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 38.00%\n\t Val. Loss: 1.099 | Val. Acc: 35.67%\nEpoch: 10 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 32.67%\n\t Val. Loss: 1.099 | Val. Acc: 36.00%\nEpoch: 11 | Epoch Time: 0m 0s\n\tTrain Loss: 1.094 | Train Acc: 34.00%\n\t Val. Loss: 1.099 | Val. Acc: 36.00%\nEpoch: 12 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 31.33%\n\t Val. Loss: 1.098 | Val. Acc: 36.00%\nEpoch: 13 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 34.00%\n\t Val. Loss: 1.098 | Val. Acc: 36.33%\nEpoch: 14 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 30.00%\n\t Val. Loss: 1.098 | Val. Acc: 36.33%\nEpoch: 15 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 34.67%\n\t Val. Loss: 1.098 | Val. Acc: 36.33%\nEpoch: 16 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 42.00%\n\t Val. Loss: 1.098 | Val. Acc: 36.33%\nEpoch: 17 | Epoch Time: 0m 0s\n\tTrain Loss: 1.101 | Train Acc: 32.67%\n\t Val. Loss: 1.098 | Val. Acc: 36.33%\nEpoch: 18 | Epoch Time: 0m 0s\n\tTrain Loss: 1.089 | Train Acc: 38.00%\n\t Val. 
Loss: 1.097 | Val. Acc: 36.67%\nEpoch: 19 | Epoch Time: 0m 0s\n\tTrain Loss: 1.094 | Train Acc: 39.33%\n\t Val. Loss: 1.097 | Val. Acc: 36.67%\nEpoch: 20 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 37.33%\n\t Val. Loss: 1.097 | Val. Acc: 36.67%\nEpoch: 21 | Epoch Time: 0m 0s\n\tTrain Loss: 1.092 | Train Acc: 40.00%\n\t Val. Loss: 1.097 | Val. Acc: 37.33%\nEpoch: 22 | Epoch Time: 0m 0s\n\tTrain Loss: 1.100 | Train Acc: 35.33%\n\t Val. Loss: 1.097 | Val. Acc: 37.33%\nEpoch: 23 | Epoch Time: 0m 0s\n\tTrain Loss: 1.094 | Train Acc: 39.33%\n\t Val. Loss: 1.097 | Val. Acc: 37.33%\nEpoch: 24 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 40.00%\n\t Val. Loss: 1.097 | Val. Acc: 37.33%\nEpoch: 25 | Epoch Time: 0m 0s\n\tTrain Loss: 1.091 | Train Acc: 41.33%\n\t Val. Loss: 1.096 | Val. Acc: 38.00%\nEpoch: 26 | Epoch Time: 0m 0s\n\tTrain Loss: 1.092 | Train Acc: 40.67%\n\t Val. Loss: 1.096 | Val. Acc: 38.00%\nEpoch: 27 | Epoch Time: 0m 0s\n\tTrain Loss: 1.094 | Train Acc: 36.67%\n\t Val. Loss: 1.096 | Val. Acc: 38.00%\nEpoch: 28 | Epoch Time: 0m 0s\n\tTrain Loss: 1.096 | Train Acc: 32.67%\n\t Val. Loss: 1.096 | Val. Acc: 38.33%\nEpoch: 29 | Epoch Time: 0m 0s\n\tTrain Loss: 1.092 | Train Acc: 36.67%\n\t Val. Loss: 1.096 | Val. Acc: 38.67%\nEpoch: 30 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 39.33%\n\t Val. Loss: 1.096 | Val. Acc: 38.67%\nEpoch: 31 | Epoch Time: 0m 0s\n\tTrain Loss: 1.092 | Train Acc: 35.33%\n\t Val. Loss: 1.095 | Val. Acc: 38.67%\nEpoch: 32 | Epoch Time: 0m 0s\n\tTrain Loss: 1.095 | Train Acc: 38.67%\n\t Val. Loss: 1.095 | Val. Acc: 38.67%\nEpoch: 33 | Epoch Time: 0m 0s\n\tTrain Loss: 1.092 | Train Acc: 40.00%\n\t Val. Loss: 1.095 | Val. Acc: 39.00%\nEpoch: 34 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 35.33%\n\t Val. Loss: 1.095 | Val. Acc: 39.00%\nEpoch: 35 | Epoch Time: 0m 0s\n\tTrain Loss: 1.098 | Train Acc: 36.67%\n\t Val. Loss: 1.095 | Val. 
Acc: 39.00%\nEpoch: 36 | Epoch Time: 0m 0s\n\tTrain Loss: 1.093 | Train Acc: 35.33%\n\t Val. Loss: 1.095 | Val. Acc: 39.00%\nEpoch: 37 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 36.00%\n\t Val. Loss: 1.094 | Val. Acc: 39.00%\nEpoch: 38 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 39.33%\n\t Val. Loss: 1.094 | Val. Acc: 39.33%\nEpoch: 39 | Epoch Time: 0m 0s\n\tTrain Loss: 1.099 | Train Acc: 34.00%\n\t Val. Loss: 1.094 | Val. Acc: 39.67%\nEpoch: 40 | Epoch Time: 0m 0s\n\tTrain Loss: 1.097 | Train Acc: 35.33%\n\t Val. Loss: 1.094 | Val. Acc: 40.00%\nEpoch: 41 | Epoch Time: 0m 0s\n\tTrain Loss: 1.091 | Train Acc: 38.67%\n\t Val. Loss: 1.094 | Val. Acc: 40.33%\nEpoch: 42 | Epoch Time: 0m 0s\n\tTrain Loss: 1.084 | Train Acc: 49.33%\n\t Val. Loss: 1.094 | Val. Acc: 40.33%\nEpoch: 43 | Epoch Time: 0m 0s\n\tTrain Loss: 1.088 | Train Acc: 40.00%\n\t Val. Loss: 1.093 | Val. Acc: 40.33%\nEpoch: 44 | Epoch Time: 0m 0s\n\tTrain Loss: 1.086 | Train Acc: 37.33%\n\t Val. Loss: 1.093 | Val. Acc: 40.33%\nEpoch: 45 | Epoch Time: 0m 0s\n\tTrain Loss: 1.091 | Train Acc: 40.00%\n\t Val. Loss: 1.093 | Val. Acc: 41.33%\nEpoch: 46 | Epoch Time: 0m 0s\n\tTrain Loss: 1.086 | Train Acc: 41.33%\n\t Val. Loss: 1.093 | Val. Acc: 41.67%\nEpoch: 47 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 38.00%\n\t Val. Loss: 1.093 | Val. Acc: 42.33%\nEpoch: 48 | Epoch Time: 0m 0s\n\tTrain Loss: 1.085 | Train Acc: 40.00%\n\t Val. Loss: 1.092 | Val. Acc: 43.67%\nEpoch: 49 | Epoch Time: 0m 0s\n\tTrain Loss: 1.091 | Train Acc: 40.00%\n\t Val. Loss: 1.092 | Val. Acc: 44.00%\nEpoch: 50 | Epoch Time: 0m 0s\n\tTrain Loss: 1.093 | Train Acc: 38.67%\n\t Val. Loss: 1.092 | Val. Acc: 44.00%\nEpoch: 51 | Epoch Time: 0m 0s\n\tTrain Loss: 1.089 | Train Acc: 36.00%\n\t Val. Loss: 1.092 | Val. Acc: 44.00%\nEpoch: 52 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 36.67%\n\t Val. Loss: 1.091 | Val. Acc: 44.00%\nEpoch: 53 | Epoch Time: 0m 0s\n\tTrain Loss: 1.092 | Train Acc: 34.00%\n\t Val. 
Loss: 1.091 | Val. Acc: 44.33%\nEpoch: 54 | Epoch Time: 0m 0s\n\tTrain Loss: 1.095 | Train Acc: 32.00%\n\t Val. Loss: 1.091 | Val. Acc: 44.33%\nEpoch: 55 | Epoch Time: 0m 0s\n\tTrain Loss: 1.078 | Train Acc: 50.67%\n\t Val. Loss: 1.091 | Val. Acc: 44.33%\nEpoch: 56 | Epoch Time: 0m 0s\n\tTrain Loss: 1.091 | Train Acc: 39.33%\n\t Val. Loss: 1.090 | Val. Acc: 44.67%\nEpoch: 57 | Epoch Time: 0m 0s\n\tTrain Loss: 1.089 | Train Acc: 42.67%\n\t Val. Loss: 1.090 | Val. Acc: 44.67%\nEpoch: 58 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 32.67%\n\t Val. Loss: 1.090 | Val. Acc: 44.67%\nEpoch: 59 | Epoch Time: 0m 0s\n\tTrain Loss: 1.083 | Train Acc: 39.33%\n\t Val. Loss: 1.090 | Val. Acc: 44.67%\nEpoch: 60 | Epoch Time: 0m 0s\n\tTrain Loss: 1.083 | Train Acc: 39.33%\n\t Val. Loss: 1.089 | Val. Acc: 44.67%\nEpoch: 61 | Epoch Time: 0m 0s\n\tTrain Loss: 1.087 | Train Acc: 43.33%\n\t Val. Loss: 1.089 | Val. Acc: 44.67%\nEpoch: 62 | Epoch Time: 0m 0s\n\tTrain Loss: 1.088 | Train Acc: 40.00%\n\t Val. Loss: 1.089 | Val. Acc: 45.00%\nEpoch: 63 | Epoch Time: 0m 0s\n\tTrain Loss: 1.088 | Train Acc: 36.67%\n\t Val. Loss: 1.088 | Val. Acc: 45.00%\nEpoch: 64 | Epoch Time: 0m 0s\n\tTrain Loss: 1.079 | Train Acc: 47.33%\n\t Val. Loss: 1.088 | Val. Acc: 45.00%\nEpoch: 65 | Epoch Time: 0m 0s\n\tTrain Loss: 1.088 | Train Acc: 40.00%\n\t Val. Loss: 1.088 | Val. Acc: 45.33%\nEpoch: 66 | Epoch Time: 0m 0s\n\tTrain Loss: 1.086 | Train Acc: 40.67%\n\t Val. Loss: 1.088 | Val. Acc: 45.33%\nEpoch: 67 | Epoch Time: 0m 0s\n\tTrain Loss: 1.084 | Train Acc: 43.33%\n\t Val. Loss: 1.087 | Val. Acc: 45.33%\nEpoch: 68 | Epoch Time: 0m 0s\n\tTrain Loss: 1.080 | Train Acc: 48.00%\n\t Val. Loss: 1.087 | Val. Acc: 45.33%\nEpoch: 69 | Epoch Time: 0m 0s\n\tTrain Loss: 1.078 | Train Acc: 51.33%\n\t Val. Loss: 1.087 | Val. Acc: 45.33%\nEpoch: 70 | Epoch Time: 0m 0s\n\tTrain Loss: 1.081 | Train Acc: 43.33%\n\t Val. Loss: 1.086 | Val. 
Acc: 45.67%\nEpoch: 71 | Epoch Time: 0m 0s\n\tTrain Loss: 1.089 | Train Acc: 36.67%\n\t Val. Loss: 1.086 | Val. Acc: 45.67%\nEpoch: 72 | Epoch Time: 0m 0s\n\tTrain Loss: 1.082 | Train Acc: 40.00%\n\t Val. Loss: 1.086 | Val. Acc: 46.00%\nEpoch: 73 | Epoch Time: 0m 0s\n\tTrain Loss: 1.078 | Train Acc: 45.33%\n\t Val. Loss: 1.085 | Val. Acc: 46.00%\nEpoch: 74 | Epoch Time: 0m 0s\n\tTrain Loss: 1.081 | Train Acc: 40.67%\n\t Val. Loss: 1.085 | Val. Acc: 46.00%\nEpoch: 75 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 41.33%\n\t Val. Loss: 1.085 | Val. Acc: 46.00%\nEpoch: 76 | Epoch Time: 0m 0s\n\tTrain Loss: 1.085 | Train Acc: 37.33%\n\t Val. Loss: 1.084 | Val. Acc: 46.00%\nEpoch: 77 | Epoch Time: 0m 0s\n\tTrain Loss: 1.074 | Train Acc: 54.67%\n\t Val. Loss: 1.084 | Val. Acc: 46.00%\nEpoch: 78 | Epoch Time: 0m 0s\n\tTrain Loss: 1.084 | Train Acc: 40.67%\n\t Val. Loss: 1.084 | Val. Acc: 46.00%\nEpoch: 79 | Epoch Time: 0m 0s\n\tTrain Loss: 1.090 | Train Acc: 36.00%\n\t Val. Loss: 1.083 | Val. Acc: 46.00%\nEpoch: 80 | Epoch Time: 0m 0s\n\tTrain Loss: 1.078 | Train Acc: 40.00%\n\t Val. Loss: 1.083 | Val. Acc: 46.33%\nEpoch: 81 | Epoch Time: 0m 0s\n\tTrain Loss: 1.080 | Train Acc: 40.67%\n\t Val. Loss: 1.083 | Val. Acc: 46.33%\nEpoch: 82 | Epoch Time: 0m 0s\n\tTrain Loss: 1.077 | Train Acc: 40.00%\n\t Val. Loss: 1.082 | Val. Acc: 46.33%\nEpoch: 83 | Epoch Time: 0m 0s\n\tTrain Loss: 1.079 | Train Acc: 40.67%\n\t Val. Loss: 1.082 | Val. Acc: 46.33%\nEpoch: 84 | Epoch Time: 0m 0s\n\tTrain Loss: 1.080 | Train Acc: 43.33%\n\t Val. Loss: 1.081 | Val. Acc: 46.67%\nEpoch: 85 | Epoch Time: 0m 0s\n\tTrain Loss: 1.077 | Train Acc: 40.67%\n\t Val. Loss: 1.081 | Val. Acc: 47.00%\n"
],
[
"#test\n#model_drca_nn.load_state_dict(torch.load('nn-agg-drca.pt'))\n\ntest_drca_loss, test_drca_acc, test_drca_prec, test_drca_recall, test_drca_f1 = evaluate(model_drca_nn, test_drca_dataloader, criterion_drca)\nprint(f'Test Loss: {test_drca_loss:.3f} | Test Acc: {test_drca_acc*100:.2f}% | Test Prec: {test_drca_prec*100:.2f}% | Test Recall: {test_drca_recall*100:.2f}% | Test F1: {test_drca_f1*100:.2f}%')\n",
"Test Loss: 1.082 | Test Acc: 38.33% | Test Prec: 34.71% | Test Recall: 38.33% | Test F1: 26.91%\n"
]
],
[
[
"# GAN_KMM",
"_____no_output_____"
]
],
[
[
"import scipy\ndef kernel(ker, X1, X2, gamma):\n K = None\n if not ker or ker == 'primal':\n K = X1\n elif ker == 'linear':\n if X2 is not None:\n K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T, np.asarray(X2).T)\n else:\n K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T)\n elif ker == 'rbf':\n if X2 is not None:\n K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, np.asarray(X2).T, gamma)\n else:\n K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, None, gamma)\n return K\n\ndef transform(Xs, Xt, dim=30, kernel_type='primal'):\n '''\n Transform Xs and Xt\n :param Xs: ns * n_feature, source feature\n :param Xt: nt * n_feature, target feature\n :return: Xs_new and Xt_new\n '''\n kernel_type = 'primal'\n lamb = 1\n gamma = 1\n\n X = np.hstack((Xs.T, Xt.T))\n X /= np.linalg.norm(X, axis=0)\n m, n = X.shape\n ns, nt = len(Xs), len(Xt)\n e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))\n M = e * e.T\n M = M / np.linalg.norm(M, 'fro')\n H = np.eye(n) - 1 / n * np.ones((n, n))\n K = kernel('primal', X, None, gamma=gamma)\n n_eye = m if kernel_type == 'primal' else n\n a, b = np.linalg.multi_dot([K, M, K.T]) + lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])\n w, V = scipy.linalg.eig(a, b)\n ind = np.argsort(w)\n A = V[:, ind[:dim]]\n Z = np.dot(A.T, K)\n Z /= np.linalg.norm(Z, axis=0)\n Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T\n return Xs_new, Xt_new",
"_____no_output_____"
],
[
"# With KMM\ndrca=DRCA(n_components=50, alpha=0.01)#n_components and alpha value are hyperparameters\nXs_GAN = encoderA(torch.tensor(scr_train)*torch.from_numpy(np.random.binomial(size=scr_train.size(), n=1, p=1-noise)))\n\nXs_drca, Xt_drca = transform(Xs=Xs, Xt=Xt, dim=50, kernel_type='primal')\n\n#Target domain train test split\ntrain_idx = np.concatenate((np.arange(0,100),np.arange(160,260),np.arange(320,420))).reshape(-1,)\ntest_idx = np.concatenate((np.arange(100,160),np.arange(260,320),np.arange(420,480))).reshape(-1,)\n\n# Labels\ny_target=np.hstack((0*np.ones(160),1*np.ones(160),2*np.ones(160))).astype(np.int64)\ny_source=np.hstack((0*np.ones(50),1*np.ones(50),2*np.ones(50))).astype(np.int64)\n\n#X and Y, need to change to our data\nXA_drca_train = Xs_drca\nXB_drca_train = Xt_drca[train_idx,:]\nassert XB_drca_train.shape[0] == 300\nXA_drca_test = Xs_drca\nXB_drca_test = Xt_drca[test_idx,:]\nassert XB_drca_test.shape[0] == 180\ny_valid = y_target[train_idx]\nassert y_valid.shape[0] == 300\ny_test = y_target[test_idx]\nassert y_test.shape[0] == 180\n\nscr_drca_train = Variable(torch.from_numpy(XA_drca_train).float())\ntgt_drca_train = Variable(torch.from_numpy(XB_drca_train).float())\ntgt_drca_test = Variable(torch.from_numpy(XB_drca_test).float())\n\n\nx_drca_train = torch.tensor(scr_drca_train)\nx_drca_valid = torch.tensor(tgt_drca_train) \nx_drca_test = torch.tensor(tgt_drca_test)\n\ntrain_drca_set = HerbalData(x_drca_train, torch.from_numpy(y_source))\nvalid_drca_set = HerbalData(x_drca_valid, torch.from_numpy(y_valid))\ntest_drca_set = HerbalData(x_drca_test, torch.from_numpy(y_test))\n\ntrain_drca_dataloader = DataLoader(train_drca_set, batch_size=150, shuffle=True)\nvalid_drca_dataloader = DataLoader(valid_drca_set, batch_size=len(valid_drca_set), shuffle=True)\ntest_drca_dataloader = DataLoader(test_drca_set, batch_size=len(test_drca_set), shuffle=True)\n\n\nprint(\"Train Set:\", x_drca_train.shape)\nprint(\"Valid Set:\", 
x_drca_valid.shape)\nprint(\"Target Train Set:\", x_drca_test.shape)\n",
"Train Set: torch.Size([150, 50])\nValid Set: torch.Size([300, 50])\nTarget Train Set: torch.Size([180, 50])\n"
],
[
"print(Xs_drca.shape)\nINPUT_DIM = Xs_drca.shape[1]\nOUTPUT_DIM = 3\nDROPOUT = 0.3\nLAYERS = [INPUT_DIM, 64, 32, 16, OUTPUT_DIM]\n\nmodel_drca_nn = NN(LAYERS, DROPOUT)\nmodel_drca_nn.float()\nopt_drca_nn = optim.Adam(model_drca_nn.parameters(),lr=1e-4)\n\ncriterion_drca = nn.CrossEntropyLoss()",
"(150, 50)\n"
],
[
"N_EPOCHS = 150\n\ntrain_drca_loss = np.zeros(N_EPOCHS)\ntrain_drca_acc = np.zeros(N_EPOCHS)\ntrain_drca_prec = np.zeros(N_EPOCHS)\ntrain_drca_recall = np.zeros(N_EPOCHS)\ntrain_drca_f1 = np.zeros(N_EPOCHS)\n\nvalid_drca_loss = np.zeros(N_EPOCHS)\nvalid_drca_acc = np.zeros(N_EPOCHS)\nvalid_drca_prec = np.zeros(N_EPOCHS)\nvalid_drca_recall = np.zeros(N_EPOCHS)\nvalid_drca_f1 = np.zeros(N_EPOCHS)\n\n\n\n\nbest_valid_drca_loss = float('inf')\n\nfor i in range(N_EPOCHS):\n\n start_time = time.time()\n \n train_drca_loss[i], train_drca_acc[i], train_drca_prec[i], train_drca_recall[i], train_drca_f1[i] = train(model_drca_nn, train_drca_dataloader, opt_drca_nn, criterion_drca)\n valid_drca_loss[i], valid_drca_acc[i], valid_drca_prec[i], valid_drca_recall[i], valid_drca_f1[i] = evaluate(model_drca_nn, valid_drca_dataloader, criterion_drca)\n \n \n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n \n if valid_drca_loss[i] < best_valid_drca_loss:\n best_valid_ldrca_oss = valid_drca_loss[i]\n torch.save(model_nn.state_dict(), 'nn-agg-drca.pt')\n \n print(f'Epoch: {i+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_drca_loss[i]:.3f} | Train Acc: {train_drca_acc[i]*100:.2f}%')\n print(f'\\t Val. Loss: {valid_drca_loss[i]:.3f} | Val. Acc: {valid_drca_acc[i]*100:.2f}%')",
"Epoch: 01 | Epoch Time: 0m 0s\n\tTrain Loss: 1.106 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 02 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 03 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 04 | Epoch Time: 0m 0s\n\tTrain Loss: 1.111 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 05 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 06 | Epoch Time: 0m 0s\n\tTrain Loss: 1.112 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 07 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 08 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 09 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 10 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 11 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 12 | Epoch Time: 0m 0s\n\tTrain Loss: 1.111 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 13 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 14 | Epoch Time: 0m 0s\n\tTrain Loss: 1.112 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 15 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.110 | Val. Acc: 33.33%\nEpoch: 16 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 17 | Epoch Time: 0m 0s\n\tTrain Loss: 1.111 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 18 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. 
Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 19 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 20 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 21 | Epoch Time: 0m 0s\n\tTrain Loss: 1.111 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 22 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 23 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 24 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 25 | Epoch Time: 0m 0s\n\tTrain Loss: 1.111 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 26 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 27 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 28 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 29 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 30 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 31 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 32 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 33 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 34 | Epoch Time: 0m 0s\n\tTrain Loss: 1.106 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 35 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. 
Acc: 33.33%\nEpoch: 36 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 37 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 38 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 39 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 40 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 41 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 42 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 43 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.109 | Val. Acc: 33.33%\nEpoch: 44 | Epoch Time: 0m 0s\n\tTrain Loss: 1.105 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 45 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 46 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 47 | Epoch Time: 0m 0s\n\tTrain Loss: 1.113 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 48 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 49 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 50 | Epoch Time: 0m 0s\n\tTrain Loss: 1.114 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 51 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 52 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 53 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. 
Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 54 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 55 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 56 | Epoch Time: 0m 0s\n\tTrain Loss: 1.105 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 57 | Epoch Time: 0m 0s\n\tTrain Loss: 1.111 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 58 | Epoch Time: 0m 0s\n\tTrain Loss: 1.106 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 59 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 60 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 61 | Epoch Time: 0m 0s\n\tTrain Loss: 1.110 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 62 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 63 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 64 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 65 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 66 | Epoch Time: 0m 0s\n\tTrain Loss: 1.104 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 67 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 68 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 69 | Epoch Time: 0m 0s\n\tTrain Loss: 1.103 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 70 | Epoch Time: 0m 0s\n\tTrain Loss: 1.106 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. 
Acc: 33.33%\nEpoch: 71 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 72 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 73 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.108 | Val. Acc: 33.33%\nEpoch: 74 | Epoch Time: 0m 0s\n\tTrain Loss: 1.105 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 75 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 76 | Epoch Time: 0m 0s\n\tTrain Loss: 1.105 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 77 | Epoch Time: 0m 0s\n\tTrain Loss: 1.106 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 78 | Epoch Time: 0m 0s\n\tTrain Loss: 1.103 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 79 | Epoch Time: 0m 0s\n\tTrain Loss: 1.102 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 80 | Epoch Time: 0m 0s\n\tTrain Loss: 1.106 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 81 | Epoch Time: 0m 0s\n\tTrain Loss: 1.109 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 82 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 83 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 84 | Epoch Time: 0m 0s\n\tTrain Loss: 1.104 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 85 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 86 | Epoch Time: 0m 0s\n\tTrain Loss: 1.108 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 87 | Epoch Time: 0m 0s\n\tTrain Loss: 1.107 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 88 | Epoch Time: 0m 0s\n\tTrain Loss: 1.106 | Train Acc: 33.33%\n\t Val. 
Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 89 | Epoch Time: 0m 0s\n\tTrain Loss: 1.104 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\nEpoch: 90 | Epoch Time: 0m 0s\n\tTrain Loss: 1.104 | Train Acc: 33.33%\n\t Val. Loss: 1.107 | Val. Acc: 33.33%\n"
],
[
"#test\n#model_drca_nn.load_state_dict(torch.load('nn-agg-drca.pt'))\n\ntest_drca_loss, test_drca_acc, test_drca_prec, test_drca_recall, test_drca_f1 = evaluate(model_drca_nn, test_drca_dataloader, criterion_drca)\nprint(f'Test Loss: {test_drca_loss:.3f} | Test Acc: {test_drca_acc*100:.2f}% | Test Prec: {test_drca_prec*100:.2f}% | Test Recall: {test_drca_recall*100:.2f}% | Test F1: {test_drca_f1*100:.2f}%')\n",
"Test Loss: 1.112 | Test Acc: 33.33% | Test Prec: 11.11% | Test Recall: 33.33% | Test F1: 16.67%\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec63232b71d5da6c21f6a4f51e68e08cd892741e | 142,156 | ipynb | Jupyter Notebook | Camara.ipynb | JoaoAugustoMV/JupyterNotebooks | e8753b28753e5b2fe013978e8ec9081a2bf7a3d9 | [
"MIT"
]
| null | null | null | Camara.ipynb | JoaoAugustoMV/JupyterNotebooks | e8753b28753e5b2fe013978e8ec9081a2bf7a3d9 | [
"MIT"
]
| null | null | null | Camara.ipynb | JoaoAugustoMV/JupyterNotebooks | e8753b28753e5b2fe013978e8ec9081a2bf7a3d9 | [
"MIT"
]
| null | null | null | 43.051484 | 1,029 | 0.316969 | [
[
[
"<a href=\"https://colab.research.google.com/github/JoaoAugustoMV/JupyterNotebooks/blob/main/Camara.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Pacote para transformar tabelas de arquivo pdf em DataFrames",
"_____no_output_____"
]
],
[
[
"pip install tabula-py",
"Collecting tabula-py\n Downloading tabula_py-2.3.0-py3-none-any.whl (12.0 MB)\n\u001b[K |████████████████████████████████| 12.0 MB 19.1 MB/s \n\u001b[?25hRequirement already satisfied: pandas>=0.25.3 in /usr/local/lib/python3.7/dist-packages (from tabula-py) (1.3.5)\nCollecting distro\n Downloading distro-1.6.0-py2.py3-none-any.whl (19 kB)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from tabula-py) (1.19.5)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.25.3->tabula-py) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.25.3->tabula-py) (2.8.2)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=0.25.3->tabula-py) (1.15.0)\nInstalling collected packages: distro, tabula-py\nSuccessfully installed distro-1.6.0 tabula-py-2.3.0\n"
],
[
"import pandas as pd\nimport tabula",
"_____no_output_____"
]
],
[
[
"Há varias tabelas no arquivos mas sempre as mesmas colunas",
"_____no_output_____"
]
],
[
[
"tabela = tabula.read_pdf('/content/drive/MyDrive/DataSetsDrive/Camara_01-2022.pdf', pages='all')\nlen(tabela)",
"Got stderr: Feb 03, 2022 10:02:14 PM org.apache.pdfbox.pdmodel.font.FileSystemFontProvider loadDiskCache\nWARNING: New fonts found, font cache will be re-built\nFeb 03, 2022 10:02:14 PM org.apache.pdfbox.pdmodel.font.FileSystemFontProvider <init>\nWARNING: Building on-disk font cache, this may take a while\nFeb 03, 2022 10:02:14 PM org.apache.pdfbox.pdmodel.font.FileSystemFontProvider <init>\nWARNING: Finished building on-disk font cache, found 17 fonts\n\n"
],
[
"tabela[0]",
"_____no_output_____"
],
[
"tabela[1]",
"_____no_output_____"
]
],
[
[
"Juntas as tabelas em um único Dataframe",
"_____no_output_____"
]
],
[
[
"df = pd.concat(tabela, verify_integrity=True, ignore_index=True)",
"_____no_output_____"
]
],
[
[
"# Transformar as colunas com valores em float",
"_____no_output_____"
]
],
[
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 331 entries, 0 to 330\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MATRÍCULA 331 non-null int64 \n 1 NOME 331 non-null object\n 2 NOME CARGO 331 non-null object\n 3 SALÁRIO BASE 331 non-null object\n 4 TRIÊNIO 331 non-null object\n 5 ADICIONAIS 331 non-null object\n 6 OUTRAS VANTAGENS 331 non-null object\n 7 PREVIDÊNCIA 331 non-null object\n 8 IMPOSTO DE RENDA 331 non-null object\n 9 ADIANTAMENTO 331 non-null object\n 10 OUTROS DESCONTOS 331 non-null object\n 11 LÍQUIDO 331 non-null object\ndtypes: int64(1), object(11)\nmemory usage: 31.2+ KB\n"
]
],
[
[
"",
"_____no_output_____"
],
[
"Colunas object",
"_____no_output_____"
]
],
[
[
"df.drop(['MATRÍCULA', 'NOME', 'NOME CARGO'], axis=1)",
"_____no_output_____"
]
],
[
[
"## Problemas com a formatação BR de numeros",
"_____no_output_____"
]
],
[
[
"for col in df.drop(['MATRÍCULA', 'NOME', 'NOME CARGO'], axis=1):\n pd.to_numeric(df[col])",
"_____no_output_____"
]
],
[
[
"### Remover os '.'(pontos) e depois substituir as ','(virgulas) por '.'",
"_____no_output_____"
]
],
[
[
"for col in df.drop(['MATRÍCULA', 'NOME', 'NOME CARGO'], axis=1):\n df[col] = [x.replace('.', '') for x in df[col]]\n df[col] = [x.replace(',', '.') for x in df[col]]",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
],
[
[
"### Remove os 'R$'",
"_____no_output_____"
]
],
[
[
"for col in df.drop(['MATRÍCULA', 'NOME', 'NOME CARGO'], axis=1):\n df[col] = [x.replace('R$ ', '') for x in df[col]]\n \n",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
],
[
[
"### Substitui os '-' por 0",
"_____no_output_____"
]
],
[
[
"df.replace('-', 0, inplace=True)",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 331 entries, 0 to 330\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MATRÍCULA 331 non-null int64 \n 1 NOME 331 non-null object\n 2 NOME CARGO 331 non-null object\n 3 SALÁRIO BASE 331 non-null object\n 4 TRIÊNIO 331 non-null object\n 5 ADICIONAIS 331 non-null object\n 6 OUTRAS VANTAGENS 331 non-null object\n 7 PREVIDÊNCIA 331 non-null object\n 8 IMPOSTO DE RENDA 331 non-null object\n 9 ADIANTAMENTO 331 non-null object\n 10 OUTROS DESCONTOS 331 non-null object\n 11 LÍQUIDO 331 non-null object\ndtypes: int64(1), object(11)\nmemory usage: 31.2+ KB\n"
]
],
[
[
"## Transformação de str para float",
"_____no_output_____"
]
],
[
[
"for col in df.drop(['MATRÍCULA', 'NOME', 'NOME CARGO'], axis=1):\n df[col] = pd.to_numeric(df[col])",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 331 entries, 0 to 330\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MATRÍCULA 331 non-null int64 \n 1 NOME 331 non-null object \n 2 NOME CARGO 331 non-null object \n 3 SALÁRIO BASE 331 non-null float64\n 4 TRIÊNIO 331 non-null float64\n 5 ADICIONAIS 331 non-null float64\n 6 OUTRAS VANTAGENS 331 non-null float64\n 7 PREVIDÊNCIA 331 non-null float64\n 8 IMPOSTO DE RENDA 331 non-null float64\n 9 ADIANTAMENTO 331 non-null float64\n 10 OUTROS DESCONTOS 331 non-null float64\n 11 LÍQUIDO 331 non-null float64\ndtypes: float64(9), int64(1), object(2)\nmemory usage: 31.2+ KB\n"
]
],
[
[
"# Preparando dados",
"_____no_output_____"
],
[
"## Add coluna Ganho mensal(adiantamento + liquido)",
"_____no_output_____"
],
[
"## Add coluna Desconto Total(soma de todos descontos)",
"_____no_output_____"
],
[
"# Análise Exploratoria de Dados",
"_____no_output_____"
]
],
[
[
"df.describe()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
]
]
|
ec63297a7f0c585f21a0c74cee30dfe1038a60ef | 4,113 | ipynb | Jupyter Notebook | Dynamic Programming/1029/1340. Jump Game V.ipynb | YuHe0108/Leetcode | 90d904dde125dd35ee256a7f383961786f1ada5d | [
"Apache-2.0"
]
| 1 | 2020-08-05T11:47:47.000Z | 2020-08-05T11:47:47.000Z | Dynamic Programming/1029/1340. Jump Game V.ipynb | YuHe0108/LeetCode | b9e5de69b4e4d794aff89497624f558343e362ad | [
"Apache-2.0"
]
| null | null | null | Dynamic Programming/1029/1340. Jump Game V.ipynb | YuHe0108/LeetCode | b9e5de69b4e4d794aff89497624f558343e362ad | [
"Apache-2.0"
]
| null | null | null | 24.482143 | 100 | 0.400681 | [
[
[
"说明:\n 给你一个整数数组 arr 和一个整数 d 。每一步你可以从下标 i 跳到:\n \n 1、i + x ,其中 i + x < arr.length 且 0 < x <= d 。\n 2、i - x ,其中 i - x >= 0 且 0 < x <= d 。\n \n 除此以外,你从下标 i 跳到下标 j 需要满足:arr[i] > arr[j] 且 arr[i] > arr[k] ,\n 其中下标 k 是所有 i 到 j 之间的数字, 即 min(i, j) < k < max(i, j)。\n 你可以选择数组的任意下标开始跳跃。\n 请你返回你 最多 可以访问多少个下标。\n 请注意,任何时刻你都不能跳到数组的外面。",
"_____no_output_____"
]
],
[
[
"<img src='https://assets.leetcode-cn.com/aliyun-lc-upload/uploads/2020/02/02/meta-chart.jpeg'>",
"_____no_output_____"
]
],
[
[
"例1:\n 输入:arr = [6,4,14,6,8,13,9,7,10,6,12], d = 2\n 输出:4\n 解释:\n 你可以从下标 10 出发,然后如上图依次经过 10 --> 8 --> 6 --> 7 。\n 注意,如果你从下标 6 开始,你只能跳到下标 7 处。你不能跳到下标 5 处因为 13 > 9 。\n 你也不能跳到下标 4 处,因为下标 5 在下标 4 和 6 之间且 13 > 9 。\n 类似的,你不能从下标 3 处跳到下标 2 或者下标 1 处。\n\n示例 2:\n 输入:arr = [3,3,3,3,3], d = 3\n 输出:1\n 解释:你可以从任意下标处开始且你永远无法跳到任何其他坐标。\n\n示例 3:\n 输入:arr = [7,6,5,4,3,2,1], d = 1\n 输出:7\n 解释:从下标 0 处开始,你可以按照数值从大到小,访问所有的下标。\n\n示例 4:\n 输入:arr = [7,1,7,1,7,1], d = 2\n 输出:2\n\n示例 5:\n 输入:arr = [66], d = 1\n 输出:1\n\n提示:\n 1、1 <= arr.length <= 1000\n 2、1 <= arr[i] <= 10^5\n 3、1 <= d <= arr.length",
"_____no_output_____"
]
],
[
[
"class Solution:\n def maxJumps(self, arr, d: int) -> int:\n self.mem = [0] * 1001 # 记忆在每个柱子上所能调到的最多柱子数\n res = -float('inf')\n for i in range(len(arr)):\n res = max(res, self.dfs(i, arr, d))\n return res\n \n def dfs(self, idx, arr, d):\n if self.mem[idx] != 0: \n return self.mem[idx]\n \n res = 1\n # 向左边跳\n for k in range(1, d+1):\n if idx + k >= len(arr): # 一旦越界或者中间的值大于当前柱子的高度,就跳出循环\n break\n if arr[idx + k] >= arr[idx]:\n break\n print(k)\n res = max(res, self.dfs(idx+k, arr, d) + 1)\n \n # 向右边跳\n for k in range(1, d+1):\n if idx - k < 0:\n break\n if arr[idx - k] >= arr[idx]:\n break\n res = max(res, self.dfs(idx-k, arr, d) + 1)\n \n self.mem[idx] = res\n return self.mem[idx]",
"_____no_output_____"
],
[
"solution = Solution()\nsolution.maxJumps(arr = [6,4,14,6,8,13,9,7,10,6,12], d = 2)",
"1\n1\n2\n1\n1\n2\n1\n"
]
]
]
| [
"raw",
"markdown",
"raw",
"code"
]
| [
[
"raw"
],
[
"markdown"
],
[
"raw"
],
[
"code",
"code"
]
]
|
ec6330ccd587e328635efb74e24dec609a598fa5 | 16,708 | ipynb | Jupyter Notebook | exercises/weatherdata2/Weatherdata_2.ipynb | lunduniversity/schoolprog-satellite | 6d5fec82bccb7449b398811f004b416e046a258c | [
"CC-BY-4.0"
]
| 3 | 2020-06-04T21:12:34.000Z | 2022-03-15T21:21:59.000Z | exercises/weatherdata2/Weatherdata_2.ipynb | lunduniversity/schoolprog-satellite | 6d5fec82bccb7449b398811f004b416e046a258c | [
"CC-BY-4.0"
]
| 21 | 2019-06-01T11:43:12.000Z | 2020-08-10T10:37:27.000Z | exercises/weatherdata2/Weatherdata_2.ipynb | lunduniversity/schoolprog-satellite | 6d5fec82bccb7449b398811f004b416e046a258c | [
"CC-BY-4.0"
]
| null | null | null | 34.953975 | 546 | 0.519033 | [
[
[
"# Väderdata 2\nDenna uppgift är en vidareutveckling på väderdata. Vi rekommenderar starkt att du gör den först. Du kan hitta den [här](https://github.com/lunduniversity/schoolprog-satellite/tree/master/exercises/weatherdata). Vi kommer använda samma data i denna uppgiften men göra den mer lättillgänglig genom att göra en interaktiv graf med hjälp av biblioteket [bokeh](https://bokeh.pydata.org/en/latest/).\n\nBörja med att köra följande kod för att importera rätt bibliotek:",
"_____no_output_____"
]
],
[
[
"from ipywidgets import interact\nimport numpy as np\nfrom bokeh.io import push_notebook, show, output_notebook\nfrom bokeh.plotting import figure\n\noutput_notebook()",
"_____no_output_____"
]
],
[
[
"Koden nedan laddar ned datan vi kommer använda och sparar den i `data`. Funktionerna är samma som i den första väderdatauppgiften. Kör koden för att läsa in datan.",
"_____no_output_____"
]
],
[
[
"!wget https://github.com/lunduniversity/schoolprog-satellite-data/raw/master/smhi/1961.all.ssv.gz --no-verbose\nimport gzip\nimport matplotlib.pyplot as plt\n\n\ndef get_station_data():\n result = {}\n with gzip.open('1961.all.ssv.gz', 'rt') as f:\n raw_data = f.read().split('\\n\\n')\n for s in raw_data:\n station_data = s.split('\\n')\n name = ' '.join(station_data[0].split()[1:])\n data = []\n for entry in station_data[1:]:\n y, m, d, t = entry.split()\n data.append((int(y), int(m), int(d), float(t)))\n if(len(name) > 0):\n result[name] = data\n return result\n\n\ndef plot(x=None, y=None, fname=\"plot.png\"):\n if x: \n plt.plot(x, y)\n else:\n plt.plot(y)\n plt.savefig(fname)\n plt.show()\n \ndef data_by_year(year, city_data):\n result = []\n for datum in city_data:\n if datum[0] == year:\n result.append(datum[3])\n return result\n\ndata = get_station_data()",
"_____no_output_____"
]
],
[
[
"## 1. Plotta med `bokeh`\n\nNu ska vi börja med `bokeh`. I `bokeh` plottar man saker i en `figure`. Man skapar en `figure` på följande vis:",
"_____no_output_____"
]
],
[
[
"plot = figure(title=\"Temperatur för ett år\", plot_height=300, plot_width=600, y_range=(-30, 30))",
"_____no_output_____"
]
],
[
[
"**Uppdrag:** Vad gör de olika parametrarna? Försök lista ut bara utifrån namnen.\n\n<details>\n<summary markdown=\"span\">\nSvar\n</summary>\n<p>\n \n- <code>title</code> sätter titeln för plotten.\n \n- <code>plot_height</code> bestämmer höjden på plotten.\n\n- <code>plot_width</code> bestämmer bredden på plotten.\n\n- <code>y_range</code> avgör vilka y-värden som ska vara med i plotten.\n\n</p>\n</details>\n\nI en `figure` i bokeh kan man lägga till exempelvis linjer, vilket vi ska göra nu. Men först behöver vi någon data att plotta med linjen. \n\n**Uppdrag:** skapa en lista `y` som innehåller temperaturerna från Lund 2015 samt en lista `x` som innehåller talen 1, 2 till och med 365. Detta ska representera dagarna. \n\n",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"<details>\n<summary markdown=\"span\">\nTips\n</summary>\n<p>\n Använd funktionen <code>data_by_year()</code> du skrev i den förra väderuppgiften. \n</p>\n</details>\n\n<details><summary markdown=\"span\">Lösning</summary>\n<p>\n<pre><code>y = data_by_year(2015, data[\"Lund\"])\nx = [i+1 for i in range(365)]\n</code></pre></p>\n</details>\n\nNu när du skapat listorna är det dags att plottad dem. \n\n**Uppdrag:** Kör koden nedan för att först lägga till en linje som du kallar `my_line` och visa plotten med hjälp av `show(plot)`.",
"_____no_output_____"
]
],
[
[
"my_line = plot.line(x, y)\nshow(plot)",
"_____no_output_____"
]
],
[
[
"Som du ser är denna plotten inte helt lik den du känner igen sen tidigare. Innan använde vi `matplotlib.pyplot` och nu använder vi `bokeh`. Du kan direkt se hur `bokeh` är mer interaktivt genom att du kan välja olika verktyg på sidan om plotten som låter dig zooma och dra runt plotten. Men vi ska göra plotten ännu mer interaktiv!\n\n## 2. Interaktiv plot med `bokeh`\n\n**Uppdrag:** Skriv en funktion `formatted_data_by_year(year, city)` som tar in ett år och en stad. Funktionen ska returnera nästan samma sak som `data_by_year()` men den ska fixa till datan så att vi alltid har 365 dagar per år. Detta är eftersom vissa städer saknar data för exempelvis de sista månaderna på året, och ibland är det skottår.\n\n",
"_____no_output_____"
]
],
[
[
"def formatted_data_by_year(year, city):\n # Din kod här..",
"_____no_output_____"
]
],
[
[
"<details>\n<summary markdown=\"span\">\nTips\n</summary>\n<p>\nUndvik att göra någon komplicerat. Du kan exempelvis göra följande: om det finns för många värden tar du bort det sista värdet tills du har exakt 365. Om du tvärtemot har för få värden kan du lägga till det sista värdet till listan tills du har exakt 365. \n</p>\n</details>\n\n<details><summary markdown=\"span\">Lösning</summary>\n<p>\n<pre><code>def formatted_data_by_year(year, city):\n res = data_by_year(year, data[city])\n while (len(res) > 365):\n res = res[:-1]\n while (len(res) < 365):\n res.append(res[-1])\n return res\n</code></pre></p>\n</details>\n\n\nNu ska vi börja använda `ipywidgets` och dess funktion `interact`.\n\n**Uppdrag:** Kör följande kod. Vad händer? Varför? Försök förstå vad de olika delarna gör. ",
"_____no_output_____"
]
],
[
[
"def update(city, year):\n my_line.data_source.data['y'] = formatted_data_by_year(year, city)\n push_notebook(handle=my_handle)\n \nmy_handle=show(plot, notebook_handle=True)\ninteract(update, city=[\"Lund\", \"Stockholm\"], year=(1965, 2016, 1))",
"_____no_output_____"
]
],
[
[
"<details>\n<summary markdown=\"span\">\nSvar\n</summary>\n<p>\n\n- <code>update(city, year)</code> är en funktion som ska uppdatera grafen varje gång något ändras från inställningarna. \n- <code>my_line.data_source.data['y'] = formatted_data_by_year(year, city)</code> ändrar linjens y-värden till värdena som returneras av <code>formatted_data_by_year()</code>. \n- <code>push_notebook(handle=my_handle)</code> uppdaterar grafen som har <code>my_handle</code> som handle. \n- <code>my_handle=show(plot, notebook_handle=True)</code> gör så att grafen får en handle som vi kallar <code>my_handle</code>. \n- <code>interact()</code> är funktionen som ger listan av alternativ och en slider där man kan ange vilket år man vill ha datan ifrån. <code>interact()</code> tar först en <code>update</code>-funktion som anropas varje gång värdena uppdateras. Sedan måste man för varje parameter till <code>update</code> ange hur man ska läsa in dem. Anger man en lista av strängar får man en flervalslista och anger man en tuple får man en slider. De två första värdena i tupeln anger start och slutvärde. Det tredje anger hur små steg man kan ta. \n</p>\n</details>\n\nNu vill vi kunna se fler städer. \n\n**Uppdrag:** Ändra i koden ovan så att vi får alla städer vi har i flervalslistan. \n\n\n<details><summary markdown=\"span\">Lösning</summary>\n<p>\n<pre><code>interact(update, city=list(data.keys()), year=(1965, 2016,1))\n</code></pre></p>\n</details>",
"_____no_output_____"
],
[
"## 3. Ackumulerad temperatur\n\nEn ackumulerad tempertur innebär att man summerar alla de tidigare temperaturerna. Om det exempelvis har varit 20 grader flera dagar i streck blir den ackumulerade temperaturen dag ett 20 grader, dag två 40 grader, dag tre 60 grader osv. Vi tänke nu kolla på hur den ackulmulerade temperaturen ser ut för ett år i de olika städerna vi kollat på innan. ",
"_____no_output_____"
],
[
"För att göra detta behöver vi skriva en funktion som hela tiden läger ihop de tidigare värdena med dagens värde. Vi kan utgå från vår funktion `formatted_data_by_year()`. \n\n**Uppdrag:** Skriv funktionen `formatted_data_by_year_ack(year,city)`.\n",
"_____no_output_____"
]
],
[
[
"def formatted_data_by_year_ack(year, city):\n # Din kod här..",
"_____no_output_____"
]
],
[
[
"<details>\n<summary markdown=\"span\">\nTips\n</summary>\n<p>\n Vi kan exempelvis loopa genom listan från <code>formatted_data_by_year</code> och för varje element lägga till värdet av det tidigare elementet (som då kommer vara det ackumulerade värdet för det elementet).\n</p>\n</details>\n\n<details>\n<summary markdown=\"span\">\nLösning\n</summary>\n<p>\n<pre>def formatted_data_by_year_ack(year, city):\n res = formatted_data_by_year(year, city)\n for i in range(1,365):\n res[i] = res[i] + res[i-1]\n return res</pre>\n</p>\n</details>\n",
"_____no_output_____"
],
[
"Vi kan nu plotta våra ackumulerade värden på samma sätt som innan. Koden nedan är i princip samma som du skrev i del 2. Vi väljer denna gång att göra ett stapeldiagram istället för en linje. Kör koden nedan för att se vad som händer. ",
"_____no_output_____"
]
],
[
[
"plot2 = figure(title=\"Ackumulerad temperatur för ett år\", plot_height=300, plot_width=600, y_range=(-2000, 4000))\ny = formatted_data_by_year_ack(2015, \"Lund\")\nx = [i+1 for i in range(365)]\nmy_vbar = plot2.vbar(x, top=y, width = 0.5)\n\ndef update_ack(city, year):\n my_vbar.data_source.data['top'] = formatted_data_by_year_ack(year, city)\n push_notebook(handle=my_handle2)\n \nmy_handle2=show(plot2, notebook_handle=True)\ninteract(update_ack, city=list(data.keys()), year=(1965, 2016, 1))",
"_____no_output_____"
]
],
[
[
"**Uppdrag:** Kolla på lite olika städer. Vad innebär det om den ackumulerade temperaturen är under noll i slutet av året?\n\n<details>\n<summary markdown=\"span\">\nSvar\n</summary>\n<p>\nDet innebär att medeltemperaturen för hela året är negativ. Vi kan bestämma medeltemperaturen genom att ta det sista värdet, det totala ackumulerade värdet för året, och dela det på 365.\n</p>\n</details>",
"_____no_output_____"
],
[
"## Fortsättningsuppgifter\n- Gör om fler plottar från förra uppgiften och gör dem interaktiva. \n- Ändra så att man i `update`-även uppdaterar x-värdena för att slippa använda `formated_data_by_year` och istället bara använda `data_by_year`.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
ec63333be4c62ce5b965a5a1475eea6da96a6e60 | 9,528 | ipynb | Jupyter Notebook | sagemaker-python-sdk/mxnet_gluon_mnist/mxnet_mnist_with_gluon.ipynb | vikramelango/amazon-sagemaker-examples | 9a3b8de17c253fc18fc089120885afc6ff36111d | [
"Apache-2.0"
]
| null | null | null | sagemaker-python-sdk/mxnet_gluon_mnist/mxnet_mnist_with_gluon.ipynb | vikramelango/amazon-sagemaker-examples | 9a3b8de17c253fc18fc089120885afc6ff36111d | [
"Apache-2.0"
]
| 1 | 2022-03-15T20:04:30.000Z | 2022-03-15T20:04:30.000Z | sagemaker-python-sdk/mxnet_gluon_mnist/mxnet_mnist_with_gluon.ipynb | vivekmadan2/amazon-sagemaker-examples | 4ccb050067c5305a50db750df3444dbc85600d5f | [
"Apache-2.0"
]
| 1 | 2022-03-19T17:04:30.000Z | 2022-03-19T17:04:30.000Z | 31.445545 | 624 | 0.599706 | [
[
[
"# MNIST Training with MXNet and Gluon\n\nMNIST is a widely used dataset for handwritten digit classification. It consists of 70,000 labeled 28x28 pixel grayscale images of hand-written digits. The dataset is split into 60,000 training images and 10,000 test images. There are 10 classes (one for each of the 10 digits). This tutorial shows how to train and test an MNIST model on SageMaker using MXNet and the Gluon API.\n\n## Runtime\n\nThis notebook takes approximately 20 minutes to run.\n\n## Contents\n\n1. [Download training and test data](#Download-training-and-test-data)\n1. [Upload the data](#Upload-the-data)\n1. [Implement the training function](#Implement-the-training-function)\n1. [Run the training script on SageMaker](#Run-the-training-script-on-SageMaker)\n1. [Cleanup](#Cleanup)\n",
"_____no_output_____"
]
],
[
[
"import os\nimport boto3\nimport sagemaker\nfrom sagemaker.mxnet import MXNet\nfrom mxnet import gluon\nfrom sagemaker import get_execution_role\n\nsagemaker_session = sagemaker.Session()\n\nrole = get_execution_role()",
"_____no_output_____"
]
],
[
[
"## Download training and test data",
"_____no_output_____"
]
],
[
[
"import os\n\nfor inner_dir in [\"train\", \"test\"]:\n data_dir = \"./data/{}/\".format(inner_dir)\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\ns3 = boto3.client(\"s3\")\ns3.download_file(\n \"sagemaker-sample-files\",\n \"datasets/image/MNIST/train/train-images-idx3-ubyte.gz\",\n \"./data/train/train-images-idx3-ubyte.gz\",\n)\ns3.download_file(\n \"sagemaker-sample-files\",\n \"datasets/image/MNIST/train/train-labels-idx1-ubyte.gz\",\n \"./data/train/train-labels-idx1-ubyte.gz\",\n)\ns3.download_file(\n \"sagemaker-sample-files\",\n \"datasets/image/MNIST/test/t10k-images-idx3-ubyte.gz\",\n \"./data/test/t10k-images-idx3-ubyte.gz\",\n)\ns3.download_file(\n \"sagemaker-sample-files\",\n \"datasets/image/MNIST/test/t10k-labels-idx1-ubyte.gz\",\n \"./data/test/t10k-labels-idx1-ubyte.gz\",\n)",
"_____no_output_____"
]
],
[
[
"## Upload the data\n\nWe use the `sagemaker.Session.upload_data` function to upload our datasets to an S3 location. The return value `inputs` identifies the location -- we use this later when we start the training job.",
"_____no_output_____"
]
],
[
[
"inputs = sagemaker_session.upload_data(path=\"data\", key_prefix=\"data/DEMO-mnist\")",
"_____no_output_____"
]
],
[
[
"## Implement the training function\n\nWe need to provide a training script that can run on the SageMaker platform. The training scripts are essentially the same as one you would write for local training, except that you need to provide a `train()` function. The `train()` function checks for the validation accuracy at the end of every epoch and checkpoints the best model so far, along with the optimizer state, in the folder `/opt/ml/checkpoints` if the folder path exists, else it skips the checkpointing. When SageMaker calls your function, it passes in arguments that describe the training environment. Check the script below to see how this works.\n\nThe script here is an adaptation of the [Gluon MNIST example](https://github.com/apache/incubator-mxnet/blob/master/example/gluon/mnist/mnist.py) provided by the [Apache MXNet](https://mxnet.incubator.apache.org/) project.",
"_____no_output_____"
]
],
[
[
"!cat 'mnist.py'",
"_____no_output_____"
]
],
[
[
"## Run the training script on SageMaker\n\nThe ```MXNet``` class allows us to run our training function on SageMaker infrastructure. We need to configure it with our training script, an IAM role, the number of training instances, and the training instance type. In this case we run our training job on a single c4.xlarge instance.",
"_____no_output_____"
]
],
[
[
"m = MXNet(\n \"mnist.py\",\n role=role,\n instance_count=1,\n instance_type=\"ml.c4.xlarge\",\n framework_version=\"1.6.0\",\n py_version=\"py3\",\n hyperparameters={\n \"batch-size\": 100,\n \"epochs\": 20,\n \"learning-rate\": 0.1,\n \"momentum\": 0.9,\n \"log-interval\": 100,\n },\n)",
"_____no_output_____"
]
],
[
[
"After we've constructed our `MXNet` object, we fit it using the data we uploaded to S3. SageMaker makes sure our data is available in the local filesystem, so our training script can simply read the data from disk.\n",
"_____no_output_____"
]
],
[
[
"m.fit(inputs)",
"_____no_output_____"
]
],
[
[
"After training, we use the MXNet object to build and deploy an MXNetPredictor object. This creates a SageMaker endpoint that we use to perform inference.\n\nThis allows us to perform inference on JSON-encoded multi-dimensional arrays.",
"_____no_output_____"
]
],
[
[
"predictor = m.deploy(initial_instance_count=1, instance_type=\"ml.m4.xlarge\")",
"_____no_output_____"
]
],
[
[
"We can now use this predictor to classify hand-written digits. Manually drawing into the image box loads the pixel data into a 'data' variable in this notebook, which we can then pass to the MXNet predictor.",
"_____no_output_____"
]
],
[
[
"from IPython.display import HTML\n\nHTML(open(\"input.html\").read())",
"_____no_output_____"
]
],
[
[
"Fetch the first image from the test dataset and display it.",
"_____no_output_____"
]
],
[
[
"import gzip\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nf = gzip.open(\"data/train/train-images-idx3-ubyte.gz\", \"r\")\n\nimage_size = 28\n\nf.read(16)\nbuf = f.read(image_size * image_size)\ndata = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\ndata = data.reshape(1, image_size, image_size, 1)\n\nimage = np.asarray(data).squeeze()\nplt.imshow(image)\nplt.show()",
"_____no_output_____"
]
],
[
[
"The predictor runs inference on our input data and returns the predicted digit (as a float value, so we convert to int for display).",
"_____no_output_____"
]
],
[
[
"response = predictor.predict(data)\nprint(int(response))",
"_____no_output_____"
]
],
[
[
"## Cleanup\n\nAfter you have finished with this example, delete the prediction endpoint to release the instance associated with it.",
"_____no_output_____"
]
],
[
[
"predictor.delete_endpoint()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec63335ab388b286da2d1ed742457d1a51ab5e7b | 822,428 | ipynb | Jupyter Notebook | pix2pix.ipynb | bchattaraj/CS236_bchattar_code | c4d74daa26cc64c5fdcd1c316e3e6120be1cf505 | [
"BSD-3-Clause"
]
| null | null | null | pix2pix.ipynb | bchattaraj/CS236_bchattar_code | c4d74daa26cc64c5fdcd1c316e3e6120be1cf505 | [
"BSD-3-Clause"
]
| null | null | null | pix2pix.ipynb | bchattaraj/CS236_bchattar_code | c4d74daa26cc64c5fdcd1c316e3e6120be1cf505 | [
"BSD-3-Clause"
]
| null | null | null | 181.111649 | 135,292 | 0.791697 | [
[
[
"<a href=\"https://colab.research.google.com/github/bkkaggle/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Install",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix",
"_____no_output_____"
],
[
"import os\nos.chdir('./pytorch-CycleGAN-and-pix2pix/')",
"_____no_output_____"
],
[
"!pip install -r requirements.txt",
"Requirement already satisfied: torch>=1.4.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from -r requirements.txt (line 1)) (1.10.0)\nRequirement already satisfied: torchvision>=0.5.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from -r requirements.txt (line 2)) (0.11.1)\nRequirement already satisfied: dominate>=2.4.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from -r requirements.txt (line 3)) (2.6.0)\nRequirement already satisfied: visdom>=0.1.8.8 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from -r requirements.txt (line 4)) (0.1.8.9)\nRequirement already satisfied: wandb in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from -r requirements.txt (line 5)) (0.12.7)\nRequirement already satisfied: typing_extensions in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from torch>=1.4.0->-r requirements.txt (line 1)) (3.10.0.2)\nRequirement already satisfied: numpy in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from torchvision>=0.5.0->-r requirements.txt (line 2)) (1.21.4)\nRequirement already satisfied: pillow!=8.3.0,>=5.3.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from torchvision>=0.5.0->-r requirements.txt (line 2)) (8.4.0)\nRequirement already satisfied: pyzmq in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from visdom>=0.1.8.8->-r requirements.txt (line 4)) (22.2.1)\nRequirement already satisfied: six in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from visdom>=0.1.8.8->-r requirements.txt (line 4)) (1.16.0)\nRequirement already satisfied: tornado in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from visdom>=0.1.8.8->-r requirements.txt (line 4)) (6.1)\nRequirement already satisfied: scipy in 
c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from visdom>=0.1.8.8->-r requirements.txt (line 4)) (1.7.3)\nRequirement already satisfied: jsonpatch in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from visdom>=0.1.8.8->-r requirements.txt (line 4)) (1.32)\nRequirement already satisfied: websocket-client in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from visdom>=0.1.8.8->-r requirements.txt (line 4)) (1.2.1)\nRequirement already satisfied: torchfile in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from visdom>=0.1.8.8->-r requirements.txt (line 4)) (0.1.0)\nRequirement already satisfied: requests in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from visdom>=0.1.8.8->-r requirements.txt (line 4)) (2.26.0)\nRequirement already satisfied: protobuf>=3.12.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (3.19.1)\nRequirement already satisfied: subprocess32>=3.5.3 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (3.5.4)\nRequirement already satisfied: configparser>=3.8.1 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (5.1.0)\nRequirement already satisfied: python-dateutil>=2.6.1 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (2.8.2)\nRequirement already satisfied: sentry-sdk>=1.0.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (1.5.0)\nRequirement already satisfied: Click!=8.0.0,>=7.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (8.0.3)\nRequirement already satisfied: GitPython>=1.0.0 in 
c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (3.1.24)\nRequirement already satisfied: docker-pycreds>=0.4.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (0.4.0)\nRequirement already satisfied: yaspin>=1.0.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (2.1.0)\nRequirement already satisfied: PyYAML in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (6.0)\nRequirement already satisfied: pathtools in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (0.1.2)\nRequirement already satisfied: psutil>=5.0.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (5.8.0)\nRequirement already satisfied: promise<3,>=2.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (2.3)\nRequirement already satisfied: shortuuid>=0.5.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from wandb->-r requirements.txt (line 5)) (1.0.8)\nRequirement already satisfied: colorama in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from Click!=8.0.0,>=7.0->wandb->-r requirements.txt (line 5)) (0.4.4)\nRequirement already satisfied: gitdb<5,>=4.0.1 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from GitPython>=1.0.0->wandb->-r requirements.txt (line 5)) (4.0.9)\nRequirement already satisfied: smmap<6,>=3.0.1 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from gitdb<5,>=4.0.1->GitPython>=1.0.0->wandb->-r requirements.txt (line 5)) (5.0.0)\nRequirement already satisfied: idna<4,>=2.5 in 
c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from requests->visdom>=0.1.8.8->-r requirements.txt (line 4)) (3.2)\nRequirement already satisfied: charset-normalizer~=2.0.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from requests->visdom>=0.1.8.8->-r requirements.txt (line 4)) (2.0.4)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from requests->visdom>=0.1.8.8->-r requirements.txt (line 4)) (1.26.7)\nRequirement already satisfied: certifi>=2017.4.17 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from requests->visdom>=0.1.8.8->-r requirements.txt (line 4)) (2021.10.8)\nRequirement already satisfied: termcolor<2.0.0,>=1.1.0 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from yaspin>=1.0.0->wandb->-r requirements.txt (line 5)) (1.1.0)\nRequirement already satisfied: jsonpointer>=1.9 in c:\\users\\bchat\\anaconda3\\envs\\cs236_project_test\\lib\\site-packages (from jsonpatch->visdom>=0.1.8.8->-r requirements.txt (line 4)) (2.2)\n"
]
],
[
[
"# Datasets\n\nDownload one of the official datasets with:\n\n- `bash ./datasets/download_pix2pix_dataset.sh [cityscapes, night2day, edges2handbags, edges2shoes, facades, maps]`\n\nOr use your own dataset by creating the appropriate folders and adding in the images. Follow the instructions [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/datasets.md#pix2pix-datasets).",
"_____no_output_____"
]
],
[
[
"!bash ./datasets/download_pix2pix_dataset.sh facades",
"'bash' is not recognized as an internal or external command,\noperable program or batch file.\n"
]
],
[
[
"# Pretrained models\n\nDownload one of the official pretrained models with:\n\n- `bash ./scripts/download_pix2pix_model.sh [edges2shoes, sat2map, map2sat, facades_label2photo, and day2night]`\n\nOr add your own pretrained model to `./checkpoints/{NAME}_pretrained/latest_net_G.pt`",
"_____no_output_____"
]
],
[
[
"!bash ./scripts/download_pix2pix_model.sh facades_label2photo",
"_____no_output_____"
],
[
"# Start here\n# To view training results and loss plots, run python -m visdom.server and click the URL http://localhost:8097.\n# first \"python -m visdom.server\" on condo cmd window\nhttp://localhost:8097",
"_____no_output_____"
],
[
"import visdom",
"_____no_output_____"
],
[
"!wandb login 9e662fe57b66dc13ad4fe84fca5955dcd8849cd3",
"wandb: Appending key for api.wandb.ai to your netrc file: C:\\Users\\bchat/.netrc\n"
]
],
[
[
"# Training\n\n- `python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA`\n\nChange the `--dataroot` and `--name` to your own dataset's path and model's name. Use `--gpu_ids 0,1,..` to train on multiple GPUs and `--batch_size` to change the batch size. Add `--direction BtoA` if you want to train a model to transfrom from class B to A.",
"_____no_output_____"
]
],
[
[
"# !python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA --use_wandb\n# !python train.py --dataroot ./datasets/PIX2PIXHURADAR --name PIX2PIXHU_RADAR2RADAR --model pix2pix --direction AtoB --use_wandb\n!python train.py --dataroot ./datasets/V2PIX2PIX_R2T --name V2PIX2PIX_R2T --model pix2pix --direction AtoB --use_wandb",
"----------------- Options ---------------\n batch_size: 1 \n beta1: 0.5 \n checkpoints_dir: ./checkpoints \n continue_train: False \n crop_size: 256 \n dataroot: ./datasets/V2PIX2PIX_R2T \t[default: None]\n dataset_mode: aligned \n direction: AtoB \n display_env: main \n display_freq: 400 \n display_id: 1 \n display_ncols: 4 \n display_port: 8097 \n display_server: http://localhost \n display_winsize: 256 \n epoch: latest \n epoch_count: 1 \n gan_mode: vanilla \n gpu_ids: 0 \n init_gain: 0.02 \n init_type: normal \n input_nc: 3 \n isTrain: True \t[default: None]\n lambda_L1: 100.0 \n load_iter: 0 \t[default: 0]\n load_size: 286 \n lr: 0.0002 \n lr_decay_iters: 50 \n lr_policy: linear \n max_dataset_size: inf \n model: pix2pix \t[default: cycle_gan]\n n_epochs: 100 \n n_epochs_decay: 100 "
]
],
[
[
"# Testing\n\n- `python test.py --dataroot ./datasets/facades --direction BtoA --model pix2pix --name facades_pix2pix`\n\nChange the `--dataroot`, `--name`, and `--direction` to be consistent with your trained model's configuration and how you want to transform images.\n\n> from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix:\n> Note that we specified --direction BtoA as Facades dataset's A to B direction is photos to labels.\n\n> If you would like to apply a pre-trained model to a collection of input images (rather than image pairs), please use --model test option. See ./scripts/test_single.sh for how to apply a model to Facade label maps (stored in the directory facades/testB).\n\n> See a list of currently available models at ./scripts/download_pix2pix_model.sh",
"_____no_output_____"
]
],
[
[
"!dir C:\\Users\\bchat\\pytorch-CycleGAN-and-pix2pix\\checkpoints\\",
" Volume in drive C has no label.\n Volume Serial Number is 762E-CD6B\n\n Directory of C:\\Users\\bchat\\pytorch-CycleGAN-and-pix2pix\\checkpoints\n\n12/05/2021 05:12 PM <DIR> .\n12/05/2021 05:12 PM <DIR> ..\n12/03/2021 12:13 AM <DIR> CGANV1_1R1T\n12/03/2021 12:32 AM <DIR> CGANV1_pretrained\n12/03/2021 09:01 PM <DIR> CGANV2_RADAR2RADAR\n12/03/2021 09:14 PM <DIR> CGANV2_RADAR2RADAR_pretrained\n11/28/2021 02:36 PM <DIR> facadesCG\n11/28/2021 02:12 PM <DIR> facades_label2photo_pretrained\n11/28/2021 01:20 PM <DIR> facades_pix2pix\n11/30/2021 12:56 AM <DIR> PIX2PIXHU_pix2pix\n12/03/2021 11:30 AM <DIR> PIX2PIXHU_RADAR2RADAR\n12/03/2021 11:38 AM <DIR> PIX2PIXHU_RADAR2RADAR_pretrained\n11/30/2021 08:32 AM <DIR> PIX2PIXHU_RADAR2TRACK\n11/30/2021 08:49 PM <DIR> PIX2PIXHU_RADAR2TRACK_pretrained\n11/30/2021 06:26 AM <DIR> PIX2PIXHU_Track2Radar_pretrained\n12/05/2021 05:12 PM <DIR> V2PIX2PIX_R2T\n 0 File(s) 0 bytes\n 16 Dir(s) 3,525,592,342,528 bytes free\n"
],
[
"#!python test.py --dataroot ./datasets/PIX2PIXHU --direction BtoA --model pix2pix --name PIX2PIXHU_Track2Radar_pretrained --use_wandb\n\n# !python test.py --dataroot ./datasets/PIX2PIXHU --direction AtoB --model pix2pix --name PIX2PIXHU_RADAR2TRACK_pretrained --use_wandb\n\n#!python test.py --dataroot ./datasets/PIX2PIXHURADAR --direction AtoB --model pix2pix --name PIX2PIXHU_RADAR2RADAR_pretrained --use_wandb\n\n!python test.py --dataroot ./datasets/V2PIX2PIX_R2T --direction AtoB --model pix2pix --name V2PIX2PIX_R2T_pretrained --use_wandb",
"----------------- Options ---------------\n aspect_ratio: 1.0 \n batch_size: 1 \n checkpoints_dir: ./checkpoints \n crop_size: 256 \n dataroot: ./datasets/V2PIX2PIX_R2T \t[default: None]\n dataset_mode: aligned \n direction: AtoB \n display_winsize: 256 \n epoch: latest \n eval: False \n gpu_ids: 0 \n init_gain: 0.02 \n init_type: normal \n input_nc: 3 \n isTrain: False \t[default: None]\n load_iter: 0 \t[default: 0]\n load_size: 256 \n max_dataset_size: inf \n model: pix2pix \t[default: test]\n n_layers_D: 3 \n name: V2PIX2PIX_R2T_pretrained \t[default: experiment_name]\n ndf: 64 \n netD: basic \n netG: unet_256 \n ngf: 64 \n no_dropout: False \n no_flip: False \n norm: batch \n num_test: 50 \n num_threads: 4 \n output_nc: 3 \n phase: test \n preprocess: resize_and_crop \n results_dir: ./results/ \n serial_batches: False \n suffix: \n use_wandb: True \t[default: False]\n verbose: False \n----------------- End -------------------\ndataset [AlignedDataset] was created\ninitialize network with normal\nmodel [Pix2PixModel] was created\nloading the model from ./checkpoints\\V2PIX2PIX_R2T_pretrained\\latest_net_G.pth\n---------- Networks initialized -------------\n[Network G] Total number of parameters : 54.414 M\n-----------------------------------------------\n\ncreating web directory ./results/V2PIX2PIX_R2T_pretrained\\test_latest\nprocessing (0000)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S101.png']\nprocessing (0005)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S106.png']\nprocessing (0010)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S201.png']\nprocessing (0015)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S206.png']\nprocessing (0020)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S401.png']\nprocessing (0025)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S406.png']\nprocessing (0030)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S601.png']\nprocessing (0035)-th image... 
['./datasets/V2PIX2PIX_R2T\\\\test\\\\S606.png']\nprocessing (0040)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S801.png']\nprocessing (0045)-th image... ['./datasets/V2PIX2PIX_R2T\\\\test\\\\S806.png']\n\n"
]
],
[
[
"# Visualize",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\nimg = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_fake_B.png')\nplt.imshow(img)",
"_____no_output_____"
],
[
"img = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_real_A.png')\nplt.imshow(img)",
"_____no_output_____"
],
[
"img = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_real_B.png')\nplt.imshow(img)",
"_____no_output_____"
],
[
"import torch\ntorch.cuda.init()\nprint(torch.randn(1, device='cuda'))",
"tensor([-1.2114], device='cuda:0')\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
ec6354b823195c97af74e44709a8c0fa3f72861f | 5,340 | ipynb | Jupyter Notebook | Algorithms/landsat_simple_composite.ipynb | OIEIEIO/earthengine-py-notebooks | 5d6c5cdec0c73bf02020ee17d42c9e30d633349f | [
"MIT"
]
| 1,008 | 2020-01-27T02:03:18.000Z | 2022-03-24T10:42:14.000Z | Algorithms/landsat_simple_composite.ipynb | rafatieppo/earthengine-py-notebooks | 99fbc4abd1fb6ba41e3d8a55f8911217353a3237 | [
"MIT"
]
| 8 | 2020-02-01T20:18:18.000Z | 2021-11-23T01:48:02.000Z | Algorithms/landsat_simple_composite.ipynb | rafatieppo/earthengine-py-notebooks | 99fbc4abd1fb6ba41e3d8a55f8911217353a3237 | [
"MIT"
]
| 325 | 2020-01-27T02:03:36.000Z | 2022-03-25T20:33:33.000Z | 37.083333 | 470 | 0.56161 | [
[
[
"<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Algorithms/landsat_simple_composite.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/landsat_simple_composite.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/landsat_simple_composite.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>",
"_____no_output_____"
],
[
"## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.",
"_____no_output_____"
]
],
[
[
"# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('Installing geemap ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])",
"_____no_output_____"
],
[
"import ee\nimport geemap",
"_____no_output_____"
]
],
[
[
"## Create an interactive map \nThe default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function. ",
"_____no_output_____"
]
],
[
[
"Map = geemap.Map(center=[40,-100], zoom=4)\nMap",
"_____no_output_____"
]
],
[
[
"## Add Earth Engine Python script ",
"_____no_output_____"
]
],
[
[
"# Add Earth Engine dataset\n# Load a raw Landsat 5 ImageCollection for a single year.\ncollection = ee.ImageCollection('LANDSAT/LT05/C01/T1') \\\n .filterDate('2010-01-01', '2010-12-31')\n\n# Create a cloud-free composite with default parameters.\ncomposite = ee.Algorithms.Landsat.simpleComposite(collection)\n\n# Create a cloud-free composite with custom parameters for\n# cloud score threshold and percentile.\ncustomComposite = ee.Algorithms.Landsat.simpleComposite(**{\n 'collection': collection,\n 'percentile': 75,\n 'cloudScoreRange': 5\n})\n\n# Display the composites.\nMap.setCenter(-122.3578, 37.7726, 10)\nMap.addLayer(composite, {'bands': ['B4', 'B3', 'B2'], 'max': 128}, 'TOA composite')\nMap.addLayer(customComposite, {'bands': ['B4', 'B3', 'B2'], 'max': 128},\n 'Custom TOA composite')\n\n",
"_____no_output_____"
]
],
[
[
"## Display Earth Engine data layers ",
"_____no_output_____"
]
],
[
[
"Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6355b5d0419f260544113d4d1707d468d2b274 | 19,759 | ipynb | Jupyter Notebook | notebooks/scratch/.ipynb_checkpoints/fst_prototype-checkpoint.ipynb | AI-sandbox/hyperLAI | 49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689 | [
"MIT"
]
| null | null | null | notebooks/scratch/.ipynb_checkpoints/fst_prototype-checkpoint.ipynb | AI-sandbox/hyperLAI | 49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689 | [
"MIT"
]
| null | null | null | notebooks/scratch/.ipynb_checkpoints/fst_prototype-checkpoint.ipynb | AI-sandbox/hyperLAI | 49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689 | [
"MIT"
]
| null | null | null | 26.62938 | 160 | 0.433322 | [
[
[
"import sys\nsys.path.append(\"../../../\")\nsys.path.append(\"../../hyperLAI/\")\nsys.path.append(\"../../../libraries/\")\nfrom utils.model_utils import *\nfrom models.fc_model import fc_model\nfrom features.hyperLAIdataset import HyperLoader\nfrom sklearn.feature_selection import VarianceThreshold\nimport torch\nfrom torch.utils import data\nfrom features.hyperLAIdataset import HyperLoader\nimport numpy as np\nimport pandas as pd\nimport json",
"_____no_output_____"
],
[
"data_dir = \"/scratch/users/patelas/hyperLAI/snp_data/whole_genome/variance_filtered_500000_updated/\"\ntrain_inds = np.load(\"/scratch/users/patelas/hyperLAI/ancestry_training_splits/80_10_10/train_indices.npy\")\nvalid_inds = np.load(\"/scratch/users/patelas/hyperLAI/ancestry_training_splits/80_10_10/valid_indices.npy\")\ntest_inds = np.load(\"/scratch/users/patelas/hyperLAI/ancestry_training_splits/80_10_10/test_indices.npy\")\nall_inds = np.sort(np.concatenate([train_inds, valid_inds, test_inds]))\nprint(all_inds[0], all_inds[-1])\n\n#Create the dataset\ndataset = HyperLoader(data_dir, all_inds, [0,1,2,3,4,5,6], \"all\")\n",
"_____no_output_____"
],
[
"train_snps = dataset.snps[train_inds]\ntrain_labels = dataset.suppop_labels[train_inds]",
"_____no_output_____"
],
[
"overall_vars = np.var(train_snps, axis=0)",
"_____no_output_____"
],
[
"labels_group = pd.DataFrame(train_snps).groupby(train_labels)",
"_____no_output_____"
],
[
"pop_vars = labels_group.apply(np.var)",
"_____no_output_____"
],
[
"pop_freqs = labels_group.count()[0] / train_snps.shape[0]",
"_____no_output_____"
],
[
"weighted_sum_vars = pop_freqs.values @ pop_vars.values",
"_____no_output_____"
],
[
"fst_vals = (overall_vars - weighted_sum_vars) / overall_vars",
"_____no_output_____"
],
[
"def fst_filter(snp_data, indices, labels, snps_to_keep):\n ind_snps = snp_data[indices]\n ind_labels = labels[indices]\n overall_vars = np.var(ind_snps, axis=0)\n labels_group = pd.DataFrame(ind_snps).groupby(ind_labels)\n pop_vars = labels_group.apply(np.var)\n pop_freqs = labels_group.count()[0] / ind_snps.shape[0]\n weighted_sum_vars = pop_freqs.values @ pop_vars.values\n fst_vals = (overall_vars - weighted_sum_vars) / overall_vars\n snps_preserved = np.argsort(fst_vals)[::-1][0:snps_to_keep]\n return snps_preserved",
"_____no_output_____"
],
[
"fs_out = fst_filter(dataset.snps, train_inds, dataset.suppop_labels, 20)",
"_____no_output_____"
],
[
"fs_out",
"_____no_output_____"
],
[
"fst_vals",
"_____no_output_____"
],
[
"(pd.DataFrame(train_snps).loc[train_labels == 0]).apply(np.var)",
"_____no_output_____"
],
[
"pop_vars",
"_____no_output_____"
],
[
"pop_freqs",
"_____no_output_____"
],
[
"0.196669 * 0.245368 + 0.261383 * 0.246243 + 0.028879 * 0.179019 + 0.233558 * 0.225339 + 0.218381 * 0.124509 + 0.011172 * 0.234959 +0.049958 * 0.220798 \n\n\n\n\n",
"_____no_output_____"
],
[
"weighted_sum_vars",
"_____no_output_____"
],
[
"len(train_snps)",
"_____no_output_____"
],
[
"pd.DataFrame(train_snps).sum(0)",
"_____no_output_____"
],
[
"2329*(4744-2329) / 4744**2",
"_____no_output_____"
],
[
"overall_vars",
"_____no_output_____"
],
[
"np.mean([4,7,3,5])",
"_____no_output_____"
],
[
"np.var([4,7,3,5])",
"_____no_output_____"
],
[
"mn = 1/4.75",
"_____no_output_____"
],
[
"(1-mn)/(mn**2)",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6356ed68b152bb62b0c5c03218af3315ed53ab | 9,207 | ipynb | Jupyter Notebook | HelperProjects/Convolutional-Neural-Networks/cnn-in-tensorflow/convolutional-layer-workspace.ipynb | luk6xff/SelfDrivingCarND | 1ad0a203f3c1ebd8ee3c114d8efc0d0cf99ddc42 | [
"MIT"
]
| 5 | 2019-06-05T18:32:55.000Z | 2020-05-29T07:41:24.000Z | HelperProjects/Convolutional-Neural-Networks/cnn-in-tensorflow/convolutional-layer-workspace.ipynb | luk6xff/SelfDrivingCarND | 1ad0a203f3c1ebd8ee3c114d8efc0d0cf99ddc42 | [
"MIT"
]
| null | null | null | HelperProjects/Convolutional-Neural-Networks/cnn-in-tensorflow/convolutional-layer-workspace.ipynb | luk6xff/SelfDrivingCarND | 1ad0a203f3c1ebd8ee3c114d8efc0d0cf99ddc42 | [
"MIT"
]
| 1 | 2019-06-05T18:33:08.000Z | 2019-06-05T18:33:08.000Z | 36.828 | 387 | 0.539155 | [
[
[
"## Using Convolution Layers in TensorFlow\nLet's now apply what we've learned to build real CNNs in TensorFlow. In the below exercise, you'll be asked to set up the dimensions of the Convolution filters, the weights, the biases. This is in many ways the trickiest part to using CNNs in TensorFlow. Once you have a sense of how to set up the dimensions of these attributes, applying CNNs will be far more straight forward.\n\nReview\nYou should go over the TensorFlow documentation for 2D convolutions. Most of the documentation is straightforward, except perhaps the ```padding``` argument. The padding might differ depending on whether you pass ```'VALID'``` or ```'SAME'```.\n\nHere are a few more things worth reviewing:\n\nIntroduction to TensorFlow -> TensorFlow Variables.\nHow to determine the dimensions of the output based on the input size and the filter size (shown below). You'll use this to determine what the size of your filter should be.\n* ```new_height = (input_height - filter_height + 2 * P)/S + 1```\n* ```new_width = (input_width - filter_width + 2 * P)/S + 1```\n* Instructions:\nFinish off each `TODO` in the `conv2d` function.\nSetup the `strides`, `padding` and filter weight/bias (`F_w` and `F_b`) such that the output shape is ```(1, 2, 2, 3)```. Note that all of these except `strides` should be TensorFlow variables.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport numpy as np",
"_____no_output_____"
],
[
"\"\"\"\nSetup the strides, padding and filter weight/bias such that\nthe output shape is (1, 2, 2, 3).\n\"\"\"\n# `tf.nn.conv2d` requires the input be 4D (batch_size, height, width, depth)\n# (1, 4, 4, 1)\nx = np.array([\n [0, 1, 0.5, 10],\n [2, 2.5, 1, -8],\n [4, 0, 5, 6],\n [15, 1, 2, 3]], dtype=np.float32).reshape((1, 4, 4, 1))\nX = tf.constant(x)\n\ndef conv2d(input_array):\n # Filter (weights and bias)\n # The shape of the filter weight is (height, width, input_depth, output_depth)\n # The shape of the filter bias is (output_depth,)\n # TODO: Define the filter weights `F_W` and filter bias `F_b`.\n # NOTE: Remember to wrap them in `tf.Variable`, they are trainable parameters after all.\n F_W = tf.Variable(tf.truncated_normal((2, 2, 1, 3)))\n F_b = tf.Variable(tf.zeros(3))\n # TODO: Set the stride for each dimension (batch_size, height, width, depth)\n strides = [1, 2, 2, 1]\n # TODO: set the padding, either 'VALID' or 'SAME'.\n padding = 'VALID'\n # https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#conv2d\n # `tf.nn.conv2d` does not include the bias computation so we have to add it ourselves after.\n return tf.nn.conv2d(input_array, F_W, strides, padding) + F_b\n\noutput = conv2d(X)\noutput",
"_____no_output_____"
],
[
"##### Do Not Modify ######\n\nimport tensorflow as tf\nimport numpy as np\nimport json\n\n\ndef solution(input_array):\n # Filter (weights and bias)\n F_W = tf.Variable(tf.truncated_normal((2, 2, 1, 3)))\n F_b = tf.Variable(tf.zeros(3))\n strides = [1, 2, 2, 1]\n padding = 'VALID'\n return tf.nn.conv2d(input_array, F_W, strides, padding) + F_b\n\ndef get_result(input_array, student_func):\n \n result = {'is_correct': None, 'error': False, 'values': [], 'output': '', 'custom_msg': ''}\n ours = solution(input_array)\n theirs = student_func(input_array)\n\n dim_names = ['Batch', 'Height', 'Width', 'Depth']\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n our_shape = ours.get_shape().as_list()\n their_shape = theirs.get_shape().as_list()\n\n did_pass = False\n\n try:\n for dn, ov, tv in zip(dim_names, our_shape, their_shape):\n if ov != tv:\n # dimension mismatch\n raise Exception('{} dimension: mismatch we have {}, you have {}'.format(dn, ov, tv))\n if np.alltrue(our_shape == their_shape):\n did_pass = True\n else:\n did_pass = False\n except:\n did_pass = False\n\n if did_pass:\n result['is_correct'] = 'Great Job!'\n result['values'] = ['your output shape: {}'.format(their_shape)]\n else:\n result['values'] = ['correct shape: {}'.format(our_shape)]\n result['output'] = ['your output shape: {}'.format(their_shape)]\n\n return result\n \ndef run_grader(input_array, student_func):\n \n grader_result = get_result(input_array, student_func)\n gt_shape = grader_result.get('values')\n student_func_shape = grader_result.get('output')\n comment = \"\"\n\n if grader_result['is_correct']:\n comment= \"Great job! Your Convolution layer looks good :)\"\n elif not grader_result['error']:\n comment = \"Not quite. 
The correct output shape is {} while your output shape is {}.\".format(gt_shape, student_func_shape)\n else:\n test_error = grader_result['error']\n comment = \"Something went wrong with your submission: {}\".format(test_error)\n\n grader_result['feedback'] = comment\n \n return grader_result.get('feedback')\n\ntest_X = tf.constant(np.random.randn(1, 4, 4, 1), dtype=tf.float32)\n\ntry:\n response = run_grader(test_X, conv2d)\n print(response)\n \n \nexcept Exception as err:\n print(str(err))\n ",
"Great job! Your Convolution layer looks good :)\n"
]
],
[
[
"## SOLUTION\n\n\n```python\ndef conv2d(input):\n # Filter (weights and bias)\n F_W = tf.Variable(tf.truncated_normal((2, 2, 1, 3)))\n F_b = tf.Variable(tf.zeros(3))\n strides = [1, 2, 2, 1]\n padding = 'VALID'\n return tf.nn.conv2d(input, F_W, strides, padding) + F_b\n```\nI want to transform the input shape ```(1, 4, 4, 1)``` to ```(1, 2, 2, 3)```. I choose ```'VALID'``` for the padding algorithm. I find it simpler to understand and it achieves the result I'm looking for.\n\n```python\nout_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))\nout_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))\n```\nPlugging in the values:\n\n```python\nout_height = ceil(float(4 - 2 + 1) / float(2)) = ceil(1.5) = 2\nout_width = ceil(float(4 - 2 + 1) / float(2)) = ceil(1.5) = 2\n```\nIn order to change the depth from 1 to 3, I have to set the output depth of my filter appropriately:\n\n```python\nF_W = tf.Variable(tf.truncated_normal((2, 2, 1, 3))) # (height, width, input_depth, output_depth)\nF_b = tf.Variable(tf.zeros(3)) # (output_depth)\nThe input has a depth of 1, so I set that as the input_depth of the filter.\n```",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
]
|
ec635aa365da81a075aa7fbe2820fd4caae3c9d3 | 4,941 | ipynb | Jupyter Notebook | Jupyter/cpp/variant.ipynb | tedi21/SisypheReview | f7c05bad1ccc036f45870535149d9685e1120c2c | [
"Unlicense"
]
| null | null | null | Jupyter/cpp/variant.ipynb | tedi21/SisypheReview | f7c05bad1ccc036f45870535149d9685e1120c2c | [
"Unlicense"
]
| null | null | null | Jupyter/cpp/variant.ipynb | tedi21/SisypheReview | f7c05bad1ccc036f45870535149d9685e1120c2c | [
"Unlicense"
]
| null | null | null | 23.641148 | 131 | 0.488767 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
ec6363506a74fa231fa0586b15df99300729efc6 | 658,363 | ipynb | Jupyter Notebook | notebooks/zeisel/Zeisel_SGBM_sqrt.ipynb | redst4r/arboreto | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | [
"BSD-3-Clause"
]
| 20 | 2018-06-28T07:00:47.000Z | 2020-10-08T08:58:22.000Z | notebooks/zeisel/Zeisel_SGBM_sqrt.ipynb | redst4r/arboreto | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | [
"BSD-3-Clause"
]
| 23 | 2018-06-06T13:11:20.000Z | 2021-01-08T03:37:43.000Z | notebooks/zeisel/Zeisel_SGBM_sqrt.ipynb | redst4r/arboreto | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | [
"BSD-3-Clause"
]
| 15 | 2018-11-21T08:21:46.000Z | 2020-11-25T06:28:32.000Z | 567.55431 | 238,530 | 0.930058 | [
[
[
"# Zeisel GRN Inference and Analysis",
"_____no_output_____"
],
[
"## 0. Import dependencies",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nsys.path.append('../../')\n\nfrom arboreto.core import *\nfrom arboreto.utils import *\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## 1. Load the data (outside the scope of the arboreto API)",
"_____no_output_____"
]
],
[
[
"zeisel_ex_path = '/media/tmo/data/work/datasets/zeisel/expression_sara_filtered.txt'\nzeisel_tf_path = '/media/tmo/data/work/datasets/TF/mm9_TFs.txt'",
"_____no_output_____"
],
[
"def clean_transpose(df, numeric_type=np.float32):\n t = df.T\n new_header = t.iloc[0]\n t = t[1:]\n t.columns = new_header\n return t.astype(numeric_type)",
"_____no_output_____"
],
[
"zeisel_df = pd.read_csv(zeisel_ex_path, sep='\\t')",
"_____no_output_____"
],
[
"zeisel_df.shape",
"_____no_output_____"
],
[
"zeisel_df.head()",
"_____no_output_____"
],
[
"zeisel_df_T = clean_transpose(zeisel_df)",
"_____no_output_____"
],
[
"zeisel_gene_names = list(zeisel_df_T.columns)",
"_____no_output_____"
],
[
"zeisel_ex_matrix = zeisel_df_T.as_matrix()",
"_____no_output_____"
],
[
"zeisel_tf_names = load_tf_names(zeisel_tf_path)",
"_____no_output_____"
]
],
[
[
"## 2. Initialize Dask client",
"_____no_output_____"
]
],
[
[
"from dask.distributed import Client, LocalCluster",
"_____no_output_____"
],
[
"client = Client(LocalCluster(memory_limit=8e9))",
"_____no_output_____"
],
[
"client",
"_____no_output_____"
]
],
[
[
"If you work remotely, use port forwarding to view the dashboard:\n\n```bash\n$ ssh -L 8000:localhost:8787 nostromo\n```",
"_____no_output_____"
]
],
[
[
"client.shutdown()",
"_____no_output_____"
]
],
[
[
"## 3. Compute GRN inference graph",
"_____no_output_____"
]
],
[
[
"SGBM_sqrt_KWARGS = {\n 'learning_rate': 0.01,\n 'max_features': 'sqrt',\n 'n_estimators': 5000,\n 'subsample': 0.9}",
"_____no_output_____"
]
],
[
[
"#### Create the dask computation graphs",
"_____no_output_____"
]
],
[
[
"%%time\nnetwork_graph, meta_graph = create_graph(zeisel_ex_matrix,\n zeisel_gene_names,\n zeisel_tf_names,\n \"GBM\",\n SGBM_sqrt_KWARGS,\n target_genes='all',\n early_stop_window_length=25,\n include_meta=True)",
"CPU times: user 7.04 s, sys: 624 ms, total: 7.67 s\nWall time: 7.08 s\n"
]
],
[
[
"#### Persist the distributed DataFrames\n\n* returns futures, i.e. not yet reified references to objects",
"_____no_output_____"
]
],
[
[
"%%time\na, b = client.persist([network_graph, meta_graph])",
"CPU times: user 11.6 s, sys: 336 ms, total: 12 s\nWall time: 11.9 s\n"
]
],
[
[
"#### Compute results",
"_____no_output_____"
]
],
[
[
"%%time\nnetwork_df = a.compute(sync=True)",
"CPU times: user 4min 32s, sys: 2min 34s, total: 7min 6s\nWall time: 8min 7s\n"
],
[
"%%time\nmeta_df = b.compute(sync=True)",
"CPU times: user 15.2 s, sys: 1.49 s, total: 16.7 s\nWall time: 16 s\n"
]
],
[
[
"## 4. Save full and top_100k networks to file",
"_____no_output_____"
]
],
[
[
"len(network_df)",
"_____no_output_____"
],
[
"len(meta_df)",
"_____no_output_____"
],
[
"network_df.sort_values(by='importance', ascending=0).to_csv('zeisel_sgbm_sqrt_all.txt', index=False, sep='\\t')",
"_____no_output_____"
],
[
"top_100k = network_df.nlargest(100000, columns=['importance'])",
"_____no_output_____"
],
[
"top_100k.to_csv('zeisel_sgbm_sqrt_100k.txt', index=False, sep='\\t')",
"_____no_output_____"
],
[
"merged_df = top_100k.merge(meta_df, on='target')",
"_____no_output_____"
],
[
"merged_df.head()",
"_____no_output_____"
],
[
"merged_df['imp2'] = merged_df['importance'] / merged_df['n_estimators']",
"_____no_output_____"
],
[
"top_100k.plot(use_index=0, figsize=(16,9))\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Distribution of nr of boosting rounds per regression",
"_____no_output_____"
]
],
[
[
"meta_df.hist(bins=100, figsize=(20, 9), log=0)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Plot the maximum variable importance (sklearn default) vs. nr of boosting rounds\n\n* **!= the formula in Arboreto**\n* Using the sklearn default variable importances which normalizes regressions by dividing by nr of trees in the ensemble.\n* Effect is that regressions with few trees also deliver high feature importances (aka network links), this is undesirable.\n* In Arboreto, we omit this normalization step to make use of the nr of trees as a heuristic indicator of how much *signal* there is in a regression.",
"_____no_output_____"
]
],
[
[
"max_imp2_by_rounds =\\\nmeta_df.merge(merged_df.groupby(['target'])['imp2'].nlargest(1).reset_index(), \n how='left', \n on=['target'])\n\nmax_imp2_by_rounds.plot.scatter(x='n_estimators', y='imp2', figsize=(16, 9))\nplt.show()",
"_____no_output_____"
],
[
"max_imp2_by_rounds.plot.hexbin(x='n_estimators', \n y='imp2', \n bins='log', \n cmap='inferno',\n figsize=(16, 9))\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Plotting corrected feature importance (Arboreto SGBM default) vs. nr of boosting rounds",
"_____no_output_____"
]
],
[
[
"max_imp_by_rounds =\\\nmeta_df.merge(network_df.groupby(['target'])['importance'].nlargest(1).reset_index(), \n how='left', \n on=['target'])\n\nmax_imp_by_rounds.plot.scatter(x='n_estimators', y='importance', figsize=(16, 9))\nplt.show()",
"_____no_output_____"
],
[
"max_imp_by_rounds.plot.hexbin(x='n_estimators', \n bins='log',\n cmap='inferno',\n y='importance',\n figsize=(16, 9))\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Links in common with GENIE3",
"_____no_output_____"
]
],
[
[
"z_genie3 = pd.read_csv('/media/tmo/data/work/datasets/benchmarks/genie3/zeisel/zeisel.filtered.genie3.txt', header=None, sep='\\t')\nz_genie3.columns=['TF', 'target', 'importance']",
"_____no_output_____"
],
[
"inner = z_genie3.merge(top_100k, how='inner', on=['TF', 'target'])",
"_____no_output_____"
],
[
"len(inner)",
"_____no_output_____"
],
[
"inner_50k = z_genie3[:50000].merge(top_100k[:50000], how='inner', on=['TF', 'target'])",
"_____no_output_____"
],
[
"len(inner_50k)",
"_____no_output_____"
],
[
"inner_25k = z_genie3[:25000].merge(top_100k[:25000], how='inner', on=['TF', 'target'])",
"_____no_output_____"
],
[
"len(inner_25k) / 25000",
"_____no_output_____"
],
[
"inner_10k = z_genie3[:10000].merge(top_100k[:10000], how='inner', on=['TF', 'target'])",
"_____no_output_____"
],
[
"len(inner_10k)",
"_____no_output_____"
],
[
"inner_5k = z_genie3[:5000].merge(top_100k[:5000], how='inner', on=['TF', 'target'])",
"_____no_output_____"
],
[
"len(inner_5k)",
"_____no_output_____"
],
[
"top_100k_10pct = pd.read_csv('zeisel_sgbm_100k.txt', sep='\\t')",
"_____no_output_____"
],
[
"inner_sgbms = top_100k_10pct.merge(top_100k, on=['TF', 'target'])",
"_____no_output_____"
],
[
"len(inner_sgbms)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6385330fa6ec00c75e50a6decdc346fe7bc2fd | 21,828 | ipynb | Jupyter Notebook | Copy_of_tflite_c06_exercise_rock_paper_scissors_solution.ipynb | Chemokoren/AdvancedNetworkUtils | 11db96204aa92b46f7c7b9d5e91f8e24167b2f0e | [
"Apache-2.0"
]
| null | null | null | Copy_of_tflite_c06_exercise_rock_paper_scissors_solution.ipynb | Chemokoren/AdvancedNetworkUtils | 11db96204aa92b46f7c7b9d5e91f8e24167b2f0e | [
"Apache-2.0"
]
| null | null | null | Copy_of_tflite_c06_exercise_rock_paper_scissors_solution.ipynb | Chemokoren/AdvancedNetworkUtils | 11db96204aa92b46f7c7b9d5e91f8e24167b2f0e | [
"Apache-2.0"
]
| null | null | null | 28.056555 | 286 | 0.485157 | [
[
[
"<a href=\"https://colab.research.google.com/github/Chemokoren/AdvancedNetworkUtils/blob/master/Copy_of_tflite_c06_exercise_rock_paper_scissors_solution.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"##### Copyright 2018 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Rock, Paper & Scissors with TensorFlow Hub - TFLite",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_lite/tflite_c06_exercise_rock_paper_scissors_solution.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_lite/tflite_c06_exercise_rock_paper_scissors_solution.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"import os\n\nimport matplotlib.pylab as plt\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nprint(\"Version: \", tf.__version__)\nprint(\"Eager mode: \", tf.executing_eagerly())\nprint(\"Hub version: \", hub.__version__)\nprint(\"GPU is\", \"available\" if tf.test.is_gpu_available() else \"NOT AVAILABLE\")",
"_____no_output_____"
]
],
[
[
"## Select the Hub/TF2 module to use\n\nHub modules for TF 1.x won't work here, please use one of the selections provided.",
"_____no_output_____"
]
],
[
[
"module_selection = (\"mobilenet_v2\", 224, 1280) #@param [\"(\\\"mobilenet_v2\\\", 224, 1280)\", \"(\\\"inception_v3\\\", 299, 2048)\"] {type:\"raw\", allow-input: true}\nhandle_base, pixels, FV_SIZE = module_selection\nMODULE_HANDLE =\"https://tfhub.dev/google/tf2-preview/{}/feature_vector/4\".format(handle_base)\nIMAGE_SIZE = (pixels, pixels)\nprint(\"Using {} with input size {} and output dimension {}\".format(\n MODULE_HANDLE, IMAGE_SIZE, FV_SIZE))",
"_____no_output_____"
]
],
[
[
"## Data preprocessing",
"_____no_output_____"
],
[
"Use [TensorFlow Datasets](http://tensorflow.org/datasets) to load the rock, paper and scissors dataset.\n\nThis `tfds` package is the easiest way to load pre-defined data. If you have your own data, and are interested in importing using it with TensorFlow see [loading image data](../load_data/images.ipynb)\n",
"_____no_output_____"
]
],
[
[
"import tensorflow_datasets as tfds\ntfds.disable_progress_bar()",
"_____no_output_____"
]
],
[
[
"The `tfds.load` method downloads and caches the data, and returns a `tf.data.Dataset` object. These objects provide powerful, efficient methods for manipulating data and piping it into your model.\n\nSince `\"rock_paper_scissors\"` doesn't define standard splits, use the subsplit feature to divide it into (train, validation, test) with 80%, 10%, 10% of the data respectively.",
"_____no_output_____"
]
],
[
[
"splits = tfds.Split.ALL.subsplit(weighted=(80, 10, 10))\n\nsplits, info = tfds.load('rock_paper_scissors', with_info=True, as_supervised=True, split = splits)\n\n(train_examples, validation_examples, test_examples) = splits\n\nnum_examples = info.splits['train'].num_examples\nnum_classes = info.features['label'].num_classes",
"_____no_output_____"
]
],
[
[
"### Format the Data\n\nUse the `tf.image` module to format the images for the task.\n\nResize the images to a fixes input size, and rescale the input channels",
"_____no_output_____"
]
],
[
[
"def format_image(image, label):\n image = tf.image.resize(image, IMAGE_SIZE) / 255.0\n return image, label\n",
"_____no_output_____"
]
],
[
[
"Now shuffle and batch the data\n",
"_____no_output_____"
]
],
[
[
"BATCH_SIZE = 32 #@param {type:\"integer\"}",
"_____no_output_____"
],
[
"train_batches = train_examples.shuffle(num_examples // 4).batch(BATCH_SIZE).map(format_image).prefetch(1)\nvalidation_batches = validation_examples.batch(BATCH_SIZE).map(format_image).prefetch(1)\ntest_batches = test_examples.batch(1).map(format_image)",
"_____no_output_____"
]
],
[
[
"Inspect a batch",
"_____no_output_____"
]
],
[
[
"for image_batch, label_batch in train_batches.take(1):\n pass\n\nimage_batch.shape",
"_____no_output_____"
]
],
[
[
"## Defining the model\n\nAll it takes is to put a linear classifier on top of the `feature_extractor_layer` with the Hub module.\n\nFor speed, we start out with a non-trainable `feature_extractor_layer`, but you can also enable fine-tuning for greater accuracy.",
"_____no_output_____"
]
],
[
[
"do_fine_tuning = False #@param {type:\"boolean\"}",
"_____no_output_____"
],
[
"print(\"Building model with\", MODULE_HANDLE)\nmodel = tf.keras.Sequential([\n hub.KerasLayer(MODULE_HANDLE,\n input_shape=IMAGE_SIZE + (3, ), \n output_shape=[FV_SIZE],\n trainable=do_fine_tuning),\n tf.keras.layers.Dense(num_classes)\n])\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"## Training the model",
"_____no_output_____"
]
],
[
[
"if do_fine_tuning:\n model.compile(\n optimizer=tf.keras.optimizers.SGD(lr=0.002, momentum=0.9), \n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\nelse:\n model.compile(\n optimizer='adam', \n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])",
"_____no_output_____"
],
[
"EPOCHS = 5\nhist = model.fit(train_batches,\n epochs=EPOCHS,\n validation_data=validation_batches)",
"_____no_output_____"
]
],
[
[
"## Export the model",
"_____no_output_____"
]
],
[
[
"RPS_SAVED_MODEL = \"rps_saved_model\"",
"_____no_output_____"
]
],
[
[
"Export the SavedModel",
"_____no_output_____"
]
],
[
[
"tf.saved_model.save(model, RPS_SAVED_MODEL)",
"_____no_output_____"
],
[
"%%bash -s $RPS_SAVED_MODEL\nsaved_model_cli show --dir $1 --tag_set serve --signature_def serving_default",
"_____no_output_____"
],
[
"loaded = tf.saved_model.load(RPS_SAVED_MODEL)",
"_____no_output_____"
],
[
"print(list(loaded.signatures.keys()))\ninfer = loaded.signatures[\"serving_default\"]\nprint(infer.structured_input_signature)\nprint(infer.structured_outputs)",
"_____no_output_____"
]
],
[
[
"## Convert with TFLiteConverter",
"_____no_output_____"
]
],
[
[
"converter = tf.lite.TFLiteConverter.from_saved_model(RPS_SAVED_MODEL)\nconverter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\n\n\ntflite_model = converter.convert()\nwith open(\"converted_model.tflite\", \"wb\") as f:\n f.write(tflite_model)",
"_____no_output_____"
]
],
[
[
"Test the TFLite model using the Python Interpreter",
"_____no_output_____"
]
],
[
[
"# Load TFLite model and allocate tensors.\ntflite_model_file = 'converted_model.tflite'\nwith open(tflite_model_file, 'rb') as fid:\n tflite_model = fid.read()\n \ninterpreter = tf.lite.Interpreter(model_content=tflite_model)\ninterpreter.allocate_tensors()\n\ninput_index = interpreter.get_input_details()[0][\"index\"]\noutput_index = interpreter.get_output_details()[0][\"index\"]",
"_____no_output_____"
],
[
"from tqdm import tqdm\n\n# Gather results for the randomly sampled test images\npredictions = []\n\ntest_labels, test_imgs = [], []\nfor img, label in tqdm(test_batches.take(10)):\n interpreter.set_tensor(input_index, img)\n interpreter.invoke()\n predictions.append(interpreter.get_tensor(output_index))\n \n test_labels.append(label.numpy()[0])\n test_imgs.append(img)",
"_____no_output_____"
],
[
"#@title Utility functions for plotting\n# Utilities for plotting\n\nclass_names = ['rock', 'paper', 'scissors']\n\ndef plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n \n img = np.squeeze(img)\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n print(type(predicted_label), type(true_label))\n if predicted_label == true_label:\n color = 'green'\n else:\n color = 'red'\n \n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100*np.max(predictions_array),\n class_names[true_label]),\n color=color)\n",
"_____no_output_____"
],
[
"#@title Visualize the outputs { run: \"auto\" }\nindex = 0 #@param {type:\"slider\", min:0, max:9, step:1}\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(index, predictions, test_labels, test_imgs)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Download the model\n\n**NOTE: You might have to run to the cell below twice**",
"_____no_output_____"
]
],
[
[
"with open('labels.txt', 'w') as f:\n f.write('\\n'.join(class_names))\n\ntry:\n from google.colab import files\n files.download('converted_model.tflite')\n files.download('labels.txt')\nexcept:\n pass",
"_____no_output_____"
]
],
[
[
"# Prepare the test images for download (Optional)",
"_____no_output_____"
],
[
"This part involves downloading additional test images for the Mobile Apps only in case you need to try out more samples",
"_____no_output_____"
]
],
[
[
"!mkdir -p test_images",
"_____no_output_____"
],
[
"from PIL import Image\n\nfor index, (image, label) in enumerate(test_batches.take(50)):\n image = tf.cast(image * 255.0, tf.uint8)\n image = tf.squeeze(image).numpy()\n pil_image = Image.fromarray(image)\n pil_image.save('test_images/{}_{}.jpg'.format(class_names[label[0]], index))",
"_____no_output_____"
],
[
"!ls test_images",
"_____no_output_____"
],
[
"!zip -qq rps_test_images.zip -r test_images/",
"_____no_output_____"
],
[
"try:\n files.download('rps_test_images.zip')\nexcept:\n pass",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec63877c2b57dae2a376fb494868b84cefdceda1 | 17,709 | ipynb | Jupyter Notebook | 01_Data Preprocessing/01_load_data_ir.ipynb | yjik122/final_project | 7e908893548d11ea216f1337263e4a122420b500 | [
"Unlicense"
]
| 1 | 2020-12-31T17:26:03.000Z | 2020-12-31T17:26:03.000Z | 01_Data Preprocessing/01_load_data_ir.ipynb | yjik122/final_project | 7e908893548d11ea216f1337263e4a122420b500 | [
"Unlicense"
]
| null | null | null | 01_Data Preprocessing/01_load_data_ir.ipynb | yjik122/final_project | 7e908893548d11ea216f1337263e4a122420b500 | [
"Unlicense"
]
| 1 | 2022-02-09T05:58:28.000Z | 2022-02-09T05:58:28.000Z | 60.030508 | 6,178 | 0.471399 | [
[
[
"## Interest Rate",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"- **path_ir**: path of the interest rate file for an individual country (the data can only be downloaded one country at a time)\n- **country_code**: country code used throughout (refer to README)\n- **fred_code**: alphanumeric code given for each dataset, for each country by the FRED website",
"_____no_output_____"
]
],
[
[
"def get_interest_rate(path_ir, country_code, fred_code):\n\n df_ir = pd.read_csv(path_ir)\n df_ir['DATE'] = pd.to_datetime(df_ir['DATE'])\n df_ir['month'] = df_ir['DATE'].dt.month\n df_ir['year'] = df_ir['DATE'].dt.year\n df_ir = df_ir.rename(columns={fred_code: f'{country_code}_IR', 'DATE': 'index'})\n df_ir[f'{country_code}_IR'] = df_ir[f'{country_code}_IR']/100\n \n return df_ir",
"_____no_output_____"
],
[
"def append_df(df, df_ir):\n \n df_ir.drop(columns='index', inplace=True)\n df = pd.merge(df, df_ir, left_on=['month', 'year'], right_on=['month', 'year'])\n\n return df",
"_____no_output_____"
],
[
"def data_combine(path, df_ir):\n \n exchange_df = pd.read_csv(path) \n print(exchange_df.shape)\n\n df_with_ir = pd.merge(exchange_df, df_ir, left_on=['month', 'year'], right_on=['month', 'year'])\n df_with_ir.drop(columns='index', inplace=True)\n\n return df_with_ir",
"_____no_output_____"
],
[
"#replace country_code & fred_code values with values for the chosen interest rate file \n\npath_ir = '<path to interest rate file>'\ncountry_code = 'USD' \nfred_code = 'IRSTCI01USM156N' ",
"_____no_output_____"
],
[
"df_ir = get_interest_rate(path_ir, country_code, fred_code)\ndf_ir",
"_____no_output_____"
]
],
[
[
"As the data can only be downloaded one country at a time, we have to manually merge the interest rate values into one dataframe. In order to do so, a copy of the first 'df_ir' is made and set aside as the dataframe (df) to which the other interest rate values will be appended to. From the second 'df_ir' onward, we will be appending the values to 'df' to get a dataset that only contains the interest rate values.",
"_____no_output_____"
]
],
[
[
"#use only for the first instance, comment out afterwards\ndf = df_ir.copy()",
"_____no_output_____"
],
[
"#start using from the second instance\ndf = append_df(df, df_ir)",
"_____no_output_____"
],
[
"df #240 rows",
"_____no_output_____"
],
[
"df.to_csv('<path to save interest rate dataset>', index=False)",
"_____no_output_____"
]
],
[
[
"- **forex_path**: path of the dataset with exchange rates",
"_____no_output_____"
]
],
[
[
"forex_path = '<path of main dataset>'",
"_____no_output_____"
],
[
"df_with_ir = data_combine(forex_path, df)\nprint(df_with_ir.shape) #4997 rows\nprint(df_with_ir.isna().sum())",
"(4997, 21)\n(4997, 37)\nTime Series 0\nAUD_USD 0\nNZD_USD 0\nGBP_USD 0\nBRL_USD 0\nCND_USD 0\nCNY_USD 0\nIDR_USD 0\nKRW_USD 0\nMXN_USD 0\nZAR_USD 0\nDKK_USD 0\nJPY_USD 0\nNOK_USD 0\nSEK_USD 0\nCHF_USD 0\nmonth 0\nyear 0\nUSD_USD 0\nprice_gold 0\nfc_year 0\nAUD_IR 0\nNZD_IR 0\nGBP_IR 0\nBRL_IR 0\nCND_IR 0\nCNY_IR 0\nIDR_IR 0\nKRW_IR 0\nMXN_IR 0\nZAR_IR 0\nDKK_IR 0\nJPY_IR 0\nNOK_IR 0\nSEK_IR 0\nCHF_IR 0\nUSD_IR 0\ndtype: int64\n"
],
[
"df_with_ir.to_csv('<path to save the new main dataset 1>', index=False)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec639329e3be70be210ffd0289d9aeae74377737 | 35,413 | ipynb | Jupyter Notebook | 06_runner2.ipynb | ftm624/fastai_nbs | e567edefbad666c06d929558cdb3a58d6e65f395 | [
"Apache-2.0"
]
| null | null | null | 06_runner2.ipynb | ftm624/fastai_nbs | e567edefbad666c06d929558cdb3a58d6e65f395 | [
"Apache-2.0"
]
| null | null | null | 06_runner2.ipynb | ftm624/fastai_nbs | e567edefbad666c06d929558cdb3a58d6e65f395 | [
"Apache-2.0"
]
| null | null | null | 55.944708 | 11,400 | 0.758676 | [
[
[
"%reload_ext autoreload\n%autoreload 2\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"# Runner v2.0\n> Exception Control Flow\n\nWe're now going to rework our Runner. ",
"_____no_output_____"
]
],
[
[
"#export\n\nfrom exp.nb_05 import *\n\n# import torch.nn.functional as F\n# import torch.nn as nn\n# import torch.optim as optim",
"_____no_output_____"
]
],
[
[
"## Get Data",
"_____no_output_____"
]
],
[
[
"x_train,y_train,x_valid,y_valid = get_data()\ntrain_ds,valid_ds = Dataset(x_train, y_train),Dataset(x_valid, y_valid)\nnh,bs = 50,512\nc = y_train.max().item()+1\nloss_func = F.cross_entropy",
"_____no_output_____"
],
[
"data = DataBunch(*get_dls(train_ds, valid_ds, bs), c)",
"_____no_output_____"
]
],
[
[
"## Callbacks",
"_____no_output_____"
],
[
"Previously, our `Callback` parent class was ",
"_____no_output_____"
]
],
[
[
"#export \nclass Callback():\n _order = 0\n def set_runner(self, run): self.run = run\n\n def __getattr__(self, k): return getattr(self.run, k)\n\n @property\n def name(self):\n name = re.sub(r'Callback$', '', self.__class__.__name__)\n return camel2snake(name or \"callback\")\n \n # new to Runner 2.0\n def __call__(self, cb_name):\n cb = getattr(self, cb_name, None)\n if cb and cb(): return True\n return False\n ",
"_____no_output_____"
]
],
[
[
"We're not altering the code for the `TrainEvalCallback` but now it is inheriting from our modified `Callback` parent",
"_____no_output_____"
]
],
[
[
"#export \nclass TrainEvalCallback(Callback):\n def begin_fit(self):\n self.run.n_epochs = 0.\n self.run.n_iter = 0\n\n def after_batch(self):\n if not self.in_train:\n return\n self.run.n_epochs += 1./self.iters\n self.run.n_iter += 1\n\n def begin_epoch(self):\n self.run.n_epochs = self.epoch\n self.model.train()\n self.run.in_train = True\n\n def begin_validate(self):\n self.model.eval()\n self.run.in_train = False",
"_____no_output_____"
]
],
[
[
"We will define three types of Exceptions that our new Runner will use to control the training loop:",
"_____no_output_____"
]
],
[
[
"#export\nclass CancelTrainException(Exception): pass\nclass CancelEpochException(Exception): pass\nclass CancelBatchException(Exception): pass",
"_____no_output_____"
]
],
[
[
"## Runner",
"_____no_output_____"
],
[
"We're going to remove all of the `if self('some_method')` and instead just call our callbacks:",
"_____no_output_____"
]
],
[
[
"#export\nclass Runner():\n def __init__(self, cbs=None, cb_funcs=None):\n cbs = listify(cbs)\n for cbf in listify(cb_funcs):\n cb = cbf()\n setattr(self, cb.name, cb)\n cbs.append(cb)\n self.stop = False\n self.cbs = [TrainEvalCallback()]+cbs\n\n @property\n def opt(self): return self.learn.opt\n @property\n def model(self): return self.learn.model\n @property\n def loss_func(self):return self.learn.loss_func\n @property\n def data(self):return self.learn.data\n\n def one_batch(self, xb, yb):\n try:\n self.xb, self.yb = xb, yb\n self('begin_batch')\n self.pred = self.model(self.xb)\n self('after_pred')\n self.loss = self.loss_func(self.pred, self.yb)\n self('after_loss')\n if not self.in_train: return # exits if in validation mode\n self.loss.backward()\n self('after_backward')\n self.opt.step()\n self('after_step')\n self.opt.zero_grad()\n except CancelBatchException: self('after_cancel_batch')\n finally: self('after_batch')\n\n def all_batches(self, dl):\n self.iters = len(dl)\n try:\n for xb, yb in dl: self.one_batch(xb, yb)\n except: CancelEpochException: self('after_cancel_epoch')\n\n def fit(self, epochs, learn):\n self.epochs = epochs\n self.learn = learn\n self.loss = tensor(0.)\n\n try:\n for cb in self.cbs: cb.set_runner(self) # passes self as the runner object to each callback\n self(\"begin_fit\")\n \n for epoch in range(epochs):\n self.epoch = epoch\n if not self('begin_epoch'): self.all_batches(self.data.train_dl)\n\n with torch.no_grad():\n if not self('begin_validate'):self.all_batches(self.data.valid_dl)\n self('after_epoch')\n\n except: CancelTrainException: self('after_cancel_train')\n finally:\n self('after_fit')\n self.learn = None\n\n def __call__(self, cb_name):\n res = False\n for cb in sorted(self.cbs, key=lambda x: x._order):\n res = cb(cb_name) or res\n return res",
"_____no_output_____"
]
],
[
[
"## Other Callbacks",
"_____no_output_____"
],
[
"### TestCallback",
"_____no_output_____"
],
[
"We now have the ability to completely kill the training in mid-epoch if we reach a certain condition.\n\nThis is done by simply raising the `CancelTrainException`",
"_____no_output_____"
]
],
[
[
"class TestCallback(Callback):\n _order=1\n def after_step(self):\n print(self.n_iter)\n if self.n_iter >= 10: raise CancelTrainException()",
"_____no_output_____"
],
[
"learn = create_learner(get_model, loss_func, data)",
"_____no_output_____"
],
[
"run = Runner(TestCallback())",
"_____no_output_____"
],
[
"run.fit(1, learn)",
"0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n"
]
],
[
[
"### Recorder",
"_____no_output_____"
],
[
"Adding in the recording lrs for multiple param groups. And a new plot function that plots the losses against the lrs. ",
"_____no_output_____"
]
],
[
[
"#export\nclass Recorder(Callback):\n def begin_fit(self):\n self.losses = []\n self.lrs = [[] for _ in self.opt.param_groups]\n\n def after_step(self):\n if not self.in_train: return \n for pg,lr in zip(self.opt.param_groups, self.lrs): lr.append(pg['lr'])\n self.losses.append(self.loss.detach().cpu())\n \n def plot_losses(self, skip_last=0):\n plt.plot(self.losses[:len(self.losses)-slip_last])\n\n def plot_lr(self, pgid=-1):\n plt.plot(self.lrs[pgid])\n \n def plot(self, skip_last=0, pgid=-1):\n losses = [o.item() for o in self.losses]\n lrs = self.lrs[pgid]\n n = len(losses)-skip_last\n plt.xscale('log')\n plt.plot(lrs[:n], losses[:n])",
"_____no_output_____"
]
],
[
[
"### Param Scheduler",
"_____no_output_____"
],
[
"Again adding functionality to deal with multiple param groups.",
"_____no_output_____"
]
],
[
[
"#export\nclass ParamScheduler(Callback):\n _order = 1\n \n def __init__(self, pname, sched_funcs):\n self.pname = pname\n self.sched_funcs = sched_funcs\n \n def begin_fit(self):\n if not isinstance(self.sched_funcs, (list, tuple)):\n self.sched_funcs = [self.sched_funcs] * len(self.opt.param_groups)\n \n def set_param(self):\n assert len(self.opt.param_groups)==len(self.sched_funcs) # checking that begin_fit was called\n for pg, f in zip(self.opt.param_groups, self.sched_funcs):\n pg[self.pname]=f(self.n_epochs/self.epochs) # call the schedule function with the current position\n \n def begin_batch(self):\n if self.in_train: self.set_param()\n ",
"_____no_output_____"
]
],
[
[
"### LR Finder",
"_____no_output_____"
],
[
"LR Finder is supposed to help determine a suitable value for the learning rate. ",
"_____no_output_____"
]
],
[
[
"#export\nclass LR_Find(Callback):\n _order = 1\n def __init__(self, max_iter=100, min_lr = 1e-6, max_lr=10):\n self.max_iter = max_iter\n self.min_lr = min_lr\n self.max_lr = max_lr\n self.best_loss = 1e9\n \n def begin_batch(self):\n if not self.in_train: return\n pos = self.n_iter/self.max_iter\n lr = self.min_lr * (self.max_lr/self.min_lr) ** pos\n for pg in self.opt.param_groups: pg['lr'] = lr\n \n def after_step(self):\n if self.n_iter>=self.max_iter or self.loss>self.best_loss*10:\n raise CancelTrainException\n if self.loss < self.best_loss: self.best_loss = self.loss",
"_____no_output_____"
],
[
"learn = create_learner(get_model, loss_func, data)",
"_____no_output_____"
]
],
[
[
"### AvgStats",
"_____no_output_____"
]
],
[
[
"#export\nclass AvgStatsCallback(Callback):\n def __init__(self, metrics):\n self.train_stats = AvgStats(metrics, True)\n self.valid_stats = AvgStats(metrics, False)\n\n def begin_epoch(self):\n self.train_stats.reset()\n self.valid_stats.reset()\n\n def after_loss(self):\n stats = self.train_stats if self.in_train else self.valid_stats\n with torch.no_grad(): stats.accumulate(self.run)\n\n def after_epoch(self):\n print(self.train_stats)\n print(self.valid_stats)",
"_____no_output_____"
]
],
[
[
"## Test",
"_____no_output_____"
]
],
[
[
"run = Runner(cb_funcs=[LR_Find, Recorder])",
"_____no_output_____"
],
[
"run.fit(2, learn)",
"_____no_output_____"
],
[
"run.recorder.plot(skip_last=10)",
"_____no_output_____"
],
[
"run.recorder.plot_lr()",
"_____no_output_____"
],
[
"!python notebook2script.py 06_runner2.ipynb",
"Converted 06_runner2.ipynb to exp\\nb_06.py\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec63a31d61ea937a617c161e6e620628d42b8eb2 | 46,076 | ipynb | Jupyter Notebook | 2021/anns.ipynb | harvard-visionlab/psy1406 | 20a620e09e5ed96f56d0ad1bfcfca9f03829638a | [
"MIT"
]
| 1 | 2021-01-28T22:02:05.000Z | 2021-01-28T22:02:05.000Z | 2021/anns.ipynb | harvard-visionlab/psy1406 | 20a620e09e5ed96f56d0ad1bfcfca9f03829638a | [
"MIT"
]
| null | null | null | 2021/anns.ipynb | harvard-visionlab/psy1406 | 20a620e09e5ed96f56d0ad1bfcfca9f03829638a | [
"MIT"
]
| null | null | null | 30.985878 | 710 | 0.485589 | [
[
[
"<a href=\"https://colab.research.google.com/github/harvard-visionlab/psy1410/blob/master/psy1410_week03_anns.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Psy1410 - Week03 - Artificial Neural Networks with PyTorch\n\nThis week we're going to use PyTorch to create and train ANNs on the MNIST digit recognition task.\n\nFor this workshop, set your Runtime to GPU!",
"_____no_output_____"
],
[
"## Boilerplate \"Model Training Functions\"\n\nHere we'll define any helper functions that we can use as we go. We'll probably add to this as we find a need for new helper functions.\n\nTo train a model, we'll need:\n- [x] a model\n- [x] a dataset (MNIST), with train/test split\n- [x] a loss function (Cross Entropy Loss)\n- [x] an optimizer (which will do all of the `back-propogation of errors` that we need to modify the weights\n- [x] we need a training function\n- [x] useful to have a validation function too (to test how well the model generalizes to data outside of the training set)",
"_____no_output_____"
]
],
[
[
"#required\n%config InlineBackend.figure_format = 'retina'\nimport torch \nimport torch \nimport torch.nn as nn \nimport numpy as np \nfrom PIL import Image \nfrom IPython.core.debugger import set_trace \nimport matplotlib.pyplot as plt\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\n\ndef show_image(img):\n return Image.fromarray( (img * 256).squeeze().numpy().astype(np.uint8) )\n\ndef show_weights(m):\n idx = -1\n fig, axs = plt.subplots(2, 5, figsize=(15, 6))\n for row in axs:\n for ax in row:\n idx += 1\n if hasattr(m, 'weight') and len(m.weight.shape) == 4:\n shape = m.weight[idx].shape[1:]\n w = m.weight[idx].detach().reshape(*shape).cpu()\n ax.imshow(w, extent=[0, 1, 0, 1], cmap='gray')\n elif hasattr(m, 'weight'):\n w = m.weight[idx].detach().reshape(28,28).cpu()\n ax.imshow(w, extent=[0, 1, 0, 1], cmap='coolwarm')\n else:\n w = m.fc.weight[idx].detach().reshape(28,28).cpu()\n ax.imshow(w, extent=[0, 1, 0, 1], cmap='coolwarm')\n ax.set_title(f\"unit={idx}\")\n ax.grid(True)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n plt.show() \n\ndef train(model, train_loader, criterion, optimizer, mb=None):\n # use gpu if available\n device = 'cuda' if torch.cuda.is_available() else 'cpu' \n model.to(device)\n criterion.to(device)\n\n # place model in \"train mode\" so gradients are computed\n model.train()\n \n # loop through ALL images\n losses = []\n for imgs,labels in progress_bar(train_loader, parent=mb):\n # put images and labels on gpu if available\n imgs = imgs.to(device)\n labels = labels.to(device)\n\n # forward pass (pass images through model)\n output = model(imgs)\n\n # compute the loss \n loss = criterion(output, labels)\n\n # backward pass (compute gradients, do backprop)\n optimizer.zero_grad() # zero out any existing gradients\n loss.backward() # compute gradients (tells us which direction to change weights)\n optimizer.step() # modify learnable parameters (optimizer decides 
how much to update weights, in direction of gradients)\n\n losses.append(loss.item())\n\n return torch.tensor(losses).mean().item()\n\n#required\ndef validate(model, test_loader, criterion, optimizer, mb=None):\n # use gpu if available\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n criterion.to(device)\n\n # place the model in \"eval\" mode (do not compute gradients during testing) \n model.eval() \n\n # iterate over batches, compute loss and accuracy for each batch\n losses = []\n correct = []\n with torch.no_grad():\n for imgs,labels in progress_bar(test_loader, parent=mb):\n imgs = imgs.to(device)\n labels = labels.to(device)\n\n # forward pass \n output = model(imgs)\n\n # calculate loss and classification accuracy\n loss = criterion(output, labels)\n _, correct_k = accuracy(output, labels, topk=(1,)) \n\n losses.append(loss.item())\n correct.append(correct_k)\n\n top1 = torch.cat(correct).mean()\n\n return torch.tensor(losses).mean().item(), top1.mean().item()\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n acc = []\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float()\n acc.append(correct_k) \n res.append(correct_k.sum(0, keepdim=True).mul_(100.0 / batch_size))\n return res, acc[0]\n\n#required\nfrom fastprogress.fastprogress import master_bar, progress_bar \n\ndef train_model(num_epochs):\n mb = master_bar( range(num_epochs) )\n mb.names = ['train_loss', 'val_loss']\n xs,y1,y2 = [], [], []\n for epoch in mb:\n train_loss = train(model, train_loader, criterion, optimizer, mb=mb)\n val_loss, top1 = validate(model, test_loader, criterion, optimizer, mb=mb)\n # print(f\"Epoch {epoch}: Train Loss {train_loss}, Val Loss {val_loss} 
Top1 {top1}\")\n\n # graph results\n xs.append(epoch)\n y1.append(train_loss)\n y2.append(val_loss)\n graphs = [[xs,y1], [xs,y2]]\n x_bounds = [0, num_epochs]\n y_bounds = [0,max(max(y1),max(y2))*1.1]\n mb.update_graph(graphs, x_bounds, y_bounds)\n print(\"All Done!\")\n print(f\"Epoch {epoch}: Train Loss {train_loss:3.3f}, Val Loss {val_loss:3.3f} Top1 {top1:3.3f}\") ",
"_____no_output_____"
]
],
[
[
"## A Minimal ANN\n\nLet's start by defining a very minimal artificial neural network, with a single fully-connected linear layer that directly maps the input (1x28x28 pixels) to the output categories (10 digit categories).",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\n\nclass MyNet(nn.Module):\n def __init__(self):\n super(MyNet, self).__init__()\n # in_features = 784, because the input image is 1x28x28 = 784\n # out_features = 10, because there are 10 output categories (digits 0-9)\n self.fc = nn.Linear(in_features=784, out_features=10)\n \n def forward(self, x):\n # in the \"forward pass\", we take an input (a batch of images, x)\n # then first we flatten it into batchSize x 784, \n batchSize = x.shape[0] # first dimension of x is \"batchSize\"\n x = x.view(batchSize, -1) # the -1 tells pytorch to flatten the tensor to be batchSize x \"whatever size fits\"\n\n # finally, we pass the flattened input into our fully-connected layer \n # which will compute the weighted sum of the input for each of the 10 \n # categories\n x = self.fc(x)\n\n return x",
"_____no_output_____"
],
[
"# create an instance of MyNet\nmodel = MyNet()\nmodel",
"_____no_output_____"
],
[
"for param in model.parameters():\n print(param.shape)",
"_____no_output_____"
]
],
[
[
"## Inspect/visualize the weights of your randomly intialized network\n\nRemember that each output node has a weight on each of the 28x28 pixels. We can visualize these weights by color-coding the pixels according to the weight (negatives in blue, positives in red; brighter colors for larger weights).",
"_____no_output_____"
]
],
[
[
"# we can directly access modules of the model, and their params, like so:\nmodel.fc.weight.shape, model.fc.bias.shape",
"_____no_output_____"
],
[
"# grab the weights for the `zero` output node\nw = model.fc.weight[0].detach().reshape(28,28)\nw.shape",
"_____no_output_____"
],
[
"plt.imshow(w, extent=[0, 1, 0, 1], cmap='coolwarm');",
"_____no_output_____"
],
[
"show_weights(model.fc)",
"_____no_output_____"
]
],
[
[
"## MNIST Dataset\n\n- we'll start with the standard MNIST dataset",
"_____no_output_____"
]
],
[
[
"transform = transforms.Compose([\n transforms.ToTensor(),\n])",
"_____no_output_____"
],
[
"train_dataset = datasets.MNIST('./data/MNIST', train=True, download=True, transform=transform)\ntrain_dataset",
"_____no_output_____"
],
[
"test_dataset = datasets.MNIST('./data/MNIST', train=False, download=True, transform=transform)\ntest_dataset",
"_____no_output_____"
],
[
"train_dataset[0][0].shape",
"_____no_output_____"
],
[
"train_loader = DataLoader(train_dataset, batch_size=256, \n num_workers=4, pin_memory=True, shuffle=True)\ntrain_loader",
"_____no_output_____"
],
[
"test_loader = DataLoader(test_dataset, batch_size=256, \n num_workers=4, pin_memory=True, shuffle=False)\ntest_loader",
"_____no_output_____"
],
[
"imgs, labels = next(iter(train_loader))",
"_____no_output_____"
],
[
"imgs.shape, labels.shape",
"_____no_output_____"
],
[
"output = model(imgs)\noutput.shape",
"_____no_output_____"
],
[
"idx = 10\nactual = labels[idx].item()\nprint(actual)\nshow_image(imgs[idx])",
"_____no_output_____"
],
[
"predicted = output[idx].argmax().item() \nprint(f\"predicted={predicted}, actual={actual}\")",
"_____no_output_____"
]
],
[
[
"## Loss Function\n\nLet's use the standard cross-entropy loss function",
"_____no_output_____"
]
],
[
[
"# create a fresh instance of your model \nmodel = MyNet()",
"_____no_output_____"
],
[
"# define loss function (criterion)\ncriterion = nn.CrossEntropyLoss()",
"_____no_output_____"
],
[
"# pass some images through your model, get the outputs\n# why is the output 256 x 10?\nimgs, labels = next(iter(train_loader))\noutput = model(imgs)\noutput.shape",
"_____no_output_____"
],
[
"loss = criterion(output, labels)\nloss ",
"_____no_output_____"
]
],
[
[
"## Define the Optimizer",
"_____no_output_____"
]
],
[
[
"# define the optimizer\n# this updates the weights for us using gradient descent\noptimizer = torch.optim.SGD(model.parameters(), lr=.03)",
"_____no_output_____"
]
],
[
[
"# Exercise 1 - Train the Model to Recognize Digits!",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\n\nclass MyNet(nn.Module):\n def __init__(self):\n super(MyNet, self).__init__()\n # in_features = 784, because the input image is 1x28x28 = 784\n # out_features = 10, because there are 10 output categories (digits 0-9)\n self.fc = nn.Linear(in_features=784, out_features=10)\n \n def forward(self, x):\n # in the \"forward pass\", we take an input (a batch of images, x)\n # then first we flatten it into batchSize x 784, \n batchSize = x.shape[0] # first dimension of x is \"batchSize\"\n x = x.view(batchSize, -1) # the -1 tells pytorch to flatten the tensor to be batchSize x \"whatever size fits\"\n\n # finally, we pass the flattened input into our fully-connected layer \n # which will compute the weighted sum of the input for each of the 10 \n # categories\n x = self.fc(x)\n\n return x",
"_____no_output_____"
],
[
"# create a fresh instance of our model\nmodel = MyNet()\nmodel",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
],
[
"# load dataset\ntransform = transforms.Compose([\n transforms.ToTensor(),\n])\ntrain_dataset = datasets.MNIST('./data/MNIST', train=True, download=True, transform=transform)\nprint(train_dataset)\n\ntest_dataset = datasets.MNIST('./data/MNIST', train=False, download=True, transform=transform)\nprint(test_dataset)\n\ntrain_loader = DataLoader(train_dataset, batch_size=256, \n num_workers=8, pin_memory=True, shuffle=True)\n\ntest_loader = DataLoader(test_dataset, batch_size=256, \n num_workers=8, pin_memory=True, shuffle=False)",
"_____no_output_____"
],
[
"criterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=.03)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
]
],
[
[
"## Exercise 2 - Improve your Model by training longer (e.g., 30 epochs)\n",
"_____no_output_____"
]
],
[
[
"model = MyNet()\nmodel",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
],
[
"criterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=.03)\ntrain_model(num_epochs=30)",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
]
],
[
[
"## Exercise 3 - Improve your Model by using a better optimizer (e.g., Adam, Adadelta), or by varying the learning rate, or both; \n\nSave a record of the results for each variant you try (you can just create a new +Code cell for each run).\n\nSGD is known to show the \"best generalization\" but can also take longer. Adam and Adadelta are adaptive (intelligently adjust the step size), but Adam is known to have poorer generalization. ",
"_____no_output_____"
]
],
[
[
"# let's try Adam, an adaptive optimizer\nmodel = MyNet()\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=.03)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
],
[
"# let's try Adadelta\nmodel = MyNet()\ncriterion = nn.CrossEntropyLoss()\n# optimizer = torch.optim.Adam(model.parameters(), lr=1.0)\noptimizer = torch.optim.Adadelta(model.parameters(), lr=1.0)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
],
[
"# let's try Adam with a higher learning rate (matching the default for Adadelta)\nmodel = MyNet()\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=1.0)\n#optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
]
],
[
[
"Compare the output weights (the visualizations) for SGD (exercise one), Adam, and Adadelta. What do you notice? Which optimizer do you prefer, and why?",
"_____no_output_____"
],
[
"## Exercise 4 - Improve your Model by adding one or more hidden layers, with or without ReLU activations.",
"_____no_output_____"
]
],
[
[
"class MyShallowNet(nn.Module):\n def __init__(self, use_relu=True):\n super(MyShallowNet, self).__init__()\n self.use_relu = use_relu\n # in_features = 784, because the input image is 1x28x28 = 784\n # out_features = 128, because there are 10 output categories (digits 0-9)\n self.fc = nn.Linear(in_features=784, out_features=128)\n if self.use_relu:\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(in_features=128, out_features=10)\n \n def forward(self, x):\n # in the \"forward pass\", we take an input (a batch of images, x)\n # then first we flatten it into batchSize x 784, \n batchSize = x.shape[0] # first dimension of x is \"batchSize\"\n x = x.view(batchSize, -1) # the -1 tells pytorch to flatten the tensor to be batchSize x \"whatever size fits\"\n \n x = self.fc(x)\n if self.use_relu:\n x = self.relu1(x)\n x = self.fc2(x)\n return x",
"_____no_output_____"
],
[
"# train with ReLU\nmodel = MyShallowNet(use_relu=True)\nmodel",
"_____no_output_____"
],
[
"criterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adadelta(model.parameters(), lr=1.0)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
],
[
"# train without ReLU\nmodel = MyShallowNet(use_relu=False)\nmodel",
"_____no_output_____"
],
[
"criterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adadelta(model.parameters(), lr=1.0)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"# Let's try a deeper network, including dropout to prevent overfitting\n\nclass MyDeepNet(nn.Module):\n def __init__(self, use_relu=True, use_dropout=True):\n super(MyDeepNet, self).__init__()\n self.use_relu = use_relu\n self.use_dropout = use_dropout\n\n # in_features = 784, because the input image is 1x28x28 = 784\n # out_features = 128, because there are 10 output categories (digits 0-9)\n self.fc = nn.Linear(in_features=784, out_features=128)\n if self.use_relu:\n self.relu1 = nn.ReLU()\n \n if self.use_dropout:\n self.dropout1 = nn.Dropout2d(0.50)\n\n self.fc2 = nn.Linear(in_features=128, out_features=128)\n if self.use_relu:\n self.relu2 = nn.ReLU()\n \n if self.use_dropout:\n self.dropout2 = nn.Dropout2d(0.50)\n\n self.fc3 = nn.Linear(in_features=128, out_features=10)\n\n def forward(self, x):\n # in the \"forward pass\", we take an input (a batch of images, x)\n # then first we flatten it into batchSize x 784, \n batchSize = x.shape[0] # first dimension of x is \"batchSize\"\n x = x.view(batchSize, -1) # the -1 tells pytorch to flatten the tensor to be batchSize x \"whatever size fits\"\n \n x = self.fc(x)\n if self.use_relu:\n x = self.relu1(x)\n \n if self.use_dropout:\n x = self.dropout1(x)\n\n x = self.fc2(x)\n if self.use_relu:\n x = self.relu2(x)\n\n if self.use_dropout:\n x = self.dropout2(x)\n\n x = self.fc3(x)\n\n return x",
"_____no_output_____"
],
[
"model = MyDeepNet(use_relu=True, use_dropout=True)\nmodel",
"_____no_output_____"
],
[
"criterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adadelta(model.parameters(), lr=1.0)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"show_weights(model)",
"_____no_output_____"
],
[
"# your turn. Two of us try without relu, two without dropout, two without both. ",
"_____no_output_____"
],
[
"model = MyDeepNet(use_relu=False, use_dropout=True)\nmodel",
"_____no_output_____"
]
],
[
[
"## gather measures from everyone to plot a graph",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport seaborn as sns\n\ndf = pd.DataFrame(columns=['relu','dropout','train_loss','val_loss','top1'])\nscores = [\n (True, True, 0.14, 0.09, 0.97),\n]\nfor relu,dropout,train_loss,val_loss,top1 in scores:\n model_name = f'relu{relu}_dropout{dropout}'\n df = df.append({\n #\"model_name\": model_name,\n \"relu\": relu,\n \"dropout\": dropout,\n \"train_loss\": train_loss,\n \"val_loss\": val_loss,\n \"top1\": top1,\n }, ignore_index=True)\ndf",
"_____no_output_____"
],
[
"ax = sns.barplot(data=df, x=\"relu\", y=\"train_loss\", hue=\"dropout\", \n order=[True,False], hue_order=[True,False]); ",
"_____no_output_____"
],
[
"ax = sns.barplot(data=df, x=\"relu\", y=\"val_loss\", hue=\"dropout\", \n order=[True,False], hue_order=[True,False]); ",
"_____no_output_____"
],
[
"ax = sns.barplot(data=df, x=\"relu\", y=\"top1\", hue=\"dropout\", \n order=[True,False], hue_order=[True,False]); ",
"_____no_output_____"
]
],
[
[
"## Exercise 4 - Improve your Model by using convolutional layers\n\nSave a record of the results for each variant you try.",
"_____no_output_____"
]
],
[
[
"import torch.nn as nn\nfrom collections import OrderedDict\n# reference: https://github.com/pytorch/examples/blob/master/mnist/main.py\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.cnn_backbone = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=1)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1)),\n ('relu2', nn.ReLU()),\n ('pool2', nn.MaxPool2d(2)),\n ('dropout2', nn.Dropout2d(0.25))\n ]))\n self.head = nn.Sequential(OrderedDict([\n ('fc3', nn.Linear(9216, 128)),\n ('relu3', nn.ReLU()),\n ('dropout3', nn.Dropout2d(0.50)),\n ('fc4', nn.Linear(128, 10)),\n ('relu4', nn.ReLU()),\n ]))\n\n def forward(self, x):\n x = self.cnn_backbone(x)\n x = torch.flatten(x, 1)\n x = self.head(x)\n return x",
"_____no_output_____"
],
[
"model = CNN()\nmodel",
"_____no_output_____"
],
[
"fake_imgs = torch.rand(100,1,28,28)\nout = model(fake_imgs)\nout.shape",
"_____no_output_____"
],
[
"criterion = nn.CrossEntropyLoss()\n# optimizer = torch.optim.Adam(model.parameters(), lr=.03)\noptimizer = torch.optim.Adadelta(model.parameters(), lr=1.0)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"show_weights(model.cnn_backbone.conv1)",
"_____no_output_____"
]
],
[
[
"Do these look like Gabor Wavelets? I wonder if we had larger kernels (5x5) if they would look more so? That might make more sense to test when we go for larger input images (Exercise 5).",
"_____no_output_____"
],
[
"## Exercise 5 - Challenge your model by adding position and scale variation, see how this affects learning, test performance.\n\nNOTE: If you get an \"out of memory\" error, you might have to goto \"Runtime\" => \"Restart runtime\". However, after you restart a runtime, everything is wiped from memory, include functions like \"train_model\", so you'll have to go back to the top and re-run the cell where we import and define functions (near the top, marked #required). \n\nBUT WAIT. Why did we run out of memory? Could be a bunch of other variables that you don't need hogging GPU space. It could be that your current model is TOO BIG to fit on the GPU, or your images are too large, or you are trying to run too many of them through the model at once.\n\nSo, how do you trouble shoot? Try the following, in order, but remember to restart the runtime and run #required cells before each troubleshooting step:\n- just restart and try again (don't change your model or training code): restart runtime, load only the required cells and anything you need for your new model\n- try reducing your batch size (but if this get's below 64, you'll run into issues due to small batch size)\n- try reducing the input image size\n- try reducing your model size\n- buy a bigger GPU\n- Pay Amazon or Google to rent their bigger GPUs",
"_____no_output_____"
]
],
[
[
"#required\nimport torch\nimport numpy as np\nimport torchvision.transforms.functional as F\nfrom torchvision import datasets, transforms\nfrom PIL import Image\n\ndef random_size(img, sizes=[28,56,128]):\n s = np.random.choice(sizes)\n return F.resize(img, (s, s))\n\ndef embed_image_centered(img, bg_size=(224,224)):\n img_w, img_h = img.size\n background = Image.new('L', bg_size, color=0)\n bg_w, bg_h = background.size\n # centered, but we want to randomly position\n offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)\n background.paste(img, offset)\n return background \n\ndef embed_image_random(img, bg_size=(224,224)):\n img_w, img_h = img.size\n background = Image.new('L', bg_size, color=0)\n bg_w, bg_h = background.size\n h_shift = (bg_w - img_w) * np.random.rand()\n v_shift = (bg_h - img_h) * np.random.rand()\n offset = (int(h_shift), int(v_shift))\n background.paste(img, offset)\n return background ",
"_____no_output_____"
],
[
"from torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\n\ntransform = transforms.Compose([\n random_size,\n embed_image_random,\n transforms.ToTensor(),\n])",
"_____no_output_____"
],
[
"train_dataset = datasets.MNIST('./data/MNIST', train=True, download=True, transform=transform)\ntrain_dataset",
"_____no_output_____"
],
[
"train_dataset[0][0]",
"_____no_output_____"
],
[
"test_dataset = datasets.MNIST('./data/MNIST', train=False, download=True, transform=transform)\ntest_dataset",
"_____no_output_____"
],
[
"train_loader = DataLoader(train_dataset, batch_size=200, \n num_workers=4, pin_memory=True, shuffle=True)\ntrain_loader",
"_____no_output_____"
],
[
"test_loader = DataLoader(test_dataset, batch_size=200, \n num_workers=4, pin_memory=True, shuffle=True)\ntest_loader",
"_____no_output_____"
],
[
"import torch.nn as nn\nfrom collections import OrderedDict\n# reference: https://github.com/pytorch/examples/blob/master/mnist/main.py\nclass CNN_224(nn.Module):\n def __init__(self):\n super(CNN_224, self).__init__()\n self.cnn_backbone = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=1)),\n ('relu1', nn.ReLU()),\n ('pool1', nn.MaxPool2d(2)),\n ('conv2', nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1)),\n ('relu2', nn.ReLU()),\n ('pool2', nn.MaxPool2d(2)),\n ('dropout2', nn.Dropout2d(0.25)), \n ]))\n self.downsample = nn.AdaptiveAvgPool2d((6,6))\n self.head = nn.Sequential(OrderedDict([\n ('fc3', nn.Linear(64*6*6, 128)),\n ('relu3', nn.ReLU()),\n ('dropout3', nn.Dropout2d(0.50)),\n ('fc4', nn.Linear(128, 10)),\n ('relu4', nn.ReLU()),\n ]))\n\n def forward(self, x):\n x = self.cnn_backbone(x)\n x = self.downsample(x)\n x = torch.flatten(x, 1)\n x = self.head(x)\n return x ",
"_____no_output_____"
],
[
"model = CNN_224()\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adadelta(model.parameters(), lr=1.0)\ntrain_model(num_epochs=15)",
"_____no_output_____"
],
[
"train_model(num_epochs=15)",
"_____no_output_____"
],
[
"train_model(num_epochs=70)",
"_____no_output_____"
]
],
[
[
"## Bonus Exercises\n\nExperiment with varying the kernel_size (ours are 3x3, Alexnet's were 11x11), or out_channels of different layers in your network. Use our default settings above as your \"baseline\". Then only adjust one parameter at a time, so you can isolate which factor accounts for any changes in performance (if you change two things, you have no way of knowing which one \"caused\" the change in performance). Try visualizing your kernels (whether you vary kernel_size or out_channels) to see whether the tuning functions change in any obvious way. Once you have a sense for how individual parameters affect your model, you can experiment with more dramatic changes (changing multiple parameters at once). \n\nCoordinate with each other if you would like to collate results (since it takes a while to run even one model!).\n",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec63acf123521651e50e45eb3e80e2d4d075cbe5 | 677,997 | ipynb | Jupyter Notebook | ipynb/Ames Housing Prices.ipynb | Mazharul-Hossain/DSP-COMP6993 | 9770d638d551fda4e15bfd1a7ede2e319f987998 | [
"MIT"
]
| null | null | null | ipynb/Ames Housing Prices.ipynb | Mazharul-Hossain/DSP-COMP6993 | 9770d638d551fda4e15bfd1a7ede2e319f987998 | [
"MIT"
]
| null | null | null | ipynb/Ames Housing Prices.ipynb | Mazharul-Hossain/DSP-COMP6993 | 9770d638d551fda4e15bfd1a7ede2e319f987998 | [
"MIT"
]
| null | null | null | 289.371319 | 157,052 | 0.870173 | [
[
[
"from __future__ import absolute_import, division, print_function",
"_____no_output_____"
],
[
"import numpy, os, pandas ",
"_____no_output_____"
],
[
"import tensorflow",
"_____no_output_____"
],
[
"from tensorflow import keras",
"_____no_output_____"
],
[
"print(tensorflow.__version__)",
"1.11.0\n"
],
[
"AmesHousing = pandas.read_excel('../data/AmesHousing.xls')\nAmesHousing.head(10)",
"_____no_output_____"
],
[
"cd ..",
"D:\\UofMemphis\\Fall-18\\COMP6993\\DSP-COMP6993\n"
],
[
"from libpy import NS_dp",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
]
],
[
[
"We use our own function to clean the data",
"_____no_output_____"
]
],
[
[
"df = NS_dp.clean_Ames_Housing(AmesHousing)",
"E:\\Anaconda3\\lib\\site-packages\\sklearn\\preprocessing\\data.py:323: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by MinMaxScaler.\n return self.partial_fit(X, y)\n"
],
[
"data, labels = df.iloc[ : , 2: ].drop( columns=[ \"SalePrice\" ] ), df[ \"SalePrice\" ]",
"_____no_output_____"
],
[
"train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=0.2)",
"_____no_output_____"
],
[
"from libpy import FS",
"_____no_output_____"
],
[
"# train_data, train_labels, test_data, test_labels = FS.feature_select(df)",
"_____no_output_____"
],
[
"print(\"Training set: {}\".format(train_data.shape)) # 1607 examples, ** features\nprint(\"Testing set: {}\".format(test_data.shape)) # 1071 examples, 13 features",
"Training set: (2142, 210)\nTesting set: (536, 210)\n"
],
[
"train_data.sample(10)",
"_____no_output_____"
]
],
[
[
"### Model building",
"_____no_output_____"
],
[
"Here we created a neural network of our own. We trained it and evaluated its score to measure performance. Later we plot our results.",
"_____no_output_____"
],
[
"### Train a model",
"_____no_output_____"
]
],
[
[
"from libpy import KR",
"_____no_output_____"
]
],
[
[
"Here we use the default 64-node Neural Network",
"_____no_output_____"
]
],
[
[
"model = KR.build_model(train_data)\nmodel.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 64) 13504 \n_________________________________________________________________\ndense_1 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_2 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 65 \n=================================================================\nTotal params: 21,889\nTrainable params: 21,889\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"history, model = KR.train_model( model, train_data, train_labels )",
"\n....................................................................................................\n....................................................................................................\n.........................."
]
],
[
[
"Below, we calculate the MAE score to find test accuracy, i.e., our model's accuracy"
"_____no_output_____"
]
],
[
[
"[loss, mae] = model.evaluate(test_data, test_labels, verbose=0)\n\nprint(\"Testing set Mean Abs Error: ${:7.2f}\".format( mae ))",
"Testing set Mean Abs Error: $18051.26\n"
],
[
"test_predictions = model.predict(test_data).flatten()",
"_____no_output_____"
]
],
[
[
"### Plot",
"_____no_output_____"
],
[
"Here we plot our model performance. ",
"_____no_output_____"
]
],
[
[
"from matplotlib import pyplot",
"_____no_output_____"
],
[
"from libpy import DNN_plot",
"_____no_output_____"
],
[
"DNN_plot.plot_history(history)",
"_____no_output_____"
]
],
[
[
"We show our Training vs Validation loss. Here we used tf.losses.mean_squared_error (mse) as the loss parameter and mean_absolute_error (mae) to plot our Training performance",
"_____no_output_____"
],
[
"Here we can see the Regression model ",
"_____no_output_____"
]
],
[
[
"DNN_plot.plot_predict( test_labels, test_predictions )",
"_____no_output_____"
]
],
[
[
"In the above graph, we can relate Test Labels vs Predicted Labels. This gives us an idea of how much price difference our model produces. As we can see, the results are correlated: the model predicts better at low prices, but gives larger errors for higher-priced houses.\n\nSince we are predicting price, we can now check how much price difference we are getting.",
"_____no_output_____"
]
],
[
[
"DNN_plot.plot_predict_error(test_labels, test_predictions)",
"_____no_output_____"
]
],
[
[
"### Experiment Depth of Neural Network",
"_____no_output_____"
],
[
"We want to check whether our model performs better as the number of hidden layers increases. We increased up to 8 hidden layers",
"_____no_output_____"
]
],
[
[
"from libpy import CV",
"_____no_output_____"
],
[
"depths = []\nscores_mae = []\n\nfor i in range( 7 ):\n model = KR.build_model(train_data, depth=i) \n \n history, model = KR.train_model( model, train_data, train_labels )\n print(\"\")\n model.summary()\n \n DNN_plot.plot_history(history)\n \n [loss, mae] = model.evaluate(test_data, test_labels, verbose=0)\n print(\"Testing set Mean Abs Error: ${:7.2f}\".format( mae ))\n \n test_predictions = model.predict(test_data).flatten()\n DNN_plot.plot_predict( test_labels, test_predictions )\n \n depths.append( i+2 )\n scores_mae.append( mae) ",
"\n.\n.......................................................................................................................................................................................................\n.\n.......................................................................................................................................................................................................\n.\n.......................................................................................................................................................................................................\n.\n.......................................................................................................................................................................................................\n.\n......................................................................................................................................................................................................._________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_4 (Dense) (None, 64) 13504 \n_________________________________________________________________\ndense_5 (Dense) (None, 1) 65 \n=================================================================\nTotal params: 13,569\nTrainable params: 13,569\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"CV.plot_any( depths, scores_mae, xlabel='Depth', ylabel='Mean Abs Error [1000$]' )",
"_____no_output_____"
]
],
[
[
"Based on our experiment, the effect of depth felt random to us, and increasing the depth did not produce any visible improvement in accuracy. Based on the results, we kept the default depth at 4.",
"_____no_output_____"
],
[
"### Experiment Overfitting",
"_____no_output_____"
],
[
"In this part, we try multiple neural network models with various node counts to check Overfitting vs Underfitting",
"_____no_output_____"
]
],
[
[
"model_16 = KR.build_model(train_data, units=16)\nhistory_16, model_16 = KR.train_model( model_16, train_data, train_labels )\nmodel_16.summary()",
"\n.\n.\n.\n.\n.\n.\n.\n.\n............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.........................................................................................................................................
...................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.............................................................................................................................................................................................................................................................................................................
..............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................._________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_39 (Dense) (None, 16) 3376 \n_________________________________________________________________\ndense_40 (Dense) (None, 16) 272 \n_________________________________________________________________\ndense_41 (Dense) (None, 16) 272 \n_________________________________________________________________\ndense_42 (Dense) (None, 1) 17 \n=================================================================\nTotal params: 3,937\nTrainable params: 3,937\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"loss, acc = model_16.evaluate( train_data, train_labels )\nprint(\"Trained model, accuracy: {:5.2f}%\".format( acc ))",
"2142/2142 [==============================] - 0s 53us/step\nTrained model, accuracy: 18471.21%\n"
],
[
"model_32 = KR.build_model(train_data, units=32)\nhistory_32, model_32 = KR.train_model( model_32, train_data, train_labels )\nmodel_32.summary()",
"\n.\n.\n.\n.\n.\n.\n.\n.\n.\n...............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
.........................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n...............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................._________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_43 (Dense) (None, 32) 6752 
\n_________________________________________________________________\ndense_44 (Dense) (None, 32) 1056 \n_________________________________________________________________\ndense_45 (Dense) (None, 32) 1056 \n_________________________________________________________________\ndense_46 (Dense) (None, 1) 33 \n=================================================================\nTotal params: 8,897\nTrainable params: 8,897\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"loss, acc = model_32.evaluate( train_data, train_labels )\nprint(\"Trained model, accuracy: {:5.2f}%\".format( acc))",
"2142/2142 [==============================] - 0s 51us/step\nTrained model, accuracy: 31917.29%\n"
],
[
"model_48 = KR.build_model(train_data, units=48)\nhistory_48, model_48 = KR.train_model( model_48, train_data, train_labels )\nmodel_48.summary()",
"\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n..................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
.....................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n..................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.........................................................................................................................................................................................................................................................................................................._________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_47 (Dense) (None, 48) 10128 \n_________________________________________________________________\ndense_48 (Dense) (None, 48) 
2352 \n_________________________________________________________________\ndense_49 (Dense) (None, 48) 2352 \n_________________________________________________________________\ndense_50 (Dense) (None, 1) 49 \n=================================================================\nTotal params: 14,881\nTrainable params: 14,881\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"loss, acc = model_48.evaluate( train_data, train_labels )\nprint(\"Trained model, accuracy: {:5.2f}%\".format( acc))",
"2142/2142 [==============================] - 0s 60us/step\nTrained model, accuracy: 18281.55%\n"
],
[
"model_64 = KR.build_model( train_data, units=64 )\nhistory_64, model_64 = KR.train_model( model_64, train_data, train_labels )\nmodel_64.summary()",
"\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.....................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n....................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
.................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................._________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_51 (Dense) (None, 64) 13504 \n_________________________________________________________________\ndense_52 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_53 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_54 (Dense) (None, 1) 65 \n=================================================================\nTotal params: 21,889\nTrainable params: 21,889\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"loss, acc = model_64.evaluate( train_data, train_labels )\nprint(\"Trained model, accuracy: {:5.2f}%\".format( acc))",
"2142/2142 [==============================] - 0s 56us/step\nTrained model, accuracy: 38304.85%\n"
],
[
"model_128 = KR.build_model( train_data, units=128)\nhistory_128, model_128 = KR.train_model( model_128, train_data, train_labels )\nmodel_128.summary()",
"\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n..........................................................................................................................................................................................................................................................................................................................................................................................................................................................._________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_55 (Dense) 
(None, 128) 27008 \n_________________________________________________________________\ndense_56 (Dense) (None, 128) 16512 \n_________________________________________________________________\ndense_57 (Dense) (None, 128) 16512 \n_________________________________________________________________\ndense_58 (Dense) (None, 1) 129 \n=================================================================\nTotal params: 60,161\nTrainable params: 60,161\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"loss, acc = model_128.evaluate( train_data, train_labels )\nprint(\"Trained model, accuracy: {:5.2f}%\".format( acc))",
"2142/2142 [==============================] - 0s 57us/step\nTrained model, accuracy: 29156.59%\n"
],
[
"model_512 = KR.build_model(train_data, units=512)\nhistory_512, model_512 = KR.train_model( model_512, train_data, train_labels )\nmodel_512.summary()",
"\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n...........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n.\n..................................................................................................................................................................................................................................................................................................................................._________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_59 (Dense) (None, 512) 108032 
\n_________________________________________________________________\ndense_60 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndense_61 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndense_62 (Dense) (None, 1) 513 \n=================================================================\nTotal params: 633,857\nTrainable params: 633,857\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"loss, acc = model_512.evaluate( train_data, train_labels )\nprint(\"Trained model, accuracy: {:5.2f}%\".format( acc))",
"2142/2142 [==============================] - 0s 63us/step\nTrained model, accuracy: 23475.53%\n"
],
[
"DNN_plot.plot_compare_history( [\n ('history_16', history_16 ),\n ('history_32', history_32 ),\n ('history_48', history_48 ),\n ('history_64', history_64 ),\n ('history_128', history_128 ),\n ('history_512', history_512 )\n] )",
"_____no_output_____"
]
],
[
[
"In our case, Validation and Training loss corrosponds to each other, all the models does not face any Overfitting or Underfitting. As we used EarlyStopping to stop training when val_loss stops to update. At the same time we used keras.regularizers.l2 to regularize our model.\n\nWe found accuracy difference in Models with different number of Units. ",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
ec63c476e8019bc91fa76157b3e95ad7a6ae37ec | 4,224 | ipynb | Jupyter Notebook | natural-language-processing/text-preprocessing-level-1/.ipynb_checkpoints/stemming-checkpoint.ipynb | naagarjunsa/data-science-portfolio | 02c8945de460ae241dce04ea69a78a0ffcb00b8d | [
"MIT"
]
| null | null | null | natural-language-processing/text-preprocessing-level-1/.ipynb_checkpoints/stemming-checkpoint.ipynb | naagarjunsa/data-science-portfolio | 02c8945de460ae241dce04ea69a78a0ffcb00b8d | [
"MIT"
]
| null | null | null | natural-language-processing/text-preprocessing-level-1/.ipynb_checkpoints/stemming-checkpoint.ipynb | naagarjunsa/data-science-portfolio | 02c8945de460ae241dce04ea69a78a0ffcb00b8d | [
"MIT"
]
| null | null | null | 33 | 259 | 0.546638 | [
[
[
"# TEXT PREPROCESSING - STEMMING\n\n**Stemming** is text preprocessing techinique where the tokens generated from the corpus are reduced to their base units. The base units need not be meaningful words. This makes it less complex and faster. Manual rule based way of cutting words down.\n* _go, going, gone --> go_\n\n\n**Overstemming** is when you stem too much of the token.\n* _universe, university, universities --> univers_\n\n**Understemming** is when you dont stem the token enough\n* datum , data -> dat ==> What about date?\n\n**StopWords** are words which do not add much meaning to the sentence.\n* a, an, the, is ",
"_____no_output_____"
]
],
[
[
"#will be using NLTK to demonstrate stemming\nimport nltk\nfrom nltk.stem import PorterStemmer\nfrom nltk.corpus import stopwords\n\nparagraph = \"\"\"Paragraphs are the building blocks of papers. Many students define paragraphs \\\nin terms of length. A paragraph is a group of at least five sentences. Paragraph \\\nis half a page long, etc.\"\"\"",
"_____no_output_____"
],
[
"#generate sentences from the paragraph\nsentences = nltk.sent_tokenize(paragraph)\nprint(sentences)",
"['Paragraphs are the building blocks of papers.', 'Many students define paragraphs in terms of length.', 'A paragraph is a group of at least five sentences.', 'Paragraph is half a page long, etc.']\n"
],
[
"#initalise stemmer and process each sentence\nstemmer = PorterStemmer()\nstem_sentences = []\nfor sentence in sentences:\n words = nltk.word_tokenize(sentence)\n print(\"Words before stemming : \", words)\n \n stem_words = []\n for word in words:\n if word not in set(stopwords.words('english')):\n stem_word = stemmer.stem(word)\n stem_words.append(stem_word)\n sentences[i] = ' '.join(stem_words)\n \n print(\"Words after stemming : \", stem_words)\nprint(sentences) ",
"Words before stemming : ['Paragraph', 'is', 'half', 'a', 'page', 'long', ',', 'etc', '.']\nWords after stemming : ['paragraph', 'half', 'page', 'long', ',', 'etc', '.']\nWords before stemming : ['Paragraph', 'is', 'half', 'a', 'page', 'long', ',', 'etc', '.']\nWords after stemming : ['paragraph', 'half', 'page', 'long', ',', 'etc', '.']\nWords before stemming : ['Paragraph', 'is', 'half', 'a', 'page', 'long', ',', 'etc', '.']\nWords after stemming : ['paragraph', 'half', 'page', 'long', ',', 'etc', '.']\nWords before stemming : ['Paragraph', 'is', 'half', 'a', 'page', 'long', ',', 'etc', '.']\nWords after stemming : ['paragraph', 'half', 'page', 'long', ',', 'etc', '.']\n['paragraph half page long , etc .', 'paragraph half page long , etc .', 'paragraph half page long , etc .', 'paragraph half page long , etc .']\n"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec63d0f4c81575252efa151b86331bb56687b648 | 599,109 | ipynb | Jupyter Notebook | 04.ComputerVision/segmentation_metrics_playground.ipynb | cuicaihao/Data_Science_Python | ca4cb64bf9afc1011c192586362d0dd036e9441e | [
"MIT"
]
| 2 | 2018-04-26T12:11:41.000Z | 2018-10-09T19:37:57.000Z | 04.ComputerVision/segmentation_metrics_playground.ipynb | cuicaihao/Data_Science_Python | ca4cb64bf9afc1011c192586362d0dd036e9441e | [
"MIT"
]
| null | null | null | 04.ComputerVision/segmentation_metrics_playground.ipynb | cuicaihao/Data_Science_Python | ca4cb64bf9afc1011c192586362d0dd036e9441e | [
"MIT"
]
| 4 | 2018-10-09T19:37:59.000Z | 2021-01-23T11:31:16.000Z | 667.159243 | 208,769 | 0.772706 | [
[
[
"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np",
"_____no_output_____"
],
[
"plt.rcParams.update({'font.size': 13})",
"_____no_output_____"
],
[
"import tensorflow as tf\nfrom tensorflow import keras as K",
"_____no_output_____"
]
],
[
[
"## Segmentation Metrics\n\nFor each metric I implement a Numpy and a Keras version, and verify that they give the same results. Examples are input images with a squares and circles.",
"_____no_output_____"
]
],
[
[
"def metrics_np(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False):\n \"\"\" \n Compute mean metrics of two segmentation masks, via numpy.\n \n IoU(A,B) = |A & B| / (| A U B|)\n Dice(A,B) = 2*|A & B| / (|A| + |B|)\n \n Args:\n y_true: true masks, one-hot encoded.\n y_pred: predicted masks, either softmax outputs, or one-hot encoded.\n metric_name: metric to be computed, either 'iou' or 'dice'.\n metric_type: one of 'standard' (default), 'soft', 'naive'.\n In the standard version, y_pred is one-hot encoded and the mean\n is taken only over classes that are present (in y_true or y_pred).\n The 'soft' version of the metrics are computed without one-hot \n encoding y_pred.\n The 'naive' version return mean metrics where absent classes contribute\n to the class mean as 1.0 (instead of being dropped from the mean).\n drop_last = True: boolean flag to drop last class (usually reserved\n for background class in semantic segmentation)\n mean_per_class = False: return mean along batch axis for each class.\n verbose = False: print intermediate results such as intersection, union\n (as number of pixels).\n Returns:\n IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True\n in which case it returns the per-class metric, averaged over the batch.\n \n Inputs are B*W*H*N tensors, with\n B = batch size,\n W = width,\n H = height,\n N = number of classes\n \"\"\"\n \n assert y_true.shape == y_pred.shape, 'Input masks should be same shape, instead are {}, {}'.format(y_true.shape, y_pred.shape)\n assert len(y_pred.shape) == 4, 'Inputs should be B*W*H*N tensors, instead have shape {}'.format(y_pred.shape)\n \n flag_soft = (metric_type == 'soft')\n flag_naive_mean = (metric_type == 'naive')\n \n num_classes = y_pred.shape[-1]\n # if only 1 class, there is no background class and it should never be dropped\n drop_last = drop_last and num_classes>1\n \n if not flag_soft:\n if num_classes>1:\n # get one-hot encoded 
masks from y_pred (true masks should already be in correct format, do it anyway)\n y_pred = np.array([ np.argmax(y_pred, axis=-1)==i for i in range(num_classes) ]).transpose(1,2,3,0)\n y_true = np.array([ np.argmax(y_true, axis=-1)==i for i in range(num_classes) ]).transpose(1,2,3,0)\n else:\n y_pred = (y_pred > 0).astype(int)\n y_true = (y_true > 0).astype(int)\n \n # intersection and union shapes are batch_size * n_classes (values = area in pixels)\n axes = (1,2) # W,H axes of each image\n intersection = np.sum(np.abs(y_pred * y_true), axis=axes) # or, np.logical_and(y_pred, y_true) for one-hot\n mask_sum = np.sum(np.abs(y_true), axis=axes) + np.sum(np.abs(y_pred), axis=axes)\n union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot\n \n if verbose:\n print('intersection (pred*true), intersection (pred&true), union (pred+true-inters), union (pred|true)')\n print(intersection, np.sum(np.logical_and(y_pred, y_true), axis=axes), union, np.sum(np.logical_or(y_pred, y_true), axis=axes))\n \n smooth = .001\n iou = (intersection + smooth) / (union + smooth)\n dice = 2*(intersection + smooth)/(mask_sum + smooth)\n \n metric = {'iou': iou, 'dice': dice}[metric_name]\n \n # define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise\n mask = np.not_equal(union, 0).astype(int)\n # mask = 1 - np.equal(union, 0).astype(int) # True = 1\n \n if drop_last:\n metric = metric[:,:-1]\n mask = mask[:,:-1]\n \n # return mean metrics: remaining axes are (batch, classes)\n # if mean_per_class, average over batch axis only\n # if flag_naive_mean, average over absent classes too\n if mean_per_class:\n if flag_naive_mean:\n return np.mean(metric, axis=0)\n else:\n # mean only over non-absent classes in batch (still return 1 if class absent for whole batch)\n return (np.sum(metric * mask, axis=0) + smooth)/(np.sum(mask, axis=0) + smooth)\n else:\n if flag_naive_mean:\n return np.mean(metric)\n else:\n # mean only over non-absent classes\n 
class_count = np.sum(mask, axis=0)\n return np.mean(np.sum(metric * mask, axis=0)[class_count!=0]/(class_count[class_count!=0]))\n \ndef mean_iou_np(y_true, y_pred, **kwargs):\n \"\"\"\n Compute mean Intersection over Union of two segmentation masks, via numpy.\n \n Calls metrics_np(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.\n \"\"\"\n return metrics_np(y_true, y_pred, metric_name='iou', **kwargs)\n\ndef mean_dice_np(y_true, y_pred, **kwargs):\n \"\"\"\n Compute mean Dice coefficient of two segmentation masks, via numpy.\n \n Calls metrics_np(y_true, y_pred, metric_name='dice'), see there for allowed kwargs.\n \"\"\"\n return metrics_np(y_true, y_pred, metric_name='dice', **kwargs)",
"_____no_output_____"
],
[
"# keras version\ndef seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False):\n \"\"\" \n Compute mean metrics of two segmentation masks, via Keras.\n \n IoU(A,B) = |A & B| / (| A U B|)\n Dice(A,B) = 2*|A & B| / (|A| + |B|)\n \n Args:\n y_true: true masks, one-hot encoded.\n y_pred: predicted masks, either softmax outputs, or one-hot encoded.\n metric_name: metric to be computed, either 'iou' or 'dice'.\n metric_type: one of 'standard' (default), 'soft', 'naive'.\n In the standard version, y_pred is one-hot encoded and the mean\n is taken only over classes that are present (in y_true or y_pred).\n The 'soft' version of the metrics are computed without one-hot \n encoding y_pred.\n The 'naive' version return mean metrics where absent classes contribute\n to the class mean as 1.0 (instead of being dropped from the mean).\n drop_last = True: boolean flag to drop last class (usually reserved\n for background class in semantic segmentation)\n mean_per_class = False: return mean along batch axis for each class.\n verbose = False: print intermediate results such as intersection, union\n (as number of pixels).\n Returns:\n IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True\n in which case it returns the per-class metric, averaged over the batch.\n \n Inputs are B*W*H*N tensors, with\n B = batch size,\n W = width,\n H = height,\n N = number of classes\n \"\"\"\n \n flag_soft = (metric_type == 'soft')\n flag_naive_mean = (metric_type == 'naive')\n \n # always assume one or more classes\n num_classes = K.shape(y_true)[-1]\n \n if not flag_soft:\n # get one-hot encoded masks from y_pred (true masks should already be one-hot)\n y_pred = K.one_hot(K.argmax(y_pred), num_classes)\n y_true = K.one_hot(K.argmax(y_true), num_classes)\n\n # if already one-hot, could have skipped above command\n # keras uses float32 instead of float64, would give error down (but numpy arrays or keras.to_categorical 
gives float64)\n y_true = K.cast(y_true, 'float32')\n y_pred = K.cast(y_pred, 'float32')\n\n # intersection and union shapes are batch_size * n_classes (values = area in pixels)\n axes = (1,2) # W,H axes of each image\n intersection = K.sum(K.abs(y_true * y_pred), axis=axes)\n mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes)\n union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot\n\n smooth = .001\n iou = (intersection + smooth) / (union + smooth)\n dice = 2 * (intersection + smooth)/(mask_sum + smooth)\n\n metric = {'iou': iou, 'dice': dice}[metric_name]\n\n # define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise\n mask = K.cast(K.not_equal(union, 0), 'float32')\n \n if drop_last:\n metric = metric[:,:-1]\n mask = mask[:,:-1]\n \n if verbose:\n print('intersection, union')\n print(K.eval(intersection), K.eval(union))\n print(K.eval(intersection/union))\n \n # return mean metrics: remaining axes are (batch, classes)\n if flag_naive_mean:\n return K.mean(metric)\n\n # take mean only over non-absent classes\n class_count = K.sum(mask, axis=0)\n non_zero = tf.greater(class_count, 0)\n non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero)\n non_zero_count = tf.boolean_mask(class_count, non_zero)\n \n if verbose:\n print('Counts of inputs with class present, metrics for non-absent classes')\n print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count))\n \n return K.mean(non_zero_sum / non_zero_count)\n\ndef mean_iou(y_true, y_pred, **kwargs):\n \"\"\"\n Compute mean Intersection over Union of two segmentation masks, via Keras.\n\n Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.\n \"\"\"\n return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs)\n\ndef mean_dice(y_true, y_pred, **kwargs):\n \"\"\"\n Compute mean Dice coefficient of two segmentation masks, via Keras.\n\n Calls metrics_k(y_true, y_pred, metric_name='iou'), 
see there for allowed kwargs.\n \"\"\"\n return seg_metrics(y_true, y_pred, metric_name='dice', **kwargs)",
"_____no_output_____"
]
],
[
[
"## Input images\nI will build simple geometrical figures and use those as \"objects\" for assessing segmentation metrics. For example, see below how to generate circles and diamonds with numpy",
"_____no_output_____"
]
],
[
[
"x,y = np.meshgrid(np.arange(-7,7.1), np.arange(-7,7.1))\n\nfig, (ax1, ax2) = plt.subplots(1,2,figsize = (13,4))\n\n# ax1.contourf(x, y, circle, alpha=0.5)\n# # ax1.scatter(x, y, circle)\n# for i in range(len(x)):\n# for j in range(len(y)):\n# ax1.text(x[i][j], y[i][j], '%d'% circle[i][j], ha='center', va='center')\n\ncircle_fuzzy = np.minimum([1], np.maximum([0], 25-x**2-y**2)/20)\n\nax1.contourf(x, y, circle_fuzzy, alpha=0.4, vmin=0, vmax=1)\n# ax1.scatter(x, y, circle_fuzzy)\nfor i in range(len(x)):\n for j in range(len(y)):\n fmt = '%d' if circle_fuzzy[i][j] %1 ==0 else '%1.1f'\n ax1.text(x[i][j], y[i][j], fmt % circle_fuzzy[i][j] , ha='center', va='center')\n\ndiamond = np.minimum([1], np.maximum([0], (3 - abs(x)) + (3 - abs(y)))/3)\n\nax2.contourf(x,y,diamond, alpha=0.4, vmin=0, vmax=1)\nfor i in range(len(x)):\n for j in range(len(y)):\n fmt = '%d' if diamond[i][j] %1 ==0 else '%1.1f'\n ax2.text(x[i][j], y[i][j], fmt % diamond[i][j] , ha='center', va='center')\n\nfor ax in (ax1, ax2): ax.set_axis_off()",
"_____no_output_____"
]
],
[
[
"Segmentation masks - for now only do object (circle, diamonds), will add background later on. Truth value mask is zero/one outside/inside of object. Predicted mask has continuous values.",
"_____no_output_____"
]
],
[
[
"def fuzzy_circle(xy=(0,0), r=4, fuzz_factor=0.8):\n x0, y0 = xy\n max_fuzz = fuzz_factor * r**2\n circle = np.minimum([1], np.maximum([0], r**2 - (x-x0)**2 - (y-y0)**2)/max_fuzz)\n \n return circle",
"_____no_output_____"
],
[
"def fuzzy_diamond(xy=(0,0), r=2, fuzz_factor=1.5):\n x0, y0 = xy\n max_fuzz = fuzz_factor * r\n diamond = np.minimum([1], np.maximum([0], (r - abs(x-x0)) + (r-abs(y-y0)))/max_fuzz)\n \n return diamond",
"_____no_output_____"
],
[
"fine_grid = np.meshgrid(np.arange(-7,7.1,0.05), np.arange(-7,7.1,0.05))\nx,y = fine_grid\n\nzz = fuzzy_circle((2,0), r=3, fuzz_factor=0.1)\nplt.contour(x, y, zz, levels = [0.99], colors='b')\nzz = fuzzy_circle((1,1))\nplt.contourf(x, y, zz, alpha=0.5, levels=[0,0.25,0.5,0.75,0.99,1.25], cmap = 'gray_r')\nzz = fuzzy_diamond(xy=(-3.5,-3.5))\nplt.contourf(x, y, zz, alpha=0.5, levels=[0,0.25,0.5,0.75,0.99,1.25], cmap = 'gray_r')\nplt.gca().set_aspect(1)\nplt.gca().set_axis_off()",
"_____no_output_____"
]
],
[
[
"Compute IoU and Dice metrics for series of two overlapping circles",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(1,3, figsize = (13,4))\nparams = [((0,0), 4), ((2,0), 4, ), ((2,0), 2) ]\ny_true = fuzzy_circle(fuzz_factor=0.01)\n\nprint('{:<10s} {:<10s} {:<10s}'.format('','explicit', 'np function'))\n\nfor i in range(len(axes)):\n axes[i].scatter(0,0, c='b')\n axes[i].add_artist(plt.Circle((0, 0), 4.05, lw=2, edgecolor='b', facecolor=(0,0,1,0.3), zorder=1))\n xy, r = params[i]\n axes[i].scatter(*xy, c='r')\n axes[i].add_artist(plt.Circle(xy, r, lw=2, ls='--', edgecolor='r', facecolor=(1,0,0,0.3), zorder=1))\n \n smooth = 0.001\n y_pred = fuzzy_circle(xy, r, 0.01)\n intersection = np.sum(np.logical_and(y_true, y_pred))\n union = np.sum(np.logical_or(y_pred, y_true))\n iou = np.mean((intersection)/union)\n dice = 2*np.mean(intersection/(np.sum(y_pred)+np.sum(y_true)))\n \n print('{:<10s} {:<10.2f} {:<10.2f}'.format('IoU', iou, metrics_np(np.reshape(y_true, (1,)+y_true.shape+(1,)), np.reshape(y_pred, (1,)+y_pred.shape+(1,)), metric_name = 'iou')))\n print('{:<10s} {:<10.2f} {:<10.2f}'.format('Dice', dice, metrics_np(np.reshape(y_true, (1,)+y_true.shape+(1,)), np.reshape(y_pred, (1,)+y_pred.shape+(1,)), metric_name = 'dice')))\n \n axes[i].text(0,5, f'IoU={iou:1.2f}\\nDice={dice:1.2f}', ha='center')\n \n axes[i].set_axis_off()\n axes[i].set(aspect=1, xlim=(-5,6.1), ylim=(-5,6))\nfig.savefig('metrics_iou_dice.png',bbox_inches='tight')",
" explicit np function\nIoU 1.00 1.00 \nDice 1.00 1.00 \nIoU 0.52 0.52 \nDice 0.69 0.69 \nIoU 0.25 0.25 \nDice 0.40 0.40 \n"
],
[
"x,y = fine_grid\nfig, axes = plt.subplots(1,4, figsize = (16,4))\nparams = [((0,0), 4, 0.8), ((0,0), 4, 1), ((2,0), 4, 0.8), ((2,0), 2, 0.8) ]\ny_true = fuzzy_circle(fuzz_factor=0.01)\n\nprint('{:<10s} {:<10s} {:<10s}'.format('','explicit', 'np function'))\n\nfor i in range(len(axes)):\n # axes[i].contour(x, y, y_true, levels = [0.99], colors='b')\n axes[i].add_artist(plt.Circle((0, 0), 4, lw=2, edgecolor='b', facecolor=(0,0,0,0), zorder=1))\n xy, r, fuzz_factor = params[i]\n y_pred = fuzzy_circle(xy, r, fuzz_factor)\n # axes[i].contourf(x, y, y_pred, alpha=0.5, levels=[0.01,0.5,0.99,1.25], cmap = 'gray_r')\n axes[i].pcolormesh(x, y, y_pred, alpha=0.3, shading='gouraud', cmap = 'gray_r')\n cs = axes[i].contour(x, y, y_pred, levels=[0.01,0.5,0.99,1.25], colors = 'k')\n axes[i].clabel(cs, fmt='%1.1f')\n \n intersection = np.sum(np.logical_and(y_true, y_pred))\n union = np.sum(np.logical_or(y_pred, y_true))\n iou = np.mean(intersection/union)\n \n intersection_soft = np.sum(np.abs(y_true * y_pred))\n union_soft = np.sum(np.abs(y_pred)) + np.sum(np.abs(y_true)) - intersection_soft\n iou_soft = np.mean(intersection_soft/union_soft)\n\n print('{:<10s} {:<10.2f} {:<10.2f}'.format('IoU',iou, metrics_np(np.reshape(y_true, (1,)+y_true.shape+(1,)), np.reshape(y_pred, (1,)+y_pred.shape+(1,)), metric_name='iou')))\n print('{:<10s} {:<10.2f} {:<10.2f}'.format('soft IoU',iou_soft, metrics_np(np.reshape(y_true, (1,)+y_true.shape+(1,)), np.reshape(y_pred, (1,)+y_pred.shape+(1,)),metric_name='iou', metric_type='soft')))\n \n axes[i].text(0,5, f'IoU={iou:1.2f}\\nsoft IoU={iou_soft:1.2f}', ha='center')\n \n axes[i].set_axis_off()\n axes[i].set(aspect=1)\nfig.savefig('metrics_iou_dice_soft.png',bbox_inches='tight')",
" explicit np function\nIoU 1.00 1.00 \nsoft IoU 0.60 0.60 \nIoU 1.00 1.00 \nsoft IoU 0.50 0.50 \nIoU 0.52 0.52 \nsoft IoU 0.42 0.42 \nIoU 0.25 0.25 \nsoft IoU 0.15 0.15 \n"
],
[
"y_true = fuzzy_circle(fuzz_factor=0.01)\ny_pred = fuzzy_circle((2,0), 4, 0.8)\n\nfig, axes = plt.subplots(1,3, figsize=(9,3))\nfor ax in axes:\n ax.set_axis_off(); ax.set(aspect=1)\n ax.add_artist(plt.Circle((0, 0), 4, lw=2, edgecolor='b', facecolor=(0,0,0,0), zorder=1))\n ax.text(-2,4,'True\\n mask', ha='center', va='bottom', color='b')\n ax.add_artist(plt.Circle((2, 0), 4, lw=2, edgecolor='r', facecolor=(0,0,0,0), zorder=1))\n ax.text(4,4,'Predicted\\n mask', ha='center', va='bottom', color='r')\n iax=list(axes).index(ax)\n if iax>0:\n axes[iax].annotate(['hard ','soft '][iax-1]+'intersection', (1,-2), xytext=(0,-6), ha='center', arrowprops={'arrowstyle': '->', 'color':'y'}, zorder=2)\n \naxes[0].pcolormesh(x,y, y_pred, cmap='gray_r')\naxes[1].pcolormesh(x,y, np.logical_and(y_true, y_pred), cmap='gray_r')\naxes[2].pcolormesh(x,y, y_true * y_pred, cmap='gray_r');\nfig.savefig('metrics_intersection_soft.png',bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"To test the non-naive mean_IoU, I need multiple classes, the masks of which overlap for only a small subset. I will arbitrarily take a circle and a diamond as examples of two classes, offset them a little and then find the IoU's",
"_____no_output_____"
]
],
[
[
"x,y = fine_grid\ntrue1 = fuzzy_circle(xy=(2,0), fuzz_factor=0.01)\npred1 = fuzzy_circle(xy=(3,0))\n# two instances of Diamond class: first has IoU=0.33 (half overlap), second one has IoU=0.24\ntrue2 = fuzzy_diamond(xy=(-4,-2),r=1,fuzz_factor=0.01) + fuzzy_diamond(xy=(-3.5,3),r=1,fuzz_factor=0.01)\npred2 = fuzzy_diamond(xy=(-5,-3),r=1) + fuzzy_diamond(xy=(-5,3),r=1)\nempty = np.zeros_like(true1)\n\nplt.contour(x,y,true1, colors='r')\nplt.contour(x,y,true2, colors='b')\n\nplt.pcolormesh(x,y,pred1, cmap=mpl.colors.ListedColormap([(0,0,0,0)]+list(map(plt.get_cmap('Oranges'), range(256)))[1:]))\nplt.pcolormesh(x,y,pred2, cmap=mpl.colors.ListedColormap([(0,0,0,0)]+list(map(plt.get_cmap('Purples'), range(256)))[1:]))\nplt.gca().set_axis_off()\nplt.gca().set(aspect=1)\n\ny_true = np.expand_dims(np.stack([true1, true2, empty, empty, (true1==0) & (true2==0).astype(int)], axis=-1), axis=0)\ny_pred = np.expand_dims(np.stack([pred1, pred2, empty, empty, (pred1==0) & (pred2==0).astype(int)], axis=-1), axis=0)\n\nprint('{:<60s} {:.3f}'.format('IoU of first class:', metrics_np(y_true[:,:,:,:1], y_pred[:,:,:,:1], metric_name='iou')))\nprint('{:<60s} {:.3f}'.format('IoU of second class:', metrics_np(y_true[:,:,:,1:2], y_pred[:,:,:,1:2], metric_name='iou')))\nprint('{:<60s} {:.3f}'.format('IoU of background:', metrics_np(y_true[:,:,:,-1:], y_pred[:,:,:,-1:], metric_name='iou')))\nprint('{:<60s} {}'.format('IoU of each class (explicit list):', metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive', drop_last=False, mean_per_class=True)))\nprint('{:<60s} {:.3f}'.format('mean IoU of all classes (no background, naive mean):', metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive')))\nprint('{:<60s} {:.3f}'.format('mean IoU of all classes (with background, naive mean):', metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive', drop_last = False)))\nprint('{:<60s} {:.3f}'.format('mean IoU of all non-absent classes (dropping background):', metrics_np(y_true, 
y_pred, metric_name='iou')))\n\nplt.text(5,6,'Circle\\nIoU={:1.2f}'.format(metrics_np(y_true[:,:,:,:1], y_pred[:,:,:,:1], metric_name='iou')), color='r', ha='center', va='center')\nplt.text(-5,6,'Diamond\\nIoU={:1.2f}'.format(metrics_np(y_true[:,:,:,1:2], y_pred[:,:,:,1:2], metric_name='iou')), color='b', ha='center', va='center')\nplt.text(0,-5,'mean IoU={:1.2f}'.format(metrics_np(y_true, y_pred, metric_name='iou')), ha='center', va='bottom');\n\nplt.savefig('metrics_mean_iou_multiclass.png', bbox_inches='tight')",
"IoU of first class: 0.726\nIoU of second class: 0.286\nIoU of background: 0.775\nIoU of each class (explicit list): [0.72645972 0.28643223 1. 1. 0.7748001 ]\nmean IoU of all classes (no background, naive mean): 0.753\nmean IoU of all classes (with background, naive mean): 0.758\nmean IoU of all non-absent classes (dropping background): 0.506\n"
]
],
[
[
"So far I have used `batch_size=1`. Test the difference between naive and standard ways to take the mean, for multiple examples. Here I will take two images, the first with two classes as above and the second one with only the circle.",
"_____no_output_____"
]
],
[
[
"y_true = np.stack([np.stack([true1, true2, empty, empty, (true1==0) & (true2==0).astype(int)], axis=-1),\n np.stack([true1, empty, empty, empty, (true1==0)], axis=-1)])\ny_pred = np.stack([np.stack([pred1, pred2, empty, empty, (pred1==0) & (pred2==0).astype(int)], axis=-1),\n np.stack([pred1, empty, empty, empty, (pred1==0)], axis=-1)])\n\nprint('Naive per-class mean: {} -- Overall mean: {:1.2f}'.format(\n metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive', mean_per_class=True), \n metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive')))\nprint('Standard per-class mean: {} -- Overall mean: {:1.2f}'.format(\n metrics_np(y_true, y_pred, metric_name='iou', mean_per_class=True), \n metrics_np(y_true, y_pred, metric_name='iou')))\nprint('Standard per-class mean, with background', metrics_np(y_true, y_pred, metric_name='iou', mean_per_class=True, drop_last=False))\n# metrics_np(y_true, y_pred, metric_name='iou', mean_per_class=True),\\\n# metrics_np(y_true, y_pred, metric_name='iou'),\\\nprint('Soft per-class mean ', metrics_np(y_true, y_pred, metric_name='iou', metric_type='soft', mean_per_class=True))\n",
"Naive per-class mean: [0.72645972 0.64321612 1. 1. ] -- Overall mean: 0.84\nStandard per-class mean: [0.72659643 0.28714509 1. 1. ] -- Overall mean: 0.51\nStandard per-class mean, with background [0.72659643 0.28714509 1. 1. 0.83668973]\nSoft per-class mean [0.54782264 0.17182951 1. 1. ]\n"
]
],
[
[
"Test Keras version and verify it gives same result as Numpy",
"_____no_output_____"
]
],
[
[
"print('hard IoU {:1.6f} {:1.6f}'.format(metrics_np(y_true, y_pred, metric_name='iou'), \n K.eval(seg_metrics(y_true, y_pred, metric_name='iou'))))\nprint('soft IoU {:1.6f} {:1.6f}'.format(metrics_np(y_true, y_pred, metric_name='iou', metric_type='soft'), \n K.eval(seg_metrics(y_true, y_pred, metric_name='iou', metric_type='soft'))))\nprint('hard IoU, naive mean {:1.6f} {:1.6f}'.format(metrics_np(y_true, y_pred, metric_name='iou', metric_type='naive'), \n K.eval(seg_metrics(y_true, y_pred, metric_name='iou', metric_type='naive'))))\nprint('hard Dice {:1.6f} {:1.6f}'.format(metrics_np(y_true, y_pred, metric_name='dice'), \n K.eval(seg_metrics(y_true, y_pred, metric_name='dice'))))",
"hard IoU 0.506446 0.506446\nsoft IoU 0.359299 0.359298\nhard IoU, naive mean 0.842419 0.842419\nhard Dice 0.643436 0.643436\n"
]
],
[
[
"Print verbose info for metrics: look at number of pixels in intersection, union for each class and each input (`batch * classes` axes)",
"_____no_output_____"
]
],
[
[
"metrics_np(y_true, y_pred, metric_name='iou', verbose=True),\\\nmetrics_np(y_true, y_pred, metric_name='iou', metric_type='standard', mean_per_class=True),\\\nK.eval(seg_metrics(y_true, y_pred, metric_name='iou', verbose=True))\n",
"intersection (pred*true), intersection (pred&true), union (pred+true-inters), union (pred|true)\n[[16896 2850 0 0 46316]\n [16896 0 0 0 56266]] [[16896 2850 0 0 46316]\n [16896 0 0 0 56266]] [[23258 9950 0 0 59778]\n [23258 0 0 0 62628]] [[23258 9950 0 0 59778]\n [23258 0 0 0 62628]]\nintersection, union\n[[16896. 2850. 0. 0. 46316.]\n [16896. 0. 0. 0. 56266.]] [[23258. 9950. 0. 0. 59778.]\n [23258. 0. 0. 0. 62628.]]\n[[0.72645974 0.28643215 nan nan 0.7748001 ]\n [0.72645974 nan nan nan 0.89841604]]\nCounts of inputs with class present, metrics for non-absent classes\n[2. 1. 0. 0.] [0.72645974 0.28643224]\n"
]
],
[
[
"### Coarse-grained example\n\nImage with few pixels to explicitly check what is going on at the pixel level",
"_____no_output_____"
]
],
[
[
"x,y = np.meshgrid(np.arange(-7,7.1,1), np.arange(-7,7.1,1))\n\ntrue1 = fuzzy_circle(xy=(2,0), fuzz_factor=0.01)\npred1 = fuzzy_circle(xy=(3,0), fuzz_factor=1)\n# two instances of Diamond class: first has IoU=0.33 (half overlap), second one has IoU=0.24\ntrue2 = fuzzy_diamond(xy=(-4,-2),r=1,fuzz_factor=0.01) + fuzzy_diamond(xy=(-3,3),r=1,fuzz_factor=0.01)\npred2 = fuzzy_diamond(xy=(-5,-3),r=1) + fuzzy_diamond(xy=(-5,3),r=1)\nempty = np.zeros_like(true1)\n\n# build N*W*H*C ground truth and predicted masks\ny_true = np.stack([np.stack([true1, true2, empty, empty, (true1==0) & (true2==0).astype(int)], axis=-1),\n np.stack([true1, empty, empty, empty, (true1==0)], axis=-1)])\ny_pred = np.stack([np.stack([pred1, pred2, empty, empty, (pred1==0) & (pred2==0).astype(int)], axis=-1),\n np.stack([pred1, empty, empty, empty, (pred1==0)], axis=-1)])\n\n# plot predicted masks\nplt.pcolormesh(x,y,pred1, cmap=mpl.colors.ListedColormap([(0,0,0,0)]+list(map(plt.get_cmap('Oranges'), range(256)))[1:]))\nplt.pcolormesh(x,y,pred2, cmap=mpl.colors.ListedColormap([(0,0,0,0)]+list(map(plt.get_cmap('Purples'), range(256)))[1:]))\n\n# plot true masks\nplt.pcolormesh(x,y,true1, cmap=mpl.colors.ListedColormap([(0,0,0,0), (1,0,0,0.2)]))\nplt.pcolormesh(x,y,true2, cmap=mpl.colors.ListedColormap([(0,0,0,0), (0,0,1,0.2)]))\n\nfor i in range(len(x)):\n for j in range(len(y)):\n if pred1[i][j]!=0:\n fmt = '%d' if pred1[i][j] %1 ==0 else '%1.1f'\n plt.text(x[i][j]+0.5, y[i][j]+0.5, fmt % pred1[i][j] , ha='center', va='center')\n if pred2[i][j]!=0:\n fmt = '%d' if pred2[i][j] %1 ==0 else '%1.1f'\n plt.text(x[i][j]+0.5, y[i][j]+0.5, fmt % pred2[i][j] , ha='center', va='center')\n\nplt.text(5,6,'Circles\\n(I,U)=({:},{:})\\nIoU={:1.2f}'.format(np.logical_and(pred1, true1).sum(), np.logical_or(pred1, true1).sum(),\n metrics_np(y_true[:1,:,:,:1], y_pred[:1,:,:,:1], metric_name='iou')), color='r', ha='center', 
va='center')\nplt.text(-5.5,0.5,'Diamonds\\n(I,U)=({:},{:})\\nIoU={:1.2f}'.format(np.logical_and(pred2, true2).sum(), np.logical_or(pred2, true2).sum(),\n metrics_np(y_true[:1,:,:,1:2], y_pred[:1,:,:,1:2], metric_name='iou')), color='b', ha='center', va='center')\nplt.text(0,-5,'mean IoU={:1.2f}'.format(metrics_np(y_true[:1], y_pred[:1], metric_name='iou')), ha='center', va='bottom');\n\nplt.gca().set_axis_off()\n# plt.gca().set(aspect=1)\nplt.savefig('metrics_mean_iou_coarse_example.png', bbox_inches='tight')",
"_____no_output_____"
],
[
"metrics_np(y_true, y_pred, metric_name='iou',verbose=True),\\\nmetrics_np(y_true, y_pred, metric_name='iou', mean_per_class=True),\\\nK.eval(seg_metrics(y_true, y_pred, metric_name='iou', verbose=True))\n",
"intersection (pred*true), intersection (pred&true), union (pred+true-inters), union (pred|true)\n[[ 38 3 0 0 156]\n [ 38 0 0 0 173]] [[ 38 3 0 0 156]\n [ 38 0 0 0 173]] [[ 52 17 0 0 184]\n [ 52 0 0 0 187]] [[ 52 17 0 0 184]\n [ 52 0 0 0 187]]\nintersection, union\n[[ 38. 3. 0. 0. 156.]\n [ 38. 0. 0. 0. 173.]] [[ 52. 17. 0. 0. 184.]\n [ 52. 0. 0. 0. 187.]]\n[[0.7307692 0.1764706 nan nan 0.84782606]\n [0.7307692 nan nan nan 0.9251337 ]]\nCounts of inputs with class present, metrics for non-absent classes\n[2. 1. 0. 0.] [0.7307744 0.17651904]\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec63e7891163b527e677cf1c3e8b5d87dfc85d71 | 14,029 | ipynb | Jupyter Notebook | 02DataPreprocess/02Outlier.ipynb | aonekoda/Sklearn | 4266799c22f41dfd302f164b07fc4b7b110a2929 | [
"MIT"
]
| 2 | 2020-08-14T07:34:33.000Z | 2020-09-09T07:57:59.000Z | 02DataPreprocess/02Outlier.ipynb | aonekoda/Sklearn | 4266799c22f41dfd302f164b07fc4b7b110a2929 | [
"MIT"
]
| null | null | null | 02DataPreprocess/02Outlier.ipynb | aonekoda/Sklearn | 4266799c22f41dfd302f164b07fc4b7b110a2929 | [
"MIT"
]
| 2 | 2020-09-03T01:08:01.000Z | 2020-09-09T06:53:41.000Z | 25.051786 | 93 | 0.385416 | [
[
[
"# Outlier 처리하기\n\n* 통계적으로 outlier, 특이치는 다른 관측치와 크게 다른 데이터 포인트를 말한다.\n* 특이치는 통계 분석에 문제를 일으킬 수 있다.(평균과 표준편차에 영향을 줌)\n",
"_____no_output_____"
],
[
"### 이상치 확인하기\n* [-1.5 * IQR ~ 1.5*IQR] 의 구간에서 벗어나 있는 경우\n* 백분위 수에서 5th ~ 95th 범위에서 벗어나는 경우",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"# outlier를 포함한 임의의 데이터\nx = pd.Series([23,1,3,5,34,6,32,7,45,34,78])\nx.describe()",
"_____no_output_____"
]
],
[
[
"보통 통계적으로 [-1.5 * IQR ~ 1.5*IQR] 의 구간에서 벗어나 있는 경우를 outlier라고 한다.",
"_____no_output_____"
]
],
[
[
"# 이상치의 인덱스 값을 리턴하는 함수를 만든다. \n\ndef idx_of_outliers(x):\n q1, q3 = np.percentile(x, [25,75])\n IQR = q3 - q1\n lower_bound = q1 - [IQR * 1.5]\n upper_bound = q3 + [IQR * 1.5]\n return np.where((x > upper_bound) | (x < lower_bound))\n",
"_____no_output_____"
],
[
"idx_of_outliers(x.to_numpy())",
"_____no_output_____"
]
],
[
[
"백분위 수에서 5th ~ 95th 범위에서 벗어나는 경우를 outlier로 할 수도 있다.",
"_____no_output_____"
]
],
[
[
"print('5th percentile: ', x.quantile(q=0.05))\nprint('95th percentile: ', x.quantile(q=0.95))",
"5th percentile: 2.0\n95th percentile: 61.5\n"
],
[
"x[(x < x.quantile(q=0.05)) | (x > x.quantile(q=0.95))]",
"_____no_output_____"
]
],
[
[
"### outlier 처리하기\n* outliers의 값을 제거\n* 자연로그를 취해서 값을 감소시키는 방법 등으로 변환\n",
"_____no_output_____"
]
],
[
[
"houses = pd.DataFrame()\nhouses['Price'] = [534433, 392333, 293222, 4322032]\nhouses['Bedrooms'] = [2, 3.5, 2, 116]\nhouses['Square_Feets'] = [1500, 2500, 1500, 48000]\nhouses",
"_____no_output_____"
]
],
[
[
"outlier를 처리하는 가장 간단한 방법은 outlier를 삭제하는 것이다.",
"_____no_output_____"
]
],
[
[
"houses.describe()",
"_____no_output_____"
],
[
"q1 = houses['Bedrooms'].quantile(0.25) \nq3 = houses['Bedrooms'].quantile(0.75)\niqr = q3 - q1\n# Apply filter with respect to IQR\nfilter = (houses['Bedrooms'] >= q1 - 1.5*iqr) & (houses['Bedrooms'] <= q3 + 1.5*iqr)\nhouses.loc[filter] ",
"_____no_output_____"
]
],
[
[
"outlier의 영향이 줄어들도록 column을 변환한다.",
"_____no_output_____"
]
],
[
[
"# 로그변환\nhouses['Log_Square_Feets'] = [np.log(x) for x in houses['Square_Feets']]\nhouses",
"_____no_output_____"
]
],
[
[
"### 결론\n* outlier의 확인 및 처리 방법에는 정답이 없다. \n* 여러가지 방법을 고려하여 적절한 방법을 선택한다.\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec63f3dab95ba9dea111448e88ad256aaa95e29d | 37,192 | ipynb | Jupyter Notebook | 11_YudistirNet.ipynb | VICS-CORE/stats | 721d8ddc627a52a78c35ec5baa31d852e4226573 | [
"MIT"
]
| 6 | 2020-05-11T14:25:50.000Z | 2021-01-19T17:17:03.000Z | 11_YudistirNet.ipynb | VICS-CORE/stats | 721d8ddc627a52a78c35ec5baa31d852e4226573 | [
"MIT"
]
| 1 | 2021-03-30T09:55:44.000Z | 2021-03-30T09:55:44.000Z | 11_YudistirNet.ipynb | VICS-CORE/stats | 721d8ddc627a52a78c35ec5baa31d852e4226573 | [
"MIT"
]
| 2 | 2020-05-25T12:59:04.000Z | 2020-06-29T14:22:07.000Z | 35.692898 | 359 | 0.517638 | [
[
[
"### Training RNN using World's data\n\nThis is based on `COVID-19 growth prediction using multivariate\nlong short term memory` by `Novanto Yudistira`\n\nhttps://arxiv.org/pdf/2005.04809.pdf\n\nhttps://github.com/VICS-CORE/lstmcorona/blob/master/lstm.py\n\n- We've aligned all countries' inputs rather than taking an absolute timeline. We start when cumulative number of confirmed cases in the country has crossed 100.\n- We've normalised data by dividing by a population factor. That way the network can learn a general understanding of the pattern irrespective of the country's population.\n- Rather than using the entire timeline as an input as suggested by NYudistira, we're training a fixed window (e.g. 20 days) so that the model learns to predict the future by looking at present data. The problem with fixed window approach is that some countries have peaked, while others have not. Also few countries start early, and some start late.\n- The paper uses a multivariate network with confirmed, recovered and deceased data. However this'd increase computation time and hence we're restricting ourselves to a univariate model with confirmed cases as the only parameter.\n\n#### Other ideas\n- One idea is to train the current net with only the most populous countries' data, since their behaviour would be similar to India's.\n- Adding metrics like humidity, population density, lockdown intensity etc might be beneficial and should have some correlation with the growth in cases. But this'd need more computation power.\n- Another idea is to train a neuralnet to predict SIR like buckets.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport requests as rq\nimport datetime as dt\nimport torch\nimport json\n\ntnn = torch.nn\ntop = torch.optim\nfrom torch.utils import data as tdt\n\nfrom matplotlib.ticker import MultipleLocator\nfrom matplotlib.dates import DayLocator, AutoDateLocator, ConciseDateFormatter\n%matplotlib inline",
"_____no_output_____"
],
[
"CUDA=\"cuda:0\"\nCPU=\"cpu\"\nif torch.cuda.is_available():\n device = torch.device(CUDA)\n cd = torch.cuda.current_device()\n print(\"Num devices:\", torch.cuda.device_count())\n print(\"Current device:\", cd)\n print(\"Device name:\", torch.cuda.get_device_name(cd))\n print(\"Device props:\", torch.cuda.get_device_properties(cd))\n print(torch.cuda.memory_summary(cd))\nelse:\n device = torch.device(CPU)\nprint(device)",
"_____no_output_____"
],
[
"# define paths\nDATA_DIR = 'data'\nMODELS_DIR = 'models'",
"_____no_output_____"
]
],
[
[
"### Colab only",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"_____no_output_____"
],
[
"%cd 'drive/My Drive/CS/colab/'",
"_____no_output_____"
],
[
"!cat /proc/cpuinfo",
"_____no_output_____"
],
[
"!cat /proc/meminfo",
"_____no_output_____"
]
],
[
[
"### Read OWID data",
"_____no_output_____"
]
],
[
[
"!curl https://covid.ourworldindata.org/data/owid-covid-data.csv --output data/owid-covid-data.csv",
"_____no_output_____"
],
[
"!head -n1 data/owid-covid-data.csv",
"_____no_output_____"
],
[
"cols = ['location', 'date', 'total_cases', 'new_cases', 'total_deaths', 'new_deaths', 'population']\ndates = ['date']\ndf = pd.read_csv(DATA_DIR + \"/owid-covid-data.csv\", \n usecols=cols,\n parse_dates=dates)\ndf.sample()",
"_____no_output_____"
]
],
[
[
"### LSTM",
"_____no_output_____"
]
],
[
[
"class YudistirNet(tnn.Module):\n def __init__(self, ip_seq_len=1, op_seq_len=1, hidden_size=1, num_layers=1):\n super(YudistirNet, self).__init__()\n \n self.ip_seq_len = ip_seq_len\n self.op_seq_len = op_seq_len\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n \n self.lstm = tnn.LSTM(input_size=1, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True)\n self.linear = tnn.Linear(self.hidden_size * self.ip_seq_len, self.op_seq_len)\n self.sigmoid = tnn.Sigmoid()\n \n def forward(self, ip):\n lstm_out, _ = self.lstm(ip)\n linear_out = self.linear(lstm_out.reshape(-1, self.hidden_size * self.ip_seq_len))\n sigmoid_out = self.sigmoid(linear_out.view(-1, self.op_seq_len))\n return sigmoid_out\n \n def predict(self, ip):\n with torch.no_grad():\n preds = self.forward(ip)\n return preds",
"_____no_output_____"
]
],
[
[
"### Checkpoint methods",
"_____no_output_____"
]
],
[
[
"def save_checkpoint(epoch, model, optimizer, trn_losses, val_losses, min_val_loss, path=\"\"):\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'trn_losses': trn_losses,\n 'val_losses': val_losses,\n 'min_val_loss': min_val_loss\n }, path or MODELS_DIR + \"/latest.pt\")\n print(\"Checkpoint saved\")\n \ndef load_checkpoint(path=\"\", device=\"cpu\"):\n cp = torch.load(path or MODELS_DIR + \"/latest.pt\", map_location=device)\n print(\"Checkpoint loaded\")\n return cp['epoch'], cp['model_state_dict'], cp['optimizer_state_dict'], cp['trn_losses'], cp['val_losses'], cp.get('min_val_loss', np.Inf)",
"_____no_output_____"
]
],
[
[
"### Config",
"_____no_output_____"
]
],
[
[
"# config\nIP_SEQ_LEN = 40\nOP_SEQ_LEN = 20\n\nBATCH_SIZE = 1\nVAL_RATIO = 0.3\n\nHIDDEN_SIZE = 20\nNUM_LAYERS = 4\nLEARNING_RATE = 0.001\nNUM_EPOCHS = 3001\n\n# to continue training on another model, set resume to true\nRESUME = False\n\nmodel = YudistirNet(ip_seq_len=IP_SEQ_LEN, op_seq_len=OP_SEQ_LEN, hidden_size=HIDDEN_SIZE, num_layers=NUM_LAYERS)\nmodel = model.to(device)\n\nloss_fn = tnn.MSELoss()\noptimizer = top.Adam(model.parameters(), lr=LEARNING_RATE)",
"_____no_output_____"
],
[
"sum(p.numel() for p in model.parameters() if p.requires_grad)",
"_____no_output_____"
]
],
[
[
"### Prepare dataset",
"_____no_output_____"
]
],
[
[
"def gen_dataset():\n ip_trn = []\n op_trn = []\n\n countries = df['location'].unique()\n pop_countries = ['China', 'United States', 'Indonesia', 'Pakistan', 'Brazil', 'Bangladesh', 'Russia', 'Mexico']\n\n c = 0\n for country in countries:\n if country in ['World', 'International', 'India']: # Countries to be skipped\n continue\n country_df = df.loc[df.location == country]\n tot_cases_gt_100 = (country_df['total_cases'] >= 100)\n country_df = country_df.loc[tot_cases_gt_100]\n\n if len(country_df) >= IP_SEQ_LEN + OP_SEQ_LEN:\n c += 1\n pop = country_df['population'].iloc[0]\n print(c, country, len(country_df), pop)\n daily_cases = np.array(country_df['new_cases'].rolling(7, center=True, min_periods=1).mean() * 1000 / pop, dtype=np.float32)\n\n for i in range(len(country_df) - IP_SEQ_LEN - OP_SEQ_LEN + 1):\n ip_trn.append(daily_cases[i : i+IP_SEQ_LEN])\n op_trn.append(daily_cases[i+IP_SEQ_LEN : i+IP_SEQ_LEN+OP_SEQ_LEN])\n\n ip_trn = torch.from_numpy(np.array(ip_trn, dtype=np.float32))\n op_trn = torch.from_numpy(np.array(op_trn, dtype=np.float32))\n dataset = tdt.TensorDataset(ip_trn, op_trn)\n\n val_len = int(VAL_RATIO * len(dataset))\n trn_len = len(dataset) - val_len\n trn_set, val_set = tdt.random_split(dataset, (trn_len, val_len))\n return trn_set, val_set\n\ntry:\n ds = torch.load(DATA_DIR + '/ds.pt')\n trn_set, val_set = ds['trn'], ds['val']\n print(\"Loaded dataset from ds.pt\")\nexcept FileNotFoundError:\n trn_set, val_set = gen_dataset()\n torch.save({'trn': trn_set, 'val': val_set}, DATA_DIR + '/ds.pt')\n print(\"Saved dataset to ds.pt\")\nfinally:\n print(\"Training data:\", len(trn_set), \"Validation data:\", len(val_set))",
"_____no_output_____"
],
[
"trn_loader = tdt.DataLoader(trn_set, shuffle=True, batch_size=BATCH_SIZE)\nval_loader = tdt.DataLoader(val_set, shuffle=True, batch_size=BATCH_SIZE)",
"_____no_output_____"
]
],
[
[
"### Train",
"_____no_output_____"
]
],
[
[
"trn_loss_vals = []\nval_loss_vals = []\ne = 0\nmin_val_loss = np.Inf\n\nif RESUME:\n e, model_dict, optimizer_dict, trn_loss_vals, val_loss_vals, min_val_loss = load_checkpoint(device=device)\n e+=1\n model.load_state_dict(model_dict)\n optimizer.load_state_dict(optimizer_dict)\n\n# TRAIN\nprint(\"BEGIN: [\", dt.datetime.now(), \"]\")\nwhile e < NUM_EPOCHS:\n model.train()\n trn_losses = []\n for data in trn_loader:\n ip, op = data\n ip = ip.to(device)\n op = op.to(device)\n optimizer.zero_grad() # set grads to 0\n preds = model(ip.view(-1, IP_SEQ_LEN, 1)) # predict\n loss = loss_fn(preds, op.view(-1, OP_SEQ_LEN)) # calc loss\n loss.backward() # calc and assign grads\n optimizer.step() # update weights\n trn_losses.append(loss) # logging\n avg_trn_loss = torch.stack(trn_losses).mean().item() * 10000\n trn_loss_vals.append(avg_trn_loss)\n \n model.eval()\n with torch.no_grad():\n val_losses = []\n for data in val_loader:\n ip, op = data\n ip = ip.to(device)\n op = op.to(device)\n preds = model(ip.view(-1, IP_SEQ_LEN, 1))\n loss = loss_fn(preds, op.view(-1, OP_SEQ_LEN))\n val_losses.append(loss)\n avg_val_loss = torch.stack(val_losses).mean().item() * 10000\n val_loss_vals.append(avg_val_loss)\n \n if e%10==0:\n print(\"[\", dt.datetime.now(), \"] epoch:\", f\"{e:3}\", \"avg_val_loss:\", f\"{avg_val_loss: .5f}\", \"avg_trn_loss:\", f\"{avg_trn_loss: .5f}\")\n if e%100==0:\n save_checkpoint(e, model, optimizer, trn_loss_vals, val_loss_vals, min_val_loss, MODELS_DIR + \"/latest-e\" + str(e) + \".pt\")\n if avg_val_loss <= min_val_loss:\n min_val_loss = avg_val_loss\n save_checkpoint(e, model, optimizer, trn_loss_vals, val_loss_vals, min_val_loss, MODELS_DIR + \"/best-e\" + str(e) + \".pt\")\n e+=1\nprint(\"END: [\", dt.datetime.now(), \"]\")",
"_____no_output_____"
]
],
[
[
"### Load saved model for evaluation",
"_____no_output_____"
]
],
[
[
"# model_path = MODELS_DIR + \"/IP20_OP10_H10_L4_E2001_LR001.pt\"\nmodel_path = \"/home/mayank/Downloads/ds4020-e17xx.pt\"#ds4020-0612-e50x.pt\"\ne, md, _, trn_loss_vals, val_loss_vals, _ = load_checkpoint(model_path, device=device)\nprint(e)\nmodel.load_state_dict(md)\nmodel.eval()",
"_____no_output_____"
]
],
[
[
"### Plot losses",
"_____no_output_____"
]
],
[
[
"df_loss = pd.DataFrame({\n 'trn_loss': trn_loss_vals,\n 'val_loss': val_loss_vals\n})\ndf_loss['trn_loss'] = df_loss['trn_loss'].rolling(10).mean()\ndf_loss['val_loss'] = df_loss['val_loss'].rolling(10).mean()\n_ = df_loss.plot(\n y=['trn_loss', 'val_loss'],\n title='Loss per epoch',\n subplots=True,\n figsize=(5,6),\n sharex=False,\n logy=True\n)",
"_____no_output_____"
]
],
[
[
"### Evalute fit",
"_____no_output_____"
]
],
[
[
"c = \"India\"\npop_fct = df.loc[df.location==c, 'population'].iloc[0] / 1000\n\nall_preds = []\npred_vals = []\nout_vals = []\n\ntest_data = np.array(df.loc[(df.location==c) & (df.total_cases>=100), 'new_cases'].rolling(7, center=True, min_periods=1).mean() / pop_fct, dtype=np.float32)\n\nfor i in range(len(test_data) - IP_SEQ_LEN - OP_SEQ_LEN + 1):\n ip = torch.tensor(test_data[i : i+IP_SEQ_LEN])\n op = torch.tensor(test_data[i+IP_SEQ_LEN : i+IP_SEQ_LEN+OP_SEQ_LEN])\n ip = ip.to(device)\n op = op.to(device)\n\n pred = model.predict(ip.view(1, IP_SEQ_LEN, 1)) \n if i==0: # prepend first input\n out_vals.extend(ip.view(IP_SEQ_LEN).cpu().numpy() * pop_fct)\n pred_vals.extend([np.NaN] * IP_SEQ_LEN)\n all_preds.append(pred.view(OP_SEQ_LEN).cpu().numpy() * pop_fct)\n pred_vals.append(pred.view(OP_SEQ_LEN).cpu().numpy()[0] * pop_fct)\n out_vals.append(op.view(OP_SEQ_LEN).cpu().numpy()[0] * pop_fct)\n\n# last N-1 values\nout_vals.extend(op.view(OP_SEQ_LEN).cpu().numpy()[1:] * pop_fct)\npred_vals.extend(([np.NaN] * OP_SEQ_LEN)[1:]) # pad with NaN\n\ncmp_df = pd.DataFrame({\n 'actual': out_vals,\n 'predicted0': pred_vals\n})\n\n# set date\nstart_date = df.loc[(df.location==c) & (df.total_cases>=100)]['date'].iloc[0]\nend_date = start_date + dt.timedelta(days=cmp_df.index[-1])\ncmp_df['Date'] = pd.Series([start_date + dt.timedelta(days=i) for i in range(len(cmp_df))])\n\n# plot noodles\nax=None\ni=IP_SEQ_LEN\nmape=[]\nfor pred in all_preds:\n cmp_df['predicted_cases'] = np.NaN\n cmp_df.loc[i:i+OP_SEQ_LEN-1, 'predicted_cases'] = pred\n ax = cmp_df.plot(x='Date', y='predicted_cases', ax=ax, legend=False)\n ape = np.array(100 * ((cmp_df['actual'] - cmp_df['predicted_cases']).abs() / cmp_df['actual']))\n# mape.append(ape.mean())\n mape.append(ape[~np.isnan(ape)])\n i+=1\n\ntotal = np.zeros(OP_SEQ_LEN)\nfor m in mape:\n total += m\nelwise_mape = total / len(mape)\nprint(\"Day wise accuracy:\", 100 - elwise_mape)\nacc = f\"{100 - sum(elwise_mape)/len(elwise_mape):0.2f}%\"\n# acc 
= f\"{100 - sum(mape)/len(mape):0.2f}%\"\n\n# plot primary lines\nax = cmp_df.plot(\n x='Date',\n y=['actual', 'predicted0'],\n figsize=(20,8),\n lw=5,\n title=c + ' | Daily predictions | ' + acc,\n ax=ax\n)\nmn_l = DayLocator()\nax.xaxis.set_minor_locator(mn_l)\nmj_l = AutoDateLocator()\nmj_f = ConciseDateFormatter(mj_l, show_offset=False)\nax.xaxis.set_major_formatter(mj_f)",
"_____no_output_____"
]
],
[
[
"### Test (predict) using OWID data",
"_____no_output_____"
]
],
[
[
"c = \"India\"\nn_days_prediction = 200\n\npop_fct = df.loc[df.location==c, 'population'].iloc[0] / 1000\ntest_data = np.array(df.loc[(df.location==c) & (df.total_cases>=100), 'new_cases'].rolling(7, center=True, min_periods=1).mean() / pop_fct, dtype=np.float32)\n\nin_data = test_data[-IP_SEQ_LEN:]\nout_data = np.array([], dtype=np.float32)\nfor i in range(int(n_days_prediction / OP_SEQ_LEN)):\n ip = torch.tensor(\n in_data,\n dtype=torch.float32\n )\n ip = ip.to(device)\n pred = model.predict(ip.view(1, IP_SEQ_LEN, 1))\n in_data = np.append(in_data[-IP_SEQ_LEN+OP_SEQ_LEN:], pred.cpu().numpy())\n out_data = np.append(out_data, pred.cpu().numpy())\n\norig_df = pd.DataFrame({\n 'actual': test_data * pop_fct\n})\nfut_df = pd.DataFrame({\n 'predicted': out_data * pop_fct\n})\n# print(fut_df['predicted'].astype('int').to_csv(sep='|', index=False))\norig_df = orig_df.append(fut_df, ignore_index=True, sort=False)\norig_df['total'] = (orig_df['actual'].fillna(0) + orig_df['predicted'].fillna(0)).cumsum()\n\nstart_date = df.loc[(df.location==c) & (df.total_cases>=100)]['date'].iloc[0]\norig_df['Date'] = pd.Series([start_date + dt.timedelta(days=i) for i in range(len(orig_df))])\nax = orig_df.plot(\n x='Date',\n y=['actual', 'predicted'],\n title=c + ' daily cases',\n figsize=(10,6),\n grid=True\n)\nmn_l = DayLocator()\nax.xaxis.set_minor_locator(mn_l)\nmj_l = AutoDateLocator()\nmj_f = ConciseDateFormatter(mj_l, show_offset=False)\nax.xaxis.set_major_formatter(mj_f)\n# orig_df['total'] = orig_df['total'].astype('int')\n# orig_df['predicted'] = orig_df['predicted'].fillna(0).astype('int')\n# print(orig_df.tail(n_days_prediction))\n\n# arrow\n# peakx = 172\n# peak = orig_df.iloc[peakx]\n# peak_desc = peak['Date'].strftime(\"%d-%b\") + \"\\n\" + str(int(peak['predicted']))\n# _ = ax.annotate(\n# peak_desc, \n# xy=(peak['Date'] - dt.timedelta(days=1), peak['predicted']),\n# xytext=(peak['Date'] - dt.timedelta(days=45), peak['predicted'] * .9),\n# arrowprops={},\n# 
bbox={'facecolor':'white'}\n# )\n\n# _ = ax.axvline(x=peak['Date'], linewidth=1, color='r')",
"_____no_output_____"
]
],
[
[
"### Statewise prediction",
"_____no_output_____"
]
],
[
[
"r=rq.get('https://api.covid19india.org/v3/min/timeseries.min.json')\nts = r.json()\n\ndata = []\nfor state in ts:\n for date in ts[state]:\n data.append((state, date, ts[state][date]['total'].get('confirmed', 0)))\n\nstates_df = pd.DataFrame(data, columns=['state', 'date', 'total'])\nstates_df['date'] = pd.to_datetime(states_df['date'])\nfirst_case_date = states_df['date'].min()",
"_____no_output_____"
],
[
"# http://www.populationu.com/india-population\nSTT_INFO = {\n 'AN' : {\"name\": \"Andaman & Nicobar Islands\", \"popn\": 450000},\n 'AP' : {\"name\": \"Andhra Pradesh\", \"popn\": 54000000},\n 'AR' : {\"name\": \"Arunachal Pradesh\", \"popn\": 30000000},\n 'AS' : {\"name\": \"Asaam\", \"popn\": 35000000},\n 'BR' : {\"name\": \"Bihar\", \"popn\": 123000000},\n 'CH' : {\"name\": \"Chandigarh\", \"popn\": 1200000},\n 'CT' : {\"name\": \"Chhattisgarh\", \"popn\": 29000000},\n 'DL' : {\"name\": \"Delhi\", \"popn\": 19500000},\n 'DN' : {\"name\": \"Dadra & Nagar Haveli and Daman & Diu\", \"popn\": 700000},\n 'GA' : {\"name\": \"Goa\", \"popn\": 1580000},\n 'GJ' : {\"name\": \"Gujarat\", \"popn\": 65000000},\n 'HP' : {\"name\": \"Himachal Pradesh\", \"popn\": 7400000},\n 'HR' : {\"name\": \"Haryana\", \"popn\": 28000000},\n 'JH' : {\"name\": \"Jharkhand\", \"popn\": 38000000},\n 'JK' : {\"name\": \"Jammu & Kashmir\", \"popn\": 13600000},\n 'KA' : {\"name\": \"Karnataka\", \"popn\": 67000000},\n 'KL' : {\"name\": \"Kerala\", \"popn\": 36000000},\n 'LA' : {\"name\": \"Ladakh\", \"popn\": 325000},\n 'MH' : {\"name\": \"Maharashtra\", \"popn\": 122000000},\n 'ML' : {\"name\": \"Meghalaya\", \"popn\": 3400000},\n 'MN' : {\"name\": \"Manipur\", \"popn\": 3000000},\n 'MP' : {\"name\": \"Madhya Pradesh\", \"popn\": 84000000},\n 'MZ' : {\"name\": \"Mizoram\", \"popn\": 1200000},\n 'NL' : {\"name\": \"Nagaland\", \"popn\": 2200000},\n 'OR' : {\"name\": \"Odisha\", \"popn\": 46000000},\n 'PB' : {\"name\": \"Punjab\", \"popn\": 30000000},\n 'PY' : {\"name\": \"Puducherry\", \"popn\": 1500000},\n 'RJ' : {\"name\": \"Rajasthan\", \"popn\": 80000000},\n 'TG' : {\"name\": \"Telangana\", \"popn\": 39000000},\n 'TN' : {\"name\": \"Tamil Nadu\", \"popn\": 77000000},\n 'TR' : {\"name\": \"Tripura\", \"popn\": 4100000},\n 'UP' : {\"name\": \"Uttar Pradesh\", \"popn\": 235000000},\n 'UT' : {\"name\": \"Uttarakhand\", \"popn\": 11000000},\n 'WB' : {\"name\": \"West Bengal\", \"popn\": 
98000000},\n# 'SK' : {\"name\": \"Sikkim\", \"popn\": 681000},\n# 'UN' : {\"name\": \"Unassigned\", \"popn\": 40000000}, #avg pop\n# 'LD' : {\"name\": \"Lakshadweep\", \"popn\": 75000}\n}\n\n# uncomment for India\n# STT_INFO = {\n# 'TT' : {\"name\": \"India\", \"popn\": 1387155000}\n# }",
"_____no_output_____"
]
],
[
[
"#### Dummy state data: fruit country",
"_____no_output_____"
]
],
[
[
"# dummy data for testing\n# SET 1 - 10 states\n# STT_INFO = {\n# 'A': {\"name\": \"Apple\", \"popn\": 10000000},\n# 'B': {\"name\": \"Berry\", \"popn\": 10000000},\n# 'C': {\"name\": \"Cherry\", \"popn\": 10000000},\n# 'D': {\"name\": \"Dates\", \"popn\": 10000000},\n# 'E': {\"name\": \"Elderberry\", \"popn\": 10000000},\n# 'F': {\"name\": \"Fig\", \"popn\": 10000000},\n# 'G': {\"name\": \"Grape\", \"popn\": 10000000},\n# 'H': {\"name\": \"Honeysuckle\", \"popn\": 10000000},\n# 'I': {\"name\": \"Icaco\", \"popn\": 10000000},\n# 'J': {\"name\": \"Jujube\", \"popn\": 10000000},\n# }\n# total = 100\n# SET 2 - 1 agg state\nSTT_INFO = {\n 'Z': {\"name\": \"FruitCountry1000x\", \"popn\": 10000000},\n}\ntotal = 1000\n\n\nr = {\n 'state': [],\n 'date': [],\n 'total': []\n}\n\nstart_date = dt.datetime(day=1, month=3, year=2020)\nend_date = dt.datetime.now()\nwhile start_date <= end_date:\n for s in STT_INFO:\n r['state'].append(s)\n r['date'].append(start_date)\n r['total'].append(total)\n total *= 1.03\n start_date += dt.timedelta(days=1)\nstates_df = pd.DataFrame(r)\nstates_df['date'] = pd.to_datetime(states_df['date'])\nstates_df.tail()",
"_____no_output_____"
]
],
[
[
"#### Predict",
"_____no_output_____"
]
],
[
[
"def expand(df):\n '''Fill missing dates in an irregular timeline'''\n min_date = df['date'].min()\n max_date = df['date'].max()\n idx = pd.date_range(min_date, max_date)\n \n df.index = pd.DatetimeIndex(df.date)\n df = df.drop(columns=['date'])\n return df.reindex(idx, method='pad').reset_index().rename(columns={'index':'date'})\n\ndef prefill(df, min_date):\n '''Fill zeros from first_case_date to df.date.min()'''\n assert(len(df.state.unique()) == 1)\n s = df.state.unique().item()\n min_date = min_date\n max_date = df['date'].max()\n idx = pd.date_range(min_date, max_date)\n \n df.index = pd.DatetimeIndex(df.date)\n df = df.drop(columns=['date'])\n return df.reindex(idx).reset_index().rename(columns={'index':'date'}).fillna({'state':s, 'total':0})",
"_____no_output_____"
],
[
"prediction_offset = 1 # how many days of data to skip\nn_days_prediction = 200 # number of days for prediction\nn_days_data = len(expand(states_df.loc[states_df['state']=='TT']))\nassert(n_days_prediction%OP_SEQ_LEN == 0)\n\nagg_days = n_days_data - prediction_offset + n_days_prediction # number of days for plotting agg curve i.e. prediction + actual data \nstates_agg = np.zeros(agg_days)\n\nax = None\napi = {}\nfor state in STT_INFO:\n pop_fct = STT_INFO[state][\"popn\"] / 1000\n \n state_df = states_df.loc[states_df['state']==state][:-prediction_offset] # skip todays data. covid19 returns incomplete.\n state_df = prefill(expand(state_df), first_case_date)\n state_df['daily'] = state_df['total'] - state_df['total'].shift(1).fillna(0)\n test_data = np.array(state_df['daily'].rolling(7, center=True, min_periods=1).mean() / pop_fct, dtype=np.float32)\n \n in_data = test_data[-IP_SEQ_LEN:]\n out_data = np.array([], dtype=np.float32)\n for i in range(int(n_days_prediction / OP_SEQ_LEN)):\n ip = torch.tensor(\n in_data,\n dtype=torch.float32\n ).to(device)\n try:\n pred = model.predict(ip.view(-1, IP_SEQ_LEN, 1))\n except Exception as e:\n print(state, e)\n in_data = np.append(in_data[-IP_SEQ_LEN+OP_SEQ_LEN:], pred.cpu().numpy())\n out_data = np.append(out_data, pred.cpu().numpy())\n \n sn = STT_INFO[state]['name']\n orig_df = pd.DataFrame({\n 'actual': np.array(test_data * pop_fct, dtype=np.int)\n })\n fut_df = pd.DataFrame({\n 'predicted': np.array(out_data * pop_fct, dtype=np.int)\n })\n # print(fut_df.to_csv(sep='|'))\n orig_df = orig_df.append(fut_df, ignore_index=True, sort=False)\n orig_df[sn] = orig_df['actual'].fillna(0) + orig_df['predicted'].fillna(0)\n orig_df['total'] = orig_df[sn].cumsum()\n states_agg += np.array(orig_df[sn][-agg_days:].fillna(0))\n\n # generate date col for orig_df from state_df\n start_date = state_df['date'].iloc[0]\n orig_df['Date'] = pd.to_datetime([(start_date + dt.timedelta(days=i)).strftime(\"%Y-%m-%d\") for i in 
range(len(orig_df))])\n# if orig_df[sn].max() < 10000: # or orig_df[sn].max() < 5000:\n# continue\n \n # print state, peak date, peak daily cases, cumulative since beginning\n peak = orig_df.loc[orig_df[sn].idxmax()]\n print(sn, \"|\", peak['Date'].strftime(\"%b %d\"), \"|\", int(peak[sn]), \"|\", int(orig_df['total'].iloc[-1]))\n \n # export data for API\n orig_df['deceased_daily'] = orig_df[sn] * 0.028\n orig_df['recovered_daily'] = orig_df[sn].shift(14, fill_value=0) - orig_df['deceased_daily'].shift(7, fill_value=0)\n orig_df['active_daily'] = orig_df[sn] - orig_df['recovered_daily'] - orig_df['deceased_daily']\n \n api[state] = {}\n for idx, row in orig_df[-agg_days:].iterrows():\n row_date = row['Date'].strftime(\"%Y-%m-%d\")\n api[state][row_date] = {\n \"delta\": {\n \"confirmed\": int(row[sn]),\n \"deceased\": int(row['deceased_daily']),\n \"recovered\": int(row['recovered_daily']),\n \"active\": int(row['active_daily'])\n }\n }\n \n # plot state chart\n ax = orig_df.plot(\n x='Date',\n y=[sn],\n title='Daily Cases',\n figsize=(15,10),\n grid=True,\n ax=ax,\n lw=3\n )\n mn_l = DayLocator()\n ax.xaxis.set_minor_locator(mn_l)\n mj_l = AutoDateLocator()\n mj_f = ConciseDateFormatter(mj_l, show_offset=False)\n ax.xaxis.set_major_formatter(mj_f)\n\n# plot aggregate chart\ncum_df = pd.DataFrame({\n 'states_agg': states_agg \n})\nlast_date = orig_df['Date'].iloc[-1].to_pydatetime()\nstart_date = last_date - dt.timedelta(days=agg_days)\ncum_df['Date'] = pd.to_datetime([(start_date + dt.timedelta(days=i)).strftime(\"%Y-%m-%d\") for i in range(len(cum_df))])\nax = cum_df.plot(\n x='Date',\n y=['states_agg'],\n title='Aggregate daily cases',\n figsize=(15,10),\n grid=True,\n lw=3\n)\nmn_l = DayLocator()\nax.xaxis.set_minor_locator(mn_l)\nmj_l = AutoDateLocator()\nmj_f = ConciseDateFormatter(mj_l, show_offset=False)\nax.xaxis.set_major_formatter(mj_f)\n\n# plot peak in agg\n# peakx = 171\n# peak = cum_df.iloc[peakx]\n# peak_desc = peak['Date'].strftime(\"%d-%b\") + 
\"\\n\" + str(int(peak['states_agg']))\n# _ = ax.annotate(\n# peak_desc, \n# xy=(peak['Date'] + dt.timedelta(days=1), peak['states_agg']),\n# xytext=(peak['Date'] + dt.timedelta(days=45), peak['states_agg'] * .9),\n# arrowprops={},\n# bbox={'facecolor':'white'}\n# )\n# _ = ax.axvline(x=peak['Date'], linewidth=1, color='r')",
"_____no_output_____"
]
],
[
[
"#### Export JSON for API",
"_____no_output_____"
]
],
[
[
"# aggregate predictions\napi['TT'] = {}\nfor state in api:\n if state == 'TT':\n continue\n for date in api[state]:\n api['TT'][date] = api['TT'].get(date, {'delta':{}, 'total':{}})\n for k in ['delta']: #'total'\n api['TT'][date][k]['confirmed'] = api['TT'][date][k].get('confirmed', 0) + api[state][date][k]['confirmed']\n api['TT'][date][k]['deceased'] = api['TT'][date][k].get('deceased', 0) + api[state][date][k]['deceased']\n api['TT'][date][k]['recovered'] = api['TT'][date][k].get('recovered', 0) + api[state][date][k]['recovered']\n api['TT'][date][k]['active'] = api['TT'][date][k].get('active', 0) + api[state][date][k]['active']\n\n# export\nwith open(\"predictions.json\", \"w\") as f:\n f.write(json.dumps(api, sort_keys=True))",
"_____no_output_____"
]
],
[
[
"#### Export data for video player",
"_____no_output_____"
]
],
[
[
"# aggregate predictions\napi['TT'] = {}\nfor state in api:\n if state == 'TT':\n continue\n for date in api[state]:\n api['TT'][date] = api['TT'].get(date, {})\n api['TT'][date]['c'] = api['TT'][date].get('c', 0) + api[state][date]['delta']['confirmed']\n api['TT'][date]['d'] = api['TT'][date].get('d', 0) + api[state][date]['delta']['deceased']\n api['TT'][date]['r'] = api['TT'][date].get('r', 0) + api[state][date]['delta']['recovered']\n api['TT'][date]['a'] = api['TT'][date].get('a', 0) + api[state][date]['delta']['active']\n\n# cumulative\n# t = {'c':0, 'd':0, 'r':0, 'a':0}\n# for date in sorted(api['TT'].keys()):\n# for k in ['c', 'd', 'r', 'a']:\n# api['TT'][date][k] += t[k] # add cum to today\n# t[k] = api['TT'][date][k] # udpate cum\n\n# read previous and export\nk = (states_df.date.max().to_pydatetime() - dt.timedelta(days=prediction_offset)).strftime(\"%Y-%m-%d\")\ntry:\n with open(\"vp.json\", \"r\") as f:\n out = json.loads(f.read())\nexcept Exception as e:\n out = {}\n\nwith open(\"vp.json\", \"w\") as f:\n out[k] = {'TT': api['TT']}\n f.write(json.dumps(out, sort_keys=True))",
"_____no_output_____"
]
],
[
[
"#### CSV export video player ouput",
"_____no_output_____"
]
],
[
[
"df_csv = pd.DataFrame(out[k]['TT'])\ndf_csv = df_csv.transpose()\ndf_csv['c'].to_csv('vp_' + k + '.csv')",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec63fb7bd816c815bbea90cc4bd32cf7390a9d4d | 83,825 | ipynb | Jupyter Notebook | chapters/chapter7/chapter7-GCP-autosklearn(tpsaRegression).ipynb | arifmudi/Machine-Learning-in-Biotechnology-using-Python | 9f1cd6cc2e32c85fe8214329456de0f5a3d37e50 | [
"MIT"
]
| 22 | 2021-12-13T02:07:55.000Z | 2022-03-31T15:00:47.000Z | chapters/chapter7/chapter7-GCP-autosklearn(tpsaRegression).ipynb | alkhalifas/Machine-Learning-in-Biotechnology-and-Life-Sciences | 9f1cd6cc2e32c85fe8214329456de0f5a3d37e50 | [
"MIT"
]
| null | null | null | chapters/chapter7/chapter7-GCP-autosklearn(tpsaRegression).ipynb | alkhalifas/Machine-Learning-in-Biotechnology-and-Life-Sciences | 9f1cd6cc2e32c85fe8214329456de0f5a3d37e50 | [
"MIT"
]
| 10 | 2021-12-20T20:12:28.000Z | 2022-03-10T18:41:58.000Z | 107.330346 | 43,608 | 0.808398 | [
[
[
"import pandas as pd\nimport numpy as np\nfrom google.cloud import bigquery\nimport missingno as msno\nfrom sklearn.metrics import classification_report\nimport ast",
"_____no_output_____"
],
[
"import autosklearn.classification",
"_____no_output_____"
],
[
"client = bigquery.Client(location=\"US\")\nprint(\"Client creating using default project: {}\".format(client.project))",
"Client creating using default project: biotech-project-321515\n"
],
[
"query = \"\"\"\n SELECT classification, count(residueCount) AS classCount\n FROM `biotech-project-321515.protein_structure_sequence.dataset_pdb_no_dups`\n GROUP BY classification\n\"\"\"\nquery_job = client.query(\n query,\n location=\"US\",\n)\n\ndf = query_job.to_dataframe()\ndf.head(3)",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"query = \"\"\"\n SELECT DISTINCT\n dups.*\n FROM (\n SELECT classification, count(residueCount) AS classCount\n FROM `biotech-project-321515.protein_structure_sequence.dataset_pdb_no_dups`\n GROUP BY classification\n ) AS sub\n INNER JOIN `biotech-project-321515.protein_structure_sequence.dataset_pdb_no_dups` AS dups\n ON sub.classification = dups.classification\n WHERE sub.classCount > 13000\n\"\"\"\nquery_job = client.query(\n query,\n location=\"US\",\n)\n\ndf2 = query_job.to_dataframe()\ndf2.head(3)",
"_____no_output_____"
],
[
"df2.shape",
"_____no_output_____"
],
[
"df2.classification.value_counts()",
"_____no_output_____"
],
[
"df2 = df2.drop_duplicates([\"structureId\"])\ndf2.shape",
"_____no_output_____"
],
[
"df2 = df2[[\"classification\", \"residueCount\", \"resolution\", \"structureMolecularWeight\", \"crystallizationTempK\", \"densityMatthews\", \"densityPercentSol\", \"phValue\"]]",
"_____no_output_____"
],
[
"df2 = df2.dropna()\ndf2.shape",
"_____no_output_____"
],
[
"msno.matrix(df2)",
"_____no_output_____"
],
[
"import pandas_gbq\npandas_gbq.to_gbq(df2, 'protein_structure_sequence.dataset_pdb_no_dups_cleaned_twocats', project_id ='biotech-project-321515', if_exists='replace')",
"1it [00:05, 5.75s/it]\n"
],
[
"import autosklearn.classification",
"_____no_output_____"
],
[
"X = df2.drop(columns=[\"classification\"])\ny = df2.classification.values.ravel()",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(X)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.25)",
"_____no_output_____"
],
[
"automl = autosklearn.classification.AutoSklearnClassifier(\n time_left_for_this_task=120,\n per_run_time_limit=30,\n tmp_folder='/tmp/autosklearn_protein_tmp5',\n)\nautoml.fit(X_train, y_train, dataset_name='dataset_pdb_no_dups')",
"_____no_output_____"
],
[
"print(automl.leaderboard())",
" rank ensemble_weight type cost duration\nmodel_id \n2 1 0.32 random_forest 0.261821 16.188326\n3 2 0.08 extra_trees 0.275188 15.251657\n10 3 0.22 gradient_boosting 0.288053 9.499943\n9 4 0.16 random_forest 0.294737 13.219077\n6 5 0.08 mlp 0.397995 3.221838\n8 6 0.06 libsvm_svc 0.420217 29.656829\n5 7 0.08 mlp 0.421554 9.592515\n"
],
[
"automl.get_models_with_weights()[0]",
"_____no_output_____"
],
[
"print(automl.show_models())",
"[(0.320000, SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'random_forest', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'mean', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'standardize', 'feature_preprocessor:__choice__': 'no_preprocessing', 'classifier:random_forest:bootstrap': 'True', 'classifier:random_forest:criterion': 'gini', 'classifier:random_forest:max_depth': 'None', 'classifier:random_forest:max_features': 0.5, 'classifier:random_forest:max_leaf_nodes': 'None', 'classifier:random_forest:min_impurity_decrease': 0.0, 'classifier:random_forest:min_samples_leaf': 1, 'classifier:random_forest:min_samples_split': 2, 'classifier:random_forest:min_weight_fraction_leaf': 0.0, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.01},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.220000, SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'gradient_boosting', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'mean', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'none', 'feature_preprocessor:__choice__': 'polynomial', 'classifier:gradient_boosting:early_stop': 'off', 'classifier:gradient_boosting:l2_regularization': 1e-10, 'classifier:gradient_boosting:learning_rate': 0.06339262663805803, 'classifier:gradient_boosting:loss': 'auto', 
'classifier:gradient_boosting:max_bins': 255, 'classifier:gradient_boosting:max_depth': 'None', 'classifier:gradient_boosting:max_leaf_nodes': 26, 'classifier:gradient_boosting:min_samples_leaf': 27, 'classifier:gradient_boosting:scoring': 'loss', 'classifier:gradient_boosting:tol': 1e-07, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.3224406204996268, 'feature_preprocessor:polynomial:degree': 3, 'feature_preprocessor:polynomial:include_bias': 'True', 'feature_preprocessor:polynomial:interaction_only': 'True'},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.160000, SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'random_forest', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'most_frequent', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'robust_scaler', 'feature_preprocessor:__choice__': 'select_rates_classification', 'classifier:random_forest:bootstrap': 'True', 'classifier:random_forest:criterion': 'entropy', 'classifier:random_forest:max_depth': 'None', 'classifier:random_forest:max_features': 0.2266963951269948, 'classifier:random_forest:max_leaf_nodes': 'None', 'classifier:random_forest:min_impurity_decrease': 0.0, 'classifier:random_forest:min_samples_leaf': 5, 'classifier:random_forest:min_samples_split': 11, 'classifier:random_forest:min_weight_fraction_leaf': 0.0, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.0011097451544720433, 'data_preprocessing:numerical_transformer:rescaling:robust_scaler:q_max': 0.8277023826602068, 
'data_preprocessing:numerical_transformer:rescaling:robust_scaler:q_min': 0.23019532091909908, 'feature_preprocessor:select_rates_classification:alpha': 0.1483696469461522, 'feature_preprocessor:select_rates_classification:score_func': 'f_classif', 'feature_preprocessor:select_rates_classification:mode': 'fwe'},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.080000, SimpleClassificationPipeline({'balancing:strategy': 'weighting', 'classifier:__choice__': 'extra_trees', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'no_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'most_frequent', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'none', 'feature_preprocessor:__choice__': 'feature_agglomeration', 'classifier:extra_trees:bootstrap': 'False', 'classifier:extra_trees:criterion': 'entropy', 'classifier:extra_trees:max_depth': 'None', 'classifier:extra_trees:max_features': 0.9623556751747038, 'classifier:extra_trees:max_leaf_nodes': 'None', 'classifier:extra_trees:min_impurity_decrease': 0.0, 'classifier:extra_trees:min_samples_leaf': 2, 'classifier:extra_trees:min_samples_split': 6, 'classifier:extra_trees:min_weight_fraction_leaf': 0.0, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.1928889925307727, 'feature_preprocessor:feature_agglomeration:affinity': 'euclidean', 'feature_preprocessor:feature_agglomeration:linkage': 'ward', 'feature_preprocessor:feature_agglomeration:n_clusters': 29, 'feature_preprocessor:feature_agglomeration:pooling_func': 'median'},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.080000, 
SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'mlp', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'most_frequent', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'quantile_transformer', 'feature_preprocessor:__choice__': 'feature_agglomeration', 'classifier:mlp:activation': 'relu', 'classifier:mlp:alpha': 0.0013962942610192463, 'classifier:mlp:batch_size': 'auto', 'classifier:mlp:beta_1': 0.9, 'classifier:mlp:beta_2': 0.999, 'classifier:mlp:early_stopping': 'valid', 'classifier:mlp:epsilon': 1e-08, 'classifier:mlp:hidden_layer_depth': 1, 'classifier:mlp:learning_rate_init': 0.001358401370818654, 'classifier:mlp:n_iter_no_change': 32, 'classifier:mlp:num_nodes_per_layer': 29, 'classifier:mlp:shuffle': 'True', 'classifier:mlp:solver': 'adam', 'classifier:mlp:tol': 0.0001, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.013600009654321715, 'data_preprocessing:numerical_transformer:rescaling:quantile_transformer:n_quantiles': 1000, 'data_preprocessing:numerical_transformer:rescaling:quantile_transformer:output_distribution': 'uniform', 'feature_preprocessor:feature_agglomeration:affinity': 'euclidean', 'feature_preprocessor:feature_agglomeration:linkage': 'ward', 'feature_preprocessor:feature_agglomeration:n_clusters': 62, 'feature_preprocessor:feature_agglomeration:pooling_func': 'median', 'classifier:mlp:validation_fraction': 0.1},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.080000, SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'mlp', 
'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'no_coalescense', 'data_preprocessing:numerical_transformer:imputation:strategy': 'mean', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'minmax', 'feature_preprocessor:__choice__': 'feature_agglomeration', 'classifier:mlp:activation': 'tanh', 'classifier:mlp:alpha': 3.542372581884717e-07, 'classifier:mlp:batch_size': 'auto', 'classifier:mlp:beta_1': 0.9, 'classifier:mlp:beta_2': 0.999, 'classifier:mlp:early_stopping': 'train', 'classifier:mlp:epsilon': 1e-08, 'classifier:mlp:hidden_layer_depth': 2, 'classifier:mlp:learning_rate_init': 0.00016753718126302934, 'classifier:mlp:n_iter_no_change': 32, 'classifier:mlp:num_nodes_per_layer': 54, 'classifier:mlp:shuffle': 'True', 'classifier:mlp:solver': 'adam', 'classifier:mlp:tol': 0.0001, 'feature_preprocessor:feature_agglomeration:affinity': 'euclidean', 'feature_preprocessor:feature_agglomeration:linkage': 'complete', 'feature_preprocessor:feature_agglomeration:n_clusters': 264, 'feature_preprocessor:feature_agglomeration:pooling_func': 'max'},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.060000, SimpleClassificationPipeline({'balancing:strategy': 'weighting', 'classifier:__choice__': 'libsvm_svc', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'no_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'no_coalescense', 'data_preprocessing:numerical_transformer:imputation:strategy': 'mean', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'none', 'feature_preprocessor:__choice__': 'feature_agglomeration', 'classifier:libsvm_svc:C': 0.96472524110086, 'classifier:libsvm_svc:gamma': 0.32003431657702625, 'classifier:libsvm_svc:kernel': 
'poly', 'classifier:libsvm_svc:max_iter': -1, 'classifier:libsvm_svc:shrinking': 'False', 'classifier:libsvm_svc:tol': 0.0005751778021671754, 'feature_preprocessor:feature_agglomeration:affinity': 'euclidean', 'feature_preprocessor:feature_agglomeration:linkage': 'complete', 'feature_preprocessor:feature_agglomeration:n_clusters': 88, 'feature_preprocessor:feature_agglomeration:pooling_func': 'max', 'classifier:libsvm_svc:coef0': 0.07340987235571972, 'classifier:libsvm_svc:degree': 2},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n]\n"
],
[
"print(automl.show_models())",
"[(0.320000, SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'random_forest', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'mean', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'standardize', 'feature_preprocessor:__choice__': 'no_preprocessing', 'classifier:random_forest:bootstrap': 'True', 'classifier:random_forest:criterion': 'gini', 'classifier:random_forest:max_depth': 'None', 'classifier:random_forest:max_features': 0.5, 'classifier:random_forest:max_leaf_nodes': 'None', 'classifier:random_forest:min_impurity_decrease': 0.0, 'classifier:random_forest:min_samples_leaf': 1, 'classifier:random_forest:min_samples_split': 2, 'classifier:random_forest:min_weight_fraction_leaf': 0.0, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.01},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.220000, SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'gradient_boosting', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'mean', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'none', 'feature_preprocessor:__choice__': 'polynomial', 'classifier:gradient_boosting:early_stop': 'off', 'classifier:gradient_boosting:l2_regularization': 1e-10, 'classifier:gradient_boosting:learning_rate': 0.06339262663805803, 'classifier:gradient_boosting:loss': 'auto', 
'classifier:gradient_boosting:max_bins': 255, 'classifier:gradient_boosting:max_depth': 'None', 'classifier:gradient_boosting:max_leaf_nodes': 26, 'classifier:gradient_boosting:min_samples_leaf': 27, 'classifier:gradient_boosting:scoring': 'loss', 'classifier:gradient_boosting:tol': 1e-07, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.3224406204996268, 'feature_preprocessor:polynomial:degree': 3, 'feature_preprocessor:polynomial:include_bias': 'True', 'feature_preprocessor:polynomial:interaction_only': 'True'},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.160000, SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'random_forest', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'most_frequent', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'robust_scaler', 'feature_preprocessor:__choice__': 'select_rates_classification', 'classifier:random_forest:bootstrap': 'True', 'classifier:random_forest:criterion': 'entropy', 'classifier:random_forest:max_depth': 'None', 'classifier:random_forest:max_features': 0.2266963951269948, 'classifier:random_forest:max_leaf_nodes': 'None', 'classifier:random_forest:min_impurity_decrease': 0.0, 'classifier:random_forest:min_samples_leaf': 5, 'classifier:random_forest:min_samples_split': 11, 'classifier:random_forest:min_weight_fraction_leaf': 0.0, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.0011097451544720433, 'data_preprocessing:numerical_transformer:rescaling:robust_scaler:q_max': 0.8277023826602068, 
'data_preprocessing:numerical_transformer:rescaling:robust_scaler:q_min': 0.23019532091909908, 'feature_preprocessor:select_rates_classification:alpha': 0.1483696469461522, 'feature_preprocessor:select_rates_classification:score_func': 'f_classif', 'feature_preprocessor:select_rates_classification:mode': 'fwe'},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.080000, SimpleClassificationPipeline({'balancing:strategy': 'weighting', 'classifier:__choice__': 'extra_trees', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'no_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'most_frequent', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'none', 'feature_preprocessor:__choice__': 'feature_agglomeration', 'classifier:extra_trees:bootstrap': 'False', 'classifier:extra_trees:criterion': 'entropy', 'classifier:extra_trees:max_depth': 'None', 'classifier:extra_trees:max_features': 0.9623556751747038, 'classifier:extra_trees:max_leaf_nodes': 'None', 'classifier:extra_trees:min_impurity_decrease': 0.0, 'classifier:extra_trees:min_samples_leaf': 2, 'classifier:extra_trees:min_samples_split': 6, 'classifier:extra_trees:min_weight_fraction_leaf': 0.0, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.1928889925307727, 'feature_preprocessor:feature_agglomeration:affinity': 'euclidean', 'feature_preprocessor:feature_agglomeration:linkage': 'ward', 'feature_preprocessor:feature_agglomeration:n_clusters': 29, 'feature_preprocessor:feature_agglomeration:pooling_func': 'median'},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.080000, 
SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'mlp', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'minority_coalescer', 'data_preprocessing:numerical_transformer:imputation:strategy': 'most_frequent', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'quantile_transformer', 'feature_preprocessor:__choice__': 'feature_agglomeration', 'classifier:mlp:activation': 'relu', 'classifier:mlp:alpha': 0.0013962942610192463, 'classifier:mlp:batch_size': 'auto', 'classifier:mlp:beta_1': 0.9, 'classifier:mlp:beta_2': 0.999, 'classifier:mlp:early_stopping': 'valid', 'classifier:mlp:epsilon': 1e-08, 'classifier:mlp:hidden_layer_depth': 1, 'classifier:mlp:learning_rate_init': 0.001358401370818654, 'classifier:mlp:n_iter_no_change': 32, 'classifier:mlp:num_nodes_per_layer': 29, 'classifier:mlp:shuffle': 'True', 'classifier:mlp:solver': 'adam', 'classifier:mlp:tol': 0.0001, 'data_preprocessing:categorical_transformer:category_coalescence:minority_coalescer:minimum_fraction': 0.013600009654321715, 'data_preprocessing:numerical_transformer:rescaling:quantile_transformer:n_quantiles': 1000, 'data_preprocessing:numerical_transformer:rescaling:quantile_transformer:output_distribution': 'uniform', 'feature_preprocessor:feature_agglomeration:affinity': 'euclidean', 'feature_preprocessor:feature_agglomeration:linkage': 'ward', 'feature_preprocessor:feature_agglomeration:n_clusters': 62, 'feature_preprocessor:feature_agglomeration:pooling_func': 'median', 'classifier:mlp:validation_fraction': 0.1},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.080000, SimpleClassificationPipeline({'balancing:strategy': 'none', 'classifier:__choice__': 'mlp', 
'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'one_hot_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'no_coalescense', 'data_preprocessing:numerical_transformer:imputation:strategy': 'mean', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'minmax', 'feature_preprocessor:__choice__': 'feature_agglomeration', 'classifier:mlp:activation': 'tanh', 'classifier:mlp:alpha': 3.542372581884717e-07, 'classifier:mlp:batch_size': 'auto', 'classifier:mlp:beta_1': 0.9, 'classifier:mlp:beta_2': 0.999, 'classifier:mlp:early_stopping': 'train', 'classifier:mlp:epsilon': 1e-08, 'classifier:mlp:hidden_layer_depth': 2, 'classifier:mlp:learning_rate_init': 0.00016753718126302934, 'classifier:mlp:n_iter_no_change': 32, 'classifier:mlp:num_nodes_per_layer': 54, 'classifier:mlp:shuffle': 'True', 'classifier:mlp:solver': 'adam', 'classifier:mlp:tol': 0.0001, 'feature_preprocessor:feature_agglomeration:affinity': 'euclidean', 'feature_preprocessor:feature_agglomeration:linkage': 'complete', 'feature_preprocessor:feature_agglomeration:n_clusters': 264, 'feature_preprocessor:feature_agglomeration:pooling_func': 'max'},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n(0.060000, SimpleClassificationPipeline({'balancing:strategy': 'weighting', 'classifier:__choice__': 'libsvm_svc', 'data_preprocessing:categorical_transformer:categorical_encoding:__choice__': 'no_encoding', 'data_preprocessing:categorical_transformer:category_coalescence:__choice__': 'no_coalescense', 'data_preprocessing:numerical_transformer:imputation:strategy': 'mean', 'data_preprocessing:numerical_transformer:rescaling:__choice__': 'none', 'feature_preprocessor:__choice__': 'feature_agglomeration', 'classifier:libsvm_svc:C': 0.96472524110086, 'classifier:libsvm_svc:gamma': 0.32003431657702625, 'classifier:libsvm_svc:kernel': 
'poly', 'classifier:libsvm_svc:max_iter': -1, 'classifier:libsvm_svc:shrinking': 'False', 'classifier:libsvm_svc:tol': 0.0005751778021671754, 'feature_preprocessor:feature_agglomeration:affinity': 'euclidean', 'feature_preprocessor:feature_agglomeration:linkage': 'complete', 'feature_preprocessor:feature_agglomeration:n_clusters': 88, 'feature_preprocessor:feature_agglomeration:pooling_func': 'max', 'classifier:libsvm_svc:coef0': 0.07340987235571972, 'classifier:libsvm_svc:degree': 2},\ndataset_properties={\n 'task': 1,\n 'sparse': False,\n 'multilabel': False,\n 'multiclass': False,\n 'target_type': 'classification',\n 'signed': False})),\n]\n"
],
[
"predictions = automl.predict(X_test)\nprint(\"classification_report:\", classification_report(y_test, predictions))",
"classification_report: precision recall f1-score support\n\n HYDROLASE 0.75 0.78 0.76 3434\n TRANSFERASE 0.69 0.66 0.67 2611\n\n accuracy 0.73 6045\n macro avg 0.72 0.72 0.72 6045\nweighted avg 0.73 0.73 0.73 6045\n\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec64057d27a50075a990efab2e792e51a7d5a372 | 197,437 | ipynb | Jupyter Notebook | notebooks/dataset-projections/cifar10/cifar10-direct-embedding.ipynb | timsainb/ParametricUMAP_paper | 00b4d676647e45619552aec8f2663c0903a83e3f | [
"MIT"
]
| 124 | 2020-09-27T23:59:01.000Z | 2022-03-22T06:27:35.000Z | notebooks/dataset-projections/cifar10/cifar10-direct-embedding.ipynb | kiminh/ParametricUMAP_paper | 00b4d676647e45619552aec8f2663c0903a83e3f | [
"MIT"
]
| 2 | 2021-02-05T18:13:13.000Z | 2021-11-01T14:55:08.000Z | notebooks/dataset-projections/cifar10/cifar10-direct-embedding.ipynb | kiminh/ParametricUMAP_paper | 00b4d676647e45619552aec8f2663c0903a83e3f | [
"MIT"
]
| 16 | 2020-09-28T07:43:21.000Z | 2022-03-21T00:31:34.000Z | 356.384477 | 169,580 | 0.938036 | [
[
[
"# reload packages\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"### Choose GPU (this may not be needed on your computer)",
"_____no_output_____"
]
],
[
[
"%env CUDA_DEVICE_ORDER=PCI_BUS_ID\n%env CUDA_VISIBLE_DEVICES=''",
"env: CUDA_DEVICE_ORDER=PCI_BUS_ID\nenv: CUDA_VISIBLE_DEVICES=''\n"
]
],
[
[
"### load packages",
"_____no_output_____"
]
],
[
[
"from tfumap.umap import tfUMAP",
"/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n"
],
[
"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm.autonotebook import tqdm\nimport umap\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"### Load dataset",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.datasets import cifar10",
"_____no_output_____"
],
[
"# load dataset\n(train_images, Y_train), (test_images, Y_test) = cifar10.load_data()\nX_train = (train_images/255.).astype('float32')\nX_test = (test_images/255.).astype('float32')\nX_train = X_train.reshape((len(X_train), np.product(np.shape(X_train)[1:])))\nX_test = X_test.reshape((len(X_test), np.product(np.shape(X_test)[1:])))\n\n# subset a validation set\nn_valid = 10000\nX_valid = X_train[-n_valid:]\nY_valid = Y_train[-n_valid:]\nX_train = X_train[:-n_valid]\nY_train = Y_train[:-n_valid]\n\n# flatten X\nX_train_flat = X_train.reshape((len(X_train), np.product(np.shape(X_train)[1:])))\nX_test_flat = X_test.reshape((len(X_test), np.product(np.shape(X_test)[1:])))\nX_valid_flat= X_valid.reshape((len(X_valid), np.product(np.shape(X_valid)[1:])))\nprint(len(X_train), len(X_valid), len(X_test))",
"40000 10000 10000\n"
]
],
[
[
"### Create model and train",
"_____no_output_____"
]
],
[
[
"embedder = tfUMAP(direct_embedding=True, verbose=True, negative_sample_rate=5, training_epochs=100)",
"_____no_output_____"
],
[
"z = embedder.fit_transform(X_train_flat)",
"tfUMAP(direct_embedding=True, negative_sample_rate=5,\n optimizer=<tensorflow.python.keras.optimizer_v2.adadelta.Adadelta object at 0x7f520c14f9e8>,\n tensorboard_logdir='/tmp/tensorboard/20200709-114858',\n training_epochs=100)\nConstruct fuzzy simplicial set\nThu Jul 9 11:48:58 2020 Finding Nearest Neighbors\nThu Jul 9 11:48:58 2020 Building RP forest with 15 trees\nThu Jul 9 11:49:02 2020 parallel NN descent for 15 iterations\n\t 0 / 15\n\t 1 / 15\n\t 2 / 15\n\t 3 / 15\n\t 4 / 15\n\t 5 / 15\nThu Jul 9 11:49:15 2020 Finished Nearest Neighbor Search\nThu Jul 9 11:49:22 2020 Embedding with TensorFlow\n"
]
],
[
[
"### Plot model output",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots( figsize=(8, 8))\nsc = ax.scatter(\n z[:, 0],\n z[:, 1],\n c=Y_train.flatten(),\n cmap=\"tab10\",\n s=0.1,\n alpha=0.5,\n rasterized=True,\n)\nax.axis('equal')\nax.set_title(\"UMAP in Tensorflow embedding\", fontsize=20)\nplt.colorbar(sc, ax=ax);",
"_____no_output_____"
]
],
[
[
"### View loss",
"_____no_output_____"
]
],
[
[
"from tfumap.umap import retrieve_tensors\nimport seaborn as sns",
"_____no_output_____"
],
[
"loss_df = retrieve_tensors(embedder.tensorboard_logdir)\nloss_df[:3]",
"['umap_loss']\n[]\n"
],
[
"ax = sns.lineplot(x=\"step\", y=\"val\", hue=\"group\", data=loss_df[loss_df.variable=='umap_loss'])\nax.set_xscale('log')",
"_____no_output_____"
]
],
[
[
"### Save output",
"_____no_output_____"
]
],
[
[
"from tfumap.paths import ensure_dir, MODEL_DIR",
"_____no_output_____"
],
[
"output_dir = MODEL_DIR/'projections'/ 'cifar10' / 'direct'\nensure_dir(output_dir)",
"_____no_output_____"
],
[
"embedder.save(output_dir)",
"Pickle of model saved\n"
],
[
"loss_df.to_pickle(output_dir / 'loss_df.pickle')",
"_____no_output_____"
],
[
"np.save(output_dir / 'z.npy', z)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6409251e021132135f54d49f7bf6e166d8e1fe | 4,956 | ipynb | Jupyter Notebook | AoC 2020 Day 4/AoC 2020 Day 4 - Passport Processing.ipynb | glenem/Advent-of-Code-2020 | c7ad47773bba12eb97348eb7cbe3bc9054b4716b | [
"MIT"
]
| null | null | null | AoC 2020 Day 4/AoC 2020 Day 4 - Passport Processing.ipynb | glenem/Advent-of-Code-2020 | c7ad47773bba12eb97348eb7cbe3bc9054b4716b | [
"MIT"
]
| null | null | null | AoC 2020 Day 4/AoC 2020 Day 4 - Passport Processing.ipynb | glenem/Advent-of-Code-2020 | c7ad47773bba12eb97348eb7cbe3bc9054b4716b | [
"MIT"
]
| null | null | null | 65.210526 | 1,366 | 0.639023 | [
[
[
"# AoC 2020 Day 4: Passport Processing\n\nTask: Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?\n\nFields that a passport neets:\n- byr (Birth Year)\n- iyr (Issue Year)\n- eyr (Expiration Year)\n- hgt (Height)\n- hcl (Hair Color)\n- ecl (Eye Color)\n- pid (Passport ID)\n- cid (Country ID)\n",
"_____no_output_____"
]
],
[
[
"# Example problem\nimport pandas as pd\nimport numpy as np\n\nexample_data = np.loadtxt(\"AoC 2020 Day 4 Example Input.txt\", dtype=dict)\n\nexample_data",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
]
]
|
ec6413a0a371006db5f337c40fd23e1e218c4b4b | 316,485 | ipynb | Jupyter Notebook | tensorflow/examples/udacity/5_word2vec.ipynb | HowieYang0/notmnist-ex | 6d702ab5a3df086e960b05965812976b88167573 | [
"Apache-2.0"
]
| 101 | 2016-12-03T11:40:52.000Z | 2017-12-23T02:02:03.000Z | tensorflow/examples/udacity/5_word2vec.ipynb | HowieYang0/notmnist-ex | 6d702ab5a3df086e960b05965812976b88167573 | [
"Apache-2.0"
]
| 9 | 2016-12-14T03:27:46.000Z | 2017-09-13T02:29:07.000Z | tensorflow/examples/udacity/5_word2vec.ipynb | HowieYang0/notmnist-ex | 6d702ab5a3df086e960b05965812976b88167573 | [
"Apache-2.0"
]
| 47 | 2016-12-04T12:37:24.000Z | 2018-01-14T18:13:07.000Z | 352.826087 | 273,932 | 0.898899 | [
[
[
"Deep Learning\n=============\n\nAssignment 5\n------------\n\nThe goal of this assignment is to train a Word2Vec skip-gram model over [Text8](http://mattmahoney.net/dc/textdata) data.",
"_____no_output_____"
]
],
[
[
"# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\n%matplotlib inline\nfrom __future__ import print_function\nimport collections\nimport math\nimport numpy as np\nimport os\nimport random\nimport tensorflow as tf\nimport zipfile\nfrom matplotlib import pylab\nfrom six.moves import range\nfrom six.moves.urllib.request import urlretrieve\nfrom sklearn.manifold import TSNE",
"_____no_output_____"
]
],
[
[
"Download the data from the source website if necessary.",
"_____no_output_____"
]
],
[
[
"url = 'http://mattmahoney.net/dc/'\n\ndef maybe_download(filename, expected_bytes):\n \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename\n\nfilename = maybe_download('text8.zip', 31344016)",
"Found and verified text8.zip\n"
]
],
[
[
"Read the data into a string.",
"_____no_output_____"
]
],
[
[
"def read_data(filename):\n \"\"\"Extract the first file enclosed in a zip file as a list of words\"\"\"\n with zipfile.ZipFile(filename) as f:\n data = tf.compat.as_str(f.read(f.namelist()[0])).split()\n return data\n \nwords = read_data(filename)\nprint('Data size %d' % len(words))",
"Data size 17005207\n"
]
],
[
[
"Build the dictionary and replace rare words with UNK token.",
"_____no_output_____"
]
],
[
[
"vocabulary_size = 50000\n\ndef build_dataset(words):\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count = unk_count + 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) \n return data, count, dictionary, reverse_dictionary\n\ndata, count, dictionary, reverse_dictionary = build_dataset(words)\nprint('Most common words (+UNK)', count[:5])\nprint('Sample data', data[:10])\ndel words # Hint to reduce memory.",
"Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]\nSample data [5243, 3083, 12, 6, 195, 2, 3136, 46, 59, 156]\n"
]
],
[
[
"Function to generate a training batch for the skip-gram model.",
"_____no_output_____"
]
],
[
[
"data_index = 0\n\ndef generate_batch(batch_size, num_skips, skip_window):\n global data_index\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1 # [ skip_window target skip_window ]\n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size // num_skips):\n target = skip_window # target label at the center of the buffer\n targets_to_avoid = [ skip_window ]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n targets_to_avoid.append(target)\n batch[i * num_skips + j] = buffer[skip_window]\n labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n return batch, labels\n\nprint('data:', [reverse_dictionary[di] for di in data[:8]])\n\nfor num_skips, skip_window in [(2, 1), (4, 2)]:\n data_index = 0\n batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)\n print('\\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))\n print(' batch:', [reverse_dictionary[bi] for bi in batch])\n print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])",
"data: ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first']\n\nwith num_skips = 2 and skip_window = 1:\n batch: ['originated', 'originated', 'as', 'as', 'a', 'a', 'term', 'term']\n labels: ['as', 'anarchism', 'a', 'originated', 'term', 'as', 'a', 'of']\n\nwith num_skips = 4 and skip_window = 2:\n batch: ['as', 'as', 'as', 'as', 'a', 'a', 'a', 'a']\n labels: ['anarchism', 'originated', 'term', 'a', 'as', 'of', 'originated', 'term']\n"
]
],
[
[
"Train a skip-gram model.",
"_____no_output_____"
]
],
[
[
"batch_size = 128\nembedding_size = 128 # Dimension of the embedding vector.\nskip_window = 1 # How many words to consider left and right.\nnum_skips = 2 # How many times to reuse an input to generate a label.\n# We pick a random validation set to sample nearest neighbors. here we limit the\n# validation samples to the words that have a low numeric ID, which by\n# construction are also the most frequent. \nvalid_size = 16 # Random set of words to evaluate similarity on.\nvalid_window = 100 # Only pick dev samples in the head of the distribution.\nvalid_examples = np.array(random.sample(range(valid_window), valid_size))\nnum_sampled = 64 # Number of negative examples to sample.\n\ngraph = tf.Graph()\n\nwith graph.as_default(), tf.device('/cpu:0'):\n\n # Input data.\n train_dataset = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n \n # Variables.\n embeddings = tf.Variable(\n tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n softmax_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))\n \n # Model.\n # Look up embeddings for inputs.\n embed = tf.nn.embedding_lookup(embeddings, train_dataset)\n # Compute the softmax loss, using a sample of the negative labels each time.\n loss = tf.reduce_mean(\n tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,\n train_labels, num_sampled, vocabulary_size))\n\n # Optimizer.\n # Note: The optimizer will optimize the softmax_weights AND the embeddings.\n # This is because the embeddings are defined as a variable quantity and the\n # optimizer's `minimize` method will by default modify all variable quantities \n # that contribute to the tensor it is passed.\n # See docs on `tf.train.Optimizer.minimize()` for more details.\n optimizer = 
tf.train.AdagradOptimizer(1.0).minimize(loss)\n \n # Compute the similarity between minibatch examples and all embeddings.\n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))",
"_____no_output_____"
],
[
"num_steps = 100001\n\nwith tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print('Initialized')\n average_loss = 0\n for step in range(num_steps):\n batch_data, batch_labels = generate_batch(\n batch_size, num_skips, skip_window)\n feed_dict = {train_dataset : batch_data, train_labels : batch_labels}\n _, l = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += l\n if step % 2000 == 0:\n if step > 0:\n average_loss = average_loss / 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print('Average loss at step %d: %f' % (step, average_loss))\n average_loss = 0\n # note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % 10000 == 0:\n sim = similarity.eval()\n for i in range(valid_size):\n valid_word = reverse_dictionary[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k+1]\n log = 'Nearest to %s:' % valid_word\n for k in range(top_k):\n close_word = reverse_dictionary[nearest[k]]\n log = '%s %s,' % (log, close_word)\n print(log)\n final_embeddings = normalized_embeddings.eval()",
"Initialized\nAverage loss at step 0 : 8.58149623871\nNearest to been: unfavourably, marmara, ancestral, legal, bogart, glossaries, worst, rooms,\nNearest to time: conformist, strawberries, sindhi, waterfall, xia, nominates, psp, sensitivity,\nNearest to over: overlord, panda, golden, semigroup, rawlings, involved, shreveport, handling,\nNearest to not: hymenoptera, reintroducing, lamiaceae, because, davao, omnipotent, combustion, debilitating,\nNearest to three: catalog, koza, gn, braque, holstein, postgresql, luddite, justine,\nNearest to if: chilled, vince, fiddler, represented, sandinistas, happiness, lya, glands,\nNearest to there: coast, photosynthetic, kimmei, legally, inner, illyricum, formats, fullmetal,\nNearest to between: chuvash, prinz, suitability, wolfe, guideline, computability, diminutive, paulo,\nNearest to from: tanganyika, workshop, elphinstone, spearhead, resurrected, kevlar, shangri, loves,\nNearest to state: sextus, wuppertal, glaring, inches, unrounded, courageous, adler, connie,\nNearest to on: gino, phocas, rhine, jg, macrocosm, jackass, jays, theorie,\nNearest to and: standings, towed, reyes, willard, equality, juggling, wladislaus, faked,\nNearest to eight: gresham, dogg, moko, tennis, superseded, telegraphy, scramble, vinod,\nNearest to they: prisons, divisor, coder, ribeira, willingness, factional, nne, lotta,\nNearest to more: blues, fur, sterling, tangier, khwarizmi, discouraged, cal, deicide,\nNearest to other: enemies, bogged, brassicaceae, lascaux, dispense, alexandrians, crimea, dou,\nAverage loss at step 2000 : 4.39983723116\nAverage loss at step 4000 : 3.86921076906\nAverage loss at step 6000 : 3.72542127335\nAverage loss at step 8000 : 3.57835536212\nAverage loss at step 10000 : 3.61056993055\nNearest to been: glossaries, legal, unfavourably, be, hadad, wore, scarcity, were,\nNearest to time: strawberries, conformist, gleichschaltung, waterfall, molality, nominates, baal, dole,\nNearest to over: golden, semigroup, catus, 
motorways, brick, shehri, mussolini, overlord,\nNearest to not: hinayana, it, often, they, boots, also, noaa, lindsey,\nNearest to three: four, seven, six, five, nine, eight, two, zero,\nNearest to if: glands, euros, wallpaper, redefine, toho, confuse, unsound, shepherd,\nNearest to there: it, they, fullmetal, pace, legally, harpsichord, mma, bug,\nNearest to between: chuvash, wandering, from, kirsch, pursuant, eurocents, suitability, jackie,\nNearest to from: into, in, workshop, to, at, misogynist, elphinstone, spearhead,\nNearest to state: sextus, glaring, connie, adler, esoteric, didactic, handedness, presidents,\nNearest to on: in, at, for, ruminants, wakefulness, torrey, foley, gino,\nNearest to and: or, who, but, zelda, of, for, thirst, chisel,\nNearest to eight: nine, six, seven, five, four, three, zero, two,\nNearest to they: he, prisons, there, we, hydrate, it, not, cumbersome,\nNearest to more: skye, blues, trypomastigotes, deicide, most, readable, used, sterling,\nNearest to other: trochaic, hush, surveyors, joachim, differentiation, attackers, reverence, attestation,\nAverage loss at step 12000 : 3.66169466591\nAverage loss at step 14000 : 3.60342905837\nAverage loss at step 16000 : 3.57761328053\nAverage loss at step 18000 : 3.57667332476\nAverage loss at step 20000 : 3.53310145146\nNearest to been: be, become, was, hadad, unfavourably, were, wore, partido,\nNearest to time: gleichschaltung, strawberries, year, nominates, conformist, etch, admittedly, treasuries,\nNearest to over: golden, semigroup, motorways, rawlings, triangle, trey, ustawa, mattingly,\nNearest to not: they, boots, often, dieppe, still, hinayana, nearly, be,\nNearest to three: two, four, five, seven, eight, six, nine, one,\nNearest to if: wallpaper, euros, before, toho, unsound, so, bg, pfc,\nNearest to there: they, it, he, usually, which, we, not, transactions,\nNearest to between: from, with, about, near, reactance, eurocents, wandering, voltaire,\nNearest to from: into, workshop, 
by, between, in, on, elphinstone, under,\nNearest to state: glaring, esoteric, succeeding, sextus, vorarlberg, presidents, depends, connie,\nNearest to on: in, at, upon, during, from, janis, foley, nubian,\nNearest to and: or, thirst, but, where, s, who, pfaff, including,\nNearest to eight: nine, seven, six, five, four, three, zero, one,\nNearest to they: there, he, we, not, it, you, prisons, who,\nNearest to more: less, most, deicide, skye, trypomastigotes, interventionism, toed, drummond,\nNearest to other: such, joachim, hush, attackers, surveyors, trochaic, differentiation, reverence,\nAverage loss at step 22000 : 3.59519316927\nAverage loss at step 24000 : 3.55378576797\nAverage loss at step 26000 : 3.56455037558\nAverage loss at step 28000 : 3.5040882225\nAverage loss at step 30000 : 3.39208897972\nNearest to been: become, be, were, was, spotless, hadad, by, hausdorff,\nNearest to time: gleichschaltung, year, day, nominates, jesus, strawberries, way, admittedly,\nNearest to over: golden, semigroup, motorways, rawlings, interventionism, counternarcotics, adaption, brick,\nNearest to not: often, they, it, never, still, nor, boots, pki,\nNearest to three: four, six, two, eight, five, seven, nine, zero,\nNearest to if: when, before, so, should, toho, where, bg, wallpaper,\nNearest to there: they, it, which, usually, he, that, also, now,\nNearest to between: with, from, in, panasonic, presupposes, churchmen, hijacking, where,\nNearest to from: into, elphinstone, workshop, between, through, speculates, sosa, in,\nNearest to state: esoteric, glaring, presidents, vorarlberg, atmosphere, succeeding, lute, connie,\nNearest to on: upon, in, janis, during, torrey, against, infield, catalans,\nNearest to and: or, thirst, in, but, of, sobib, cleaves, including,\nNearest to eight: nine, six, four, seven, three, zero, five, one,\nNearest to they: we, there, he, you, it, these, who, i,\nNearest to more: less, most, deicide, faster, toed, very, skye, tonic,\nNearest to other: 
different, attackers, joachim, various, such, many, differentiation, these,\nAverage loss at step 32000 : 3.49501452419\nAverage loss at step 34000 : 3.48593705952\nAverage loss at step 36000 : 3.50112806576\nAverage loss at step"
],
[
"num_points = 400\n\ntsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\ntwo_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])",
"_____no_output_____"
],
[
"def plot(embeddings, labels):\n assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'\n pylab.figure(figsize=(15,15)) # in inches\n for i, label in enumerate(labels):\n x, y = embeddings[i,:]\n pylab.scatter(x, y)\n pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',\n ha='right', va='bottom')\n pylab.show()\n\nwords = [reverse_dictionary[i] for i in range(1, num_points+1)]\nplot(two_d_embeddings, words)",
"_____no_output_____"
]
],
[
[
"---\n\nProblem\n-------\n\nAn alternative to skip-gram is another Word2Vec model called [CBOW](http://arxiv.org/abs/1301.3781) (Continuous Bag of Words). In the CBOW model, instead of predicting a context word from a word vector, you predict a word from the sum of all the word vectors in its context. Implement and evaluate a CBOW model trained on the text8 dataset.\n\n---",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
ec64154f1a1238897bd4ceb1c2f5ab73d9bfa315 | 2,826 | ipynb | Jupyter Notebook | aulas/perceptron_sklearn.ipynb | danilovbarbosa/machine_learning | 55b7c752c24b988f6a2f3f77d9ad82ca5d990e3b | [
"MIT"
]
| null | null | null | aulas/perceptron_sklearn.ipynb | danilovbarbosa/machine_learning | 55b7c752c24b988f6a2f3f77d9ad82ca5d990e3b | [
"MIT"
]
| null | null | null | aulas/perceptron_sklearn.ipynb | danilovbarbosa/machine_learning | 55b7c752c24b988f6a2f3f77d9ad82ca5d990e3b | [
"MIT"
]
| null | null | null | 2,826 | 2,826 | 0.726115 | [
[
[
"from google.colab import drive\n\ninicial_path = '/content/drive'\ndrive.mount(inicial_path)\n\nstringPath = inicial_path + '/My Drive/Inteligência Artificial/UA 11/aula 20/dataset2.csv'",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
]
],
[
[
"##Importando",
"_____no_output_____"
]
],
[
[
"from sklearn import linear_model, model_selection, metrics\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"##Implementando",
"_____no_output_____"
]
],
[
[
"# Carrega os elementos do dataset\ndataset = pd.read_csv(stringPath)\nx = dataset.iloc[:, 1:].values\ny = dataset.iloc[:, 0].values\n\n# Cria um objeto Perceptron\nperceptron = linear_model.Perceptron()\n\n# Split em conjunto de treino e teste\nx_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, test_size=0.2, random_state=0)\n\n# Treinamento\nclassificador = perceptron.fit(x_train, y_train)\n\n# Validação\ny_predict = classificador.predict(x_test)\n\n# Acurácia\nprint(metrics.accuracy_score(y_test, y_predict))",
"0.8\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6415f2e8ad2e00e4fc737d89405b158a2b581a | 84,681 | ipynb | Jupyter Notebook | Basic/draw_prime_matrix.ipynb | luoshao23/ML_algorithm | 6e94fdd0718cd892118fd036c7c5851cf3e6d796 | [
"MIT"
]
| 4 | 2017-06-19T06:33:38.000Z | 2019-01-31T12:07:12.000Z | Basic/draw_prime_matrix.ipynb | luoshao23/ML_algorithm | 6e94fdd0718cd892118fd036c7c5851cf3e6d796 | [
"MIT"
]
| null | null | null | Basic/draw_prime_matrix.ipynb | luoshao23/ML_algorithm | 6e94fdd0718cd892118fd036c7c5851cf3e6d796 | [
"MIT"
]
| 1 | 2017-12-06T08:41:06.000Z | 2017-12-06T08:41:06.000Z | 682.91129 | 81,988 | 0.949174 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"def isPrimes2(n):\n if n > 1:\n if n == 2:\n return True\n if n % 2 == 0:\n return False\n for x in range(3, int(np.sqrt(n) + 1), 2):\n if n % x == 0:\n return False\n return True\n return False\n\n",
"_____no_output_____"
],
[
"max_ = 50000\ndlt = 0.1\nedge = int(np.sqrt(max_)) // 2 + 1\nfig, ax = plt.subplots(1, 1, figsize=(12, 12))\nax.set_xlim(-dlt * edge, dlt * edge)\nax.set_ylim(-dlt * edge, dlt * edge)\n\n\ndef p(max_, dlt=0.1):\n d = 1\n dx, dy = 0, -dlt\n x, y = 0, 0\n r = 1\n \n# ax.text(x, y, d)\n ax.scatter(x, y, c='k')\n\n while d <= max_:\n\n for _ in range(2):\n for _ in range(r):\n x += dx\n y += dy\n d += 1\n if d > max_:\n return \n# print(x, y, d)\n if isPrimes2(d):\n# ax.text(x, y, d)\n ax.scatter(x, y, c='k')\n\n dx, dy = dy, dx\n dy = -dy\n r += 1\n\np(max_, dlt)\nplt.show()\n \n \n \n ",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code"
]
]
|
ec6417e025f3220440e5c72f25b6ddccee7bf4fc | 449,919 | ipynb | Jupyter Notebook | content/post/the-guts-of-gams/the-guts-of-gams.ipynb | TomKealy/academic-kickstart | b912577bf140efdfd360574522693ae022d5f54e | [
"MIT"
]
| null | null | null | content/post/the-guts-of-gams/the-guts-of-gams.ipynb | TomKealy/academic-kickstart | b912577bf140efdfd360574522693ae022d5f54e | [
"MIT"
]
| null | null | null | content/post/the-guts-of-gams/the-guts-of-gams.ipynb | TomKealy/academic-kickstart | b912577bf140efdfd360574522693ae022d5f54e | [
"MIT"
]
| null | null | null | 702.998438 | 83,740 | 0.95034 | [
[
[
"This post will explain some of the internals of GAMs: how to estimate the feature functions. First we'll fit some simple splines on some wage data, then we'll fit more complicated splines on some accelerometer data, with a highly non-linear realtionship between in the input and the output.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport patsy\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.preprocessing import PolynomialFeatures\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n%matplotlib inline",
"/Users/thomas.kealy/anaconda3/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.\n from pandas.core import datetools\n"
]
],
[
[
"GAMs are smooth, semi-parametric models of the form:\n\n$$ y = \\sum_{i=0}^{n-1} \\beta_i f_i\\left(x_i\\right) $$\n\nwhere \\\\(y\\\\) is the dependent variable, \\\\(x_i\\\\) are the independent variables, \\\\(\\beta\\\\) are the model coefficients, and \\\\(f_i\\\\) are the feature functions.\n\nWe build the \\\\(f_i\\\\) using a type of function called a spline; splines allow us to automatically model non-linear relationships without having to manually try out many different transformations on each variable. \n\nFirst of all, we'll use `patsy` to construct a few spline bases and fit generalised linear models with `statsmodels`. Then, we'll dive into constructing splines ourselves; following Simon Wood's book we'll use penalised regression splines.\n\nFirstly, we'll use `patsy` to create some basic pline models. The data we're using comes from https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Wage.html. It's plotted below:",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('Wage.csv')\nage_grid = np.arange(df.age.min(), df.age.max()).reshape(-1,1)\nplt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)",
"_____no_output_____"
]
],
[
[
"GAMs are essentially linear models, but in a very special (and useful!) basis made of regression splines. We can use the `bs()` function in `patsy` to create such a basis for us:",
"_____no_output_____"
]
],
[
[
"transformed_x1 = patsy.dmatrix(\"bs(df.age, knots=(25,40,60), degree=3, include_intercept=False)\", {\"df.age\": df.age}, return_type='dataframe')\nfit1 = sm.GLM(df.wage, transformed_x1).fit()",
"_____no_output_____"
],
[
"fit1.params",
"_____no_output_____"
],
[
"age_grid = np.arange(df.age.min(), df.age.max()).reshape(-1,1)\npred = fit1.predict(patsy.dmatrix(\"bs(age_grid, knots=(25,40,60), include_intercept=False)\",\n{\"age_grid\": age_grid}, return_type='dataframe'))\nplt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)\nplt.plot(age_grid, pred, color='b', label='Specifying three knots')\nplt.xlim(15,85)\nplt.ylim(0,350)\nplt.xlabel('age')\nplt.ylabel('wage')",
"_____no_output_____"
]
],
[
[
"Here we have prespecified knots at ages 25, 40, and 60. This produces a spline with six basis functions. A cubic spline has 7 degrees of freedom: one for the intercept, and two for each order. We could also have specified knot points at uniform quantiles of the data:",
"_____no_output_____"
]
],
[
[
"# Specifying 6 degrees of freedom\ntransformed_x2 = patsy.dmatrix(\"bs(df.age, df=6, include_intercept=False)\",\n{\"df.age\": df.age}, return_type='dataframe')\nfit2 = sm.GLM(df.wage, transformed_x2).fit()\nfit2.params",
"_____no_output_____"
],
[
"age_grid = np.arange(df.age.min(), df.age.max()).reshape(-1,1)\npred = fit2.predict(patsy.dmatrix(\"bs(age_grid, df=6, include_intercept=False)\",\n{\"age_grid\": age_grid}, return_type='dataframe'))\nplt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)\nplt.plot(age_grid, pred, color='b', label='Specifying three knots')\nplt.xlim(15,85)\nplt.ylim(0,350)\nplt.xlabel('age')\nplt.ylabel('wage')",
"_____no_output_____"
]
],
[
[
"Finally, we can also fit natural splines with the `cr()` function:",
"_____no_output_____"
]
],
[
[
"# Specifying 4 degrees of freedom\ntransformed_x3 = patsy.dmatrix(\"cr(df.age, df=4)\", {\"df.age\": df.age}, return_type='dataframe')\nfit3 = sm.GLM(df.wage, transformed_x3).fit()\nfit3.params",
"_____no_output_____"
],
[
"pred = fit3.predict(patsy.dmatrix(\"cr(age_grid, df=4)\", {\"age_grid\": age_grid}, return_type='dataframe'))\nplt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)\nplt.plot(age_grid, pred, color='g', label='Natural spline df=4')\nplt.legend()\nplt.xlim(15,85)\nplt.ylim(0,350)\nplt.xlabel('age')\nplt.ylabel('wage')",
"_____no_output_____"
]
],
[
[
"Let's see how these fits all stack together:",
"_____no_output_____"
]
],
[
[
"# Generate a sequence of age values spanning the range\nage_grid = np.arange(df.age.min(), df.age.max()).reshape(-1,1)\n# Make some predictions\npred1 = fit1.predict(patsy.dmatrix(\"bs(age_grid, knots=(25,40,60), include_intercept=False)\",\n{\"age_grid\": age_grid}, return_type='dataframe'))\npred2 = fit2.predict(patsy.dmatrix(\"bs(age_grid, df=6, include_intercept=False)\",\n{\"age_grid\": age_grid}, return_type='dataframe'))\npred3 = fit3.predict(patsy.dmatrix(\"cr(age_grid, df=4)\", {\"age_grid\": age_grid}, return_type='dataframe'))\n# Plot the splines and error bands\nplt.scatter(df.age, df.wage, facecolor='None', edgecolor='k', alpha=0.1)\nplt.plot(age_grid, pred1, color='b', label='Specifying three knots')\nplt.plot(age_grid, pred2, color='r', label='Specifying df=6')\nplt.plot(age_grid, pred3, color='g', label='Natural spline df=4')\nplt.legend()\nplt.xlim(15,85)\nplt.ylim(0,350)\nplt.xlabel('age')\nplt.ylabel('wage')",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport patsy\nimport scipy as sp\nimport seaborn as sns\nfrom statsmodels import api as sm\n\n%matplotlib inline",
"_____no_output_____"
],
[
"df = pd.read_csv('mcycle.csv')\ndf = df.drop('Unnamed: 0', axis=1)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(8, 6))\nblue = sns.color_palette()[0]\nax.scatter(df.times, df.accel, c=blue, alpha=0.5)\nax.set_xlabel('time')\nax.set_ylabel('Acceleration')",
"_____no_output_____"
]
],
[
[
"As discussed earlier: GAMs are smooth, semi-parametric models of the form:\n\n$$ y = \\sum_{i=0}^{n-1} \\beta_i f_i\\left(x_i\\right) $$\n\nwhere \\\\(y\\\\) is the dependent variable, \\\\(x_i\\\\) are the independent variables, \\\\(\\beta\\\\) are the model coefficients, and \\\\(f_i\\\\) are the feature functions.\n\nWe build the \\\\(f_i\\\\) using a type of function called a spline. Since our data is 1D, we can model it as:\n\n$$ y = \\beta_0 + f\\left( x \\right) + \\varepsilon $$\n\nWe must also choose a basis for \\\\( f \\\\):\n\n$$ f \\left( x \\right) = \\beta_1 B_1\\left(x\\right) + \\ldots + \\beta_k B_k\\left(x\\right) $$\n\nWe define \n\n$$ X = \\left[1, x_1, \\ldots, x_k \\right] $$\n\nso we can write:\n\n$ y = \\beta_0 + f\\left( x \\right) + \\varepsilon = X\\beta + \\varepsilon $$\n\nWe choose to minimise the sum of squares again, this time with a regularisation term:\n\n$$ \\frac{1}{2} \\lVert y - X\\beta \\rVert + \\lambda \\int_0^1 f''\\left(x\\right)^2 dx $$\n\nYou can show (you, not me!) that the second term can always be written:\n\n$$ \\int_0^1 f''\\left(x\\right)^2 dx = \\beta^T S \\beta $$\n\nwhere \\\\( S \\\\) is a postive (semi)-definiate matrix (i.e. all it's eigenvalues are positive or 0). Therefore our objective function becomes:\n\n$$ \\frac{1}{2} \\lVert y - X\\beta \\rVert + \\lambda \\beta^T S \\beta dx $$\n \nand we can use the techniques we've developed fitting linear models to fit additive models! We'll start by fitting a univariate spline, then maybe something more complicated.",
"_____no_output_____"
]
],
[
[
"def R(x, z):\n return ((z - 0.5)**2 - 1 / 12) * ((x - 0.5)**2 - 1 / 12) / 4 - ((np.abs(x - z) - 0.5)**4 - 0.5 * (np.abs(x - z) - 0.5)**2 + 7 / 240) / 24\n\nR = np.frompyfunc(R, 2, 1)\n\ndef R_(x):\n return R.outer(x, knots).astype(np.float64)",
"_____no_output_____"
],
[
"q = 20\n\nknots = df.times.quantile(np.linspace(0, 1, q))",
"_____no_output_____"
],
[
"y, X = patsy.dmatrices('accel ~ times + R_(times)', data=df)",
"_____no_output_____"
],
[
"S = np.zeros((q + 2, q + 2))\nS[2:, 2:] = R_(knots)",
"_____no_output_____"
],
[
"B = np.zeros_like(S)\nB[2:, 2:] = np.real_if_close(sp.linalg.sqrtm(S[2:, 2:]), tol=10**8)",
"/Users/thomas.kealy/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: ComplexWarning: Casting complex values to real discards the imaginary part\n \n"
],
[
"def fit(y, X, B, lambda_=1.0):\n # build the augmented matrices\n y_ = np.vstack((y, np.zeros((q + 2, 1))))\n X_ = np.vstack((X, np.sqrt(lambda_) * B))\n \n return sm.OLS(y_, X_).fit()",
"_____no_output_____"
],
[
"min_time = df.times.min()\nmax_time = df.times.max()\n\nplot_x = np.linspace(min_time, max_time, 100)\nplot_X = patsy.dmatrix('times + R_(times)', {'times': plot_x})\n\nresults = fit(y, X, B)\n\nfig, ax = plt.subplots(figsize=(8, 6))\nblue = sns.color_palette()[0]\nax.scatter(df.times, df.accel, c=blue, alpha=0.5)\nax.plot(plot_x, results.predict(plot_X))\nax.set_xlabel('time')\nax.set_ylabel('accel')\nax.set_title(r'$\\lambda = {}$'.format(1.0))",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec641a22a04dd29fdc8407dd513ac42d01e50a77 | 35,633 | ipynb | Jupyter Notebook | 01_the_machine_learning_landscape_copy.ipynb | Jnewgeek/handson-ml | 8fa44a394604d097d03687737bcaef2af001f542 | [
"Apache-2.0"
]
| null | null | null | 01_the_machine_learning_landscape_copy.ipynb | Jnewgeek/handson-ml | 8fa44a394604d097d03687737bcaef2af001f542 | [
"Apache-2.0"
]
| null | null | null | 01_the_machine_learning_landscape_copy.ipynb | Jnewgeek/handson-ml | 8fa44a394604d097d03687737bcaef2af001f542 | [
"Apache-2.0"
]
| null | null | null | 119.573826 | 15,440 | 0.84433 | [
[
[
"# 兼容Python2 和 Python3\nfrom __future__ import division,print_function,unicode_literals\n\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings(action=\"ignore\",message=\"^internal gelsd\")\n\n# 设置绘图参数\nmpl.rc(\"axes\",labelsize=14)\nmpl.rc(\"xtick\",labelsize=12)\nmpl.rc(\"ytick\",labelsize=12)\nplt.rcParams[\"font.sans-serif\"]=[\"SimHei\"]\nplt.rcParams[\"axes.unicode_minus\"]=False\n\n# 设置图片存储路径\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"fundamentals\"\n\n# 存储图片函数\ndef save_fig(fig_id,tight_layout=True):\n path=os.path.join(PROJECT_ROOT_DIR,\"images\",CHAPTER_ID,fig_id+\".png\")\n print(\"Saving figure %s\"%fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path,format=\"png\",dpi=300)",
"_____no_output_____"
],
[
"# 导入线性回归模块\nimport copy\nfrom sklearn.linear_model import LinearRegression as LR\nfrom sklearn.neighbors import KNeighborsRegressor as KNN\n\n# load data\ndatapath = os.path.join(\"datasets\", \"lifesat\", \"\")\noecd_bli = pd.read_csv(datapath + \"oecd_bli_2015.csv\", thousands=',')\ngdp_per_capita = pd.read_csv(datapath + \"gdp_per_capita.csv\",thousands=',',delimiter='\\t',\n encoding='latin1', na_values=\"n/a\")\n\n# connect the two dataframe\ndef prepare_country_stats(select=\"keep\",oecd=oecd_bli.copy(), gdp=gdp_per_capita.copy()):\n if select.lower() not in [\"keep\",\"remove\",\"full\"]:\n raise ValueError(\"prepare_country_stats(select,*karg), select must be in ('keep','remove','full') !\")\n oecd=pd.pivot_table(oecd[oecd[\"INEQUALITY\"]==\"TOT\"],columns=\"Indicator\",index=\"Country\",values=\"Value\")\n gdp.rename(columns={\"2015\": \"GDP per capita\"}, inplace=True)\n gdp.set_index(\"Country\",inplace=True)\n # 连表\n full_country_stats = pd.merge(left=oecd, right=gdp,\n left_index=True, right_index=True).sort_values(by=\"GDP per capita\")\n remove_indices=[0, 1, 6, 8, 33, 34, 35]\n keep_indices=list(set(range(36))-set(remove_indices))\n if select.lower()==\"keep\":\n return full_country_stats[[\"GDP per capita\",\"Life satisfaction\"]].iloc[keep_indices]\n elif select.lower()==\"remove\":\n return full_country_stats[[\"GDP per capita\",\"Life satisfaction\"]].iloc[remove_indices]\n else:\n return full_country_stats[[\"GDP per capita\",\"Life satisfaction\"]]\n\ncountry_stats = prepare_country_stats()\n# set model data\nX=np.c_[country_stats[\"GDP per capita\"]]\ny=np.c_[country_stats[\"Life satisfaction\"]]\n\n%matplotlib inline\ncountry_stats.plot(kind=\"scatter\",x=\"GDP per capita\",y=\"Life satisfaction\")\n\n# 训练模型\nmodel=LR()\nmodel.fit(X,y)\n\n# 预测新值\nX_new=[[22587]]\nprint(\"< 线性回归 > GDP: %d —————> Life satisfaction:%.2f\"%(X_new[0][0],model.predict(X_new)[0][0]))\n\n# k近邻算法\nclf=KNN()\nclf.fit(X,y)\nprint(\"< KNN近邻 > GDP: %d —————> Life 
satisfaction:%.2f\"%(X_new[0][0],clf.predict(X_new)[0][0]))",
"< 线性回归 > GDP: 22587 —————> Life satisfaction:5.96\n< KNN近邻 > GDP: 22587 —————> Life satisfaction:5.58\n"
],
[
"full_country_stats=prepare_country_stats(\"full\",oecd_bli.copy(),gdp_per_capita.copy())\n\n# 分割数据\nremove_indices=[0, 1, 6, 8, 33, 34, 35]\nkeep_indices=list(set(range(36))-set(remove_indices))\n\nsample_data=full_country_stats.iloc[keep_indices]\nmiss_data=full_country_stats.iloc[remove_indices]\n\nfull_country_stats.head()",
"_____no_output_____"
],
[
"# 绘图\nsample_data.plot(kind=\"scatter\",x=\"GDP per capita\",y=\"Life satisfaction\",figsize=(5,3))\nplt.axis([0,60000,0,10])\nposition_text = {\n \"Hungary\": (5000, 1),\n \"Korea\": (18000, 1.7),\n \"France\": (29000, 2.4),\n \"Australia\": (40000, 3.0),\n \"United States\": (52000, 3.8),\n}\nfor country, pos_text in position_text.items():\n pos_data_x, pos_data_y = sample_data.loc[country]\n country = \"U.S.\" if country == \"United States\" else country\n plt.annotate(country, xy=(pos_data_x, pos_data_y), xytext=pos_text,\n arrowprops=dict(facecolor='black', width=0.5, shrink=0.1, headwidth=5))\n plt.plot(pos_data_x, pos_data_y, \"ro\")\nsave_fig('money_happy_scatterplot')",
"Saving figure money_happy_scatterplot\n"
],
[
"sample_data.to_csv(os.path.join(\"datasets\", \"lifesat\", \"lifesat.csv\"))",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec642303e81f34e3ee321004eb97a3c770eedade | 138,086 | ipynb | Jupyter Notebook | huggingface_t5_3_1.ipynb | skywalker00001/Conterfactual-Reasoning-Project | 48464dcd073507801fdea50dbb3baac10644470f | [
"MIT"
]
| null | null | null | huggingface_t5_3_1.ipynb | skywalker00001/Conterfactual-Reasoning-Project | 48464dcd073507801fdea50dbb3baac10644470f | [
"MIT"
]
| null | null | null | huggingface_t5_3_1.ipynb | skywalker00001/Conterfactual-Reasoning-Project | 48464dcd073507801fdea50dbb3baac10644470f | [
"MIT"
]
| null | null | null | 39.007345 | 604 | 0.46823 | [
[
[
"<a href=\"https://colab.research.google.com/github/skywalker00001/Conterfactual-Reasoning-Project/blob/main/huggingface_t5_3_1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Preparation",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')\nroot = 'drive/MyDrive/LM/'",
"Mounted at /content/drive\n"
],
[
"!pip install sentencepiece\n!pip install transformers -q\n!pip install wandb -q",
"Collecting sentencepiece\n Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n\u001b[K |████████████████████████████████| 1.2 MB 4.2 MB/s \n\u001b[?25hInstalling collected packages: sentencepiece\nSuccessfully installed sentencepiece-0.1.96\n\u001b[K |████████████████████████████████| 3.5 MB 4.2 MB/s \n\u001b[K |████████████████████████████████| 596 kB 69.9 MB/s \n\u001b[K |████████████████████████████████| 6.8 MB 48.9 MB/s \n\u001b[K |████████████████████████████████| 67 kB 5.5 MB/s \n\u001b[K |████████████████████████████████| 895 kB 73.9 MB/s \n\u001b[K |████████████████████████████████| 1.7 MB 4.2 MB/s \n\u001b[K |████████████████████████████████| 143 kB 73.0 MB/s \n\u001b[K |████████████████████████████████| 180 kB 66.8 MB/s \n\u001b[K |████████████████████████████████| 63 kB 1.1 MB/s \n\u001b[?25h Building wheel for pathtools (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
],
[
"# Importing stock libraries\nimport numpy as np\nimport pandas as pd\nimport time\nfrom tqdm import tqdm\nimport os\nimport regex as re\nimport torch\nfrom torch import cuda\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler\n\n# Importing the T5 modules from huggingface/transformers\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\n\n# WandB – Import the wandb library\nimport wandb",
"_____no_output_____"
],
[
"# Checking out the GPU we have access to. This is output is from the google colab version. \n!nvidia-smi",
"Wed Feb 9 23:11:34 2022 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n| N/A 35C P0 26W / 250W | 0MiB / 16280MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
],
[
"# # Setting up the device for GPU usage\n\ndevice = 'cuda' if cuda.is_available() else 'cpu'\nprint(\"Device is: \", device)\n\n# Set random seeds and deterministic pytorch for reproducibility\nSEED = 42\ntorch.manual_seed(SEED) # pytorch random seed\nnp.random.seed(SEED) # numpy random seed\ntorch.backends.cudnn.deterministic = True",
"Device is: cuda\n"
],
[
"# Login to wandb to log the model run and all the parameters\n# 7229adacb32965027d73056a6927efd0365a00bc\n!wandb login",
"\u001b[34m\u001b[1mwandb\u001b[0m: You can find your API key in your browser here: https://wandb.ai/authorize\n\u001b[34m\u001b[1mwandb\u001b[0m: Paste an API key from your profile and hit enter, or press ctrl+c to quit: \n\u001b[34m\u001b[1mwandb\u001b[0m: Appending key for api.wandb.ai to your netrc file: /root/.netrc\n"
],
[
"# Global Parameter\nmodel_version = \"3.1\"\n\n# WandB – Initialize a new run\nwandb.init(project=\"counterfactual\"+model_version)\n\n# WandB – Config is a variable that holds and saves hyperparameters and inputs\n# Defining some key variables that will be used later on in the training \nconfig = wandb.config # Initialize config\nconfig.TRAIN_BATCH_SIZE = 16 # input batch size for training (default: 64)\nconfig.VALID_BATCH_SIZE = 32 # input batch size for testing (default: 1000)\nconfig.TRAIN_EPOCHS = 51 # number of epochs to train (default: 10)\nconfig.VAL_EPOCHS = 1 \nconfig.LEARNING_RATE = 1e-4 # learning rate (default: 0.01)\nconfig.SEED = 42 # random seed (default: 42)\nconfig.SOURCE_LEN = 150\nconfig.TARGET_LEN = 110\nconfig.LOAD_PATH = root+'models/model'+model_version+'.tar'\nconfig.SAVE_PATH = root+'models/model'+model_version+'.tar'",
"\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mskywalk3r\u001b[0m (use `wandb login --relogin` to force relogin)\n"
],
[
"PRETRAINED_MODEL_NAME = \"t5-base\"\n# tokenzier for encoding the text\ntokenizer = T5Tokenizer.from_pretrained(PRETRAINED_MODEL_NAME)\n# Defining the model. We are using t5-base model and added a Language model layer on top for generation of Summary. \n# Further this model is sent to device (GPU/TPU) for using the hardware.\nmodel = T5ForConditionalGeneration.from_pretrained(PRETRAINED_MODEL_NAME)\nmodel = model.to(device)",
"_____no_output_____"
],
[
"# Defining the optimizer that will be used to tune the weights of the network in the training session. \noptimizer = torch.optim.Adam(params = model.parameters(), lr=config.LEARNING_RATE)",
"_____no_output_____"
]
],
[
[
"# Load dataframe",
"_____no_output_____"
]
],
[
[
"#training df\nsmall_path = root + '/TimeTravel/train_supervised_small.json'\nsmall_df = pd.read_json(small_path, lines=True)\nsmall_df.head()",
"_____no_output_____"
],
[
"print(small_df.loc[:,\"edited_ending\"][0])",
"['I paid the cashier and patiently waited at the counter for my drink.', 'When she handed me the drink, the lid came off and spilled on me.', 'The coffee hurt and I had to go home and change clothes.']\n"
],
[
"# text_a: source, text_b: target\ntext_a, text_b = [], []\n\nfor i in range(len(small_df)):\n text_a.append(\"premise: \" + small_df.loc[i, 'premise'] + \" initial: \" + \\\n small_df.loc[i, 'initial'] + \" counterfactual: \" + small_df.loc[i, 'counterfactual'] + \\\n \" original_ending: \" + small_df.loc[i, 'original_ending'])\n #text_a.append(re.sub(re_pat, df.loc[i, 'edit1'], df.loc[i, 'original1']))\n text_b.append(\"edited_ending: \" + small_df.loc[i, 'edited_ending'][0] +\" \"+ small_df.loc[i, 'edited_ending'][1] +\" \"+ \\\n small_df.loc[i, 'edited_ending'][2])",
"_____no_output_____"
],
[
"train_df = pd.DataFrame({'source_text': text_a, 'target_text': text_b}) \ntrain_df.head()",
"_____no_output_____"
],
[
"print(train_df.loc[0, \"source_text\"])\nprint(\"-------------\")\nprint(train_df.loc[0, \"target_text\"])",
"premise: On my way to work I stopped to get some coffee. initial: I went through the drive through and placed my order. counterfactual: I went inside to place my order. original_ending: I paid the cashier and patiently waited for my drink. When she handed me the drink, the lid came off and spilled on me. The coffee hurt and I had to go home and change clothes.\n-------------\nedited_ending: I paid the cashier and patiently waited at the counter for my drink. When she handed me the drink, the lid came off and spilled on me. The coffee hurt and I had to go home and change clothes.\n"
],
[
"print(train_df.shape)",
"(16752, 2)\n"
],
[
"#train_df = train_df[0:4000]",
"_____no_output_____"
],
[
"source_lens = train_df.source_text.apply(lambda x: len(tokenizer.encode_plus(x, return_tensors='pt').input_ids.squeeze())).to_list()\ntarget_lens = train_df.target_text.apply(lambda x: len(tokenizer.encode_plus(x, return_tensors='pt').input_ids.squeeze())).to_list()\n\nprint(\"Max source length is: \", max(source_lens))\nprint(\"Max target length is: \", max(target_lens))",
"Max source length is: 135\nMax target length is: 84\n"
],
[
"# valid df\nlarge_path = root + '/TimeTravel/train_supervised_large.json'\ndf_large = pd.read_json(large_path, lines=True)\nprint(len(df_large))",
"28363\n"
],
[
"small_ids = []\nfor i in range(len(small_df)):\n small_ids.append(small_df.loc[i, 'story_id'])\n\nprint(len(small_ids))",
"16752\n"
],
[
"df_large = df_large[~df_large.story_id.isin(small_ids)]\ndf_large = df_large.reset_index() # must reset index after delete rows\nprint(len(df_large))",
"11613\n"
],
[
"# select data not in training set\n#part_df_large = df_large[0:100]\npart_df_large = df_large[0:1000]\npart_df_large = part_df_large.reset_index()\nprint(len(part_df_large))",
"1000\n"
],
[
"text, gt = [],[] # gt for ground truth\n\nfor i in range(len(part_df_large)):\n text.append(\"premise: \" + part_df_large.loc[i, 'premise'] + \\\n \" initial: \" + part_df_large.loc[i, 'initial'] + \\\n \" counterfactual: \" + part_df_large.loc[i, 'counterfactual'] + \\\n \" original_ending: \" + part_df_large.loc[i, 'original_ending'])\n gt.append(\"edited_ending: \" + part_df_large.loc[i, 'edited_ending'][0] +\" \"+ \\\n part_df_large.loc[i, 'edited_ending'][1] +\" \"+ part_df_large.loc[i, 'edited_ending'][2])\n\nprint(len(text))",
"1000\n"
],
[
"valid_df = pd.DataFrame({'source_text': text, 'target_text': gt}) \nvalid_df.head()",
"_____no_output_____"
]
],
[
[
"# Dataset and Dataloader",
"_____no_output_____"
]
],
[
[
"# Creating a custom dataset for reading the dataframe and loading it into the dataloader to pass it to the neural network at a later stage for finetuning the model and to prepare it for predictions\n\nclass CustomDataset(Dataset):\n\n def __init__(self, dataframe, tokenizer, ori_len, con_len):\n self.tokenizer = tokenizer\n self.data = dataframe\n self.ori_len = ori_len\n self.con_len = con_len\n self.original = self.data.source_text\n self.counterfactual = self.data.target_text\n \n\n def __len__(self):\n return len(self.counterfactual)\n\n def __getitem__(self, index):\n original = str(self.original[index])\n # original = ' '.join(original.split())\n\n counterfactual = str(self.counterfactual[index])\n # counterfactual = ' '.join(counterfactual.split())\n\n source = self.tokenizer.encode_plus(original, max_length= self.ori_len, padding='max_length', return_tensors='pt')\n target = self.tokenizer.encode_plus(counterfactual, max_length= self.con_len, padding='max_length', return_tensors='pt')\n\n source_ids = source['input_ids'].squeeze()\n source_mask = source['attention_mask'].squeeze()\n target_ids = target['input_ids'].squeeze()\n target_mask = target['attention_mask'].squeeze()\n\n return {\n 'source_ids': source_ids.to(dtype=torch.long), \n 'source_mask': source_mask.to(dtype=torch.long), \n 'target_ids': target_ids.to(dtype=torch.long),\n 'target_ids_y': target_ids.to(dtype=torch.long)\n }",
"_____no_output_____"
],
[
"trainingset = CustomDataset(dataframe=train_df, tokenizer=tokenizer, ori_len=config.SOURCE_LEN , con_len=config.TARGET_LEN )\nvalidset = CustomDataset(dataframe=valid_df, tokenizer=tokenizer, ori_len=config.SOURCE_LEN , con_len=config.TARGET_LEN )",
"_____no_output_____"
],
[
"# pick up a data sample\nsample_idx = 4\nsample = trainingset[sample_idx]\n\nsource_ids = sample[\"source_ids\"]\nsource_mask = sample[\"source_mask\"]\ntarget_ids = sample[\"target_ids\"]\ntarget_ids_y = sample[\"target_ids_y\"]\n\nprint(source_ids)",
"tensor([ 3, 17398, 10, 13136, 849, 6102, 15, 160, 11718, 640,\n 8, 1679, 7, 5, 2332, 10, 366, 255, 966, 4363,\n 44, 8, 15415, 6, 3, 9, 3, 31062, 15, 4283,\n 5, 3485, 89, 25481, 10, 366, 255, 966, 4363, 44,\n 8, 15415, 6, 3, 9, 20, 49, 4283, 5, 926,\n 834, 9303, 10, 451, 737, 31, 17, 43, 631, 97,\n 12, 9563, 11, 10719, 1587, 8, 3, 31062, 15, 5,\n 328, 130, 321, 7673, 15, 26, 91, 5, 451, 3725,\n 530, 95, 11, 718, 8, 2095, 5, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n"
],
[
"print(train_df.iloc[sample_idx].target_text)\n\nsen = tokenizer.decode(target_ids, skip_special_tokens=False) # skip_special_tokens=True will be completely same.\nprint(sen)",
"edited_ending: She didn't have enough time to brake and drove towards the deer, They were both knocked out. She eventually got up and called the police.\nedited_ending: She didn't have enough time to brake and drove towards the deer, They were both knocked out. She eventually got up and called the police.</s> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad>\n"
],
[
"# DataLoader\n\ntrain_params = {\n 'batch_size': config.TRAIN_BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 2\n }\n\nval_params = {\n 'batch_size': config.VALID_BATCH_SIZE,\n 'shuffle': False,\n 'num_workers': 2\n }\n\ntraining_loader = DataLoader(trainingset, **train_params)\nval_loader = DataLoader(validset, **val_params)\nprint(len(training_loader))\nprint(len(val_loader))",
"1047\n32\n"
]
],
[
[
"# Define train() and val()",
"_____no_output_____"
]
],
[
[
"def save_model(epoch, model, optimizer, loss, PATH):\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss\n }, PATH)",
"_____no_output_____"
],
[
"def load_model(PATH):\n checkpoint = torch.load(PATH)\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epoch = checkpoint['epoch']\n loss = checkpoint['loss']\n return model, optimizer, epoch, loss",
"_____no_output_____"
],
[
"# Creating the training function. This will be called in the main function. It is run depending on the epoch value.\n# The model is put into train mode and then we wnumerate over the training loader and passed to the defined network \n\ndef train(epoch, tokenizer, model, device, loader, optimizer):\n model.train()\n for i,data in enumerate(loader):\n #len(loader)=10xx\n ids = data['source_ids'].to(device, dtype = torch.long)\n mask = data['source_mask'].to(device, dtype = torch.long)\n y = data['target_ids'].to(device, dtype = torch.long)\n\n # padded ids (pad=0) are set to -100, which means ignore for loss calculation\n y[y[: ,:] == tokenizer.pad_token_id ] = -100\n label_ids = y.to(device)\n\n outputs = model(input_ids = ids, attention_mask = mask, labels=label_ids)\n loss = outputs[0]\n #logit = outputs[1]\n \n if i%50 == 0:\n wandb.log({\"Training Loss\": loss.item()})\n\n if i%600==0:\n print(f'Epoch: {epoch}, Loss: {loss.item()}')\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # xm.optimizer_step(optimizer)\n # xm.mark_step()\n \n if (epoch % 5 == 0):\n save_model(epoch, model, optimizer, loss.item(), config.SAVE_PATH)\n",
"_____no_output_____"
],
[
"def validate(tokenizer, model, device, loader):\n model.eval()\n predictions = []\n actuals = []\n raws = []\n with torch.no_grad():\n for i, data in enumerate(loader):\n y = data['target_ids'].to(device, dtype = torch.long)\n ids = data['source_ids'].to(device, dtype = torch.long)\n mask = data['source_mask'].to(device, dtype = torch.long)\n\n generated_ids = model.generate(\n input_ids = ids,\n attention_mask = mask, \n num_beams=2,\n max_length=config.TARGET_LEN, \n repetition_penalty=2.5, \n length_penalty=1.0, \n early_stopping=True\n )\n raw = [tokenizer.decode(i, skip_special_tokens=True, clean_up_tokenization_spaces=True) for i in ids]\n preds = [tokenizer.decode(i, skip_special_tokens=True, clean_up_tokenization_spaces=True) for i in generated_ids]\n target = [tokenizer.decode(i, skip_special_tokens=True, clean_up_tokenization_spaces=True)for i in y]\n if i%3==0:\n print(f'valid Completed {(i+1)* config.VALID_BATCH_SIZE}')\n\n raws.extend(raw)\n predictions.extend(preds)\n actuals.extend(target)\n return raws, predictions, actuals",
"_____no_output_____"
]
],
[
[
"# main",
"_____no_output_____"
]
],
[
[
"import time\n# Helper function to print time between epochs\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs",
"_____no_output_____"
],
[
"# Log metrics with wandb\n#wandb.watch(model, log=\"all\")\n\n# Training loop\nprint('Initiating Fine-Tuning for the model on counterfactual dataset:')\n\n\nfor epoch in range(config.TRAIN_EPOCHS):\n#for epoch in tqdm(range(config.TRAIN_EPOCHS)):\n start_time = time.time()\n train(epoch, tokenizer, model, device, training_loader, optimizer)\n end_time = time.time()\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n print(f'Epoch: {epoch:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n\n# Mark the run as finished\nwandb.finish()",
"Initiating Fine-Tuning for the model on counterfactual dataset:\nEpoch: 0, Loss: 3.4192261695861816\nEpoch: 0, Loss: 0.8359001874923706\nEpoch: 00 | Epoch Time: 10m 15s\nEpoch: 1, Loss: 0.9867029786109924\nEpoch: 1, Loss: 0.6519565582275391\nEpoch: 01 | Epoch Time: 10m 0s\nEpoch: 2, Loss: 0.8386741876602173\nEpoch: 2, Loss: 0.5638874769210815\nEpoch: 02 | Epoch Time: 10m 0s\nEpoch: 3, Loss: 0.688003659248352\nEpoch: 3, Loss: 0.48243409395217896\nEpoch: 03 | Epoch Time: 10m 0s\nEpoch: 4, Loss: 0.5413828492164612\nEpoch: 4, Loss: 0.5361905097961426\nEpoch: 04 | Epoch Time: 10m 0s\nEpoch: 5, Loss: 0.5124238133430481\nEpoch: 5, Loss: 0.49614372849464417\nEpoch: 05 | Epoch Time: 10m 12s\nEpoch: 6, Loss: 0.36898714303970337\nEpoch: 6, Loss: 0.5739389061927795\nEpoch: 06 | Epoch Time: 10m 0s\nEpoch: 7, Loss: 0.5189867615699768\nEpoch: 7, Loss: 0.33989858627319336\nEpoch: 07 | Epoch Time: 9m 59s\nEpoch: 8, Loss: 0.48177459836006165\nEpoch: 8, Loss: 0.5473092794418335\nEpoch: 08 | Epoch Time: 10m 0s\nEpoch: 9, Loss: 0.41842886805534363\nEpoch: 9, Loss: 0.4664848744869232\nEpoch: 09 | Epoch Time: 10m 0s\nEpoch: 10, Loss: 0.49663037061691284\nEpoch: 10, Loss: 0.5266245007514954\nEpoch: 10 | Epoch Time: 10m 12s\nEpoch: 11, Loss: 0.42779216170310974\nEpoch: 11, Loss: 0.29277732968330383\nEpoch: 11 | Epoch Time: 9m 59s\nEpoch: 12, Loss: 0.39399686455726624\nEpoch: 12, Loss: 0.3503819704055786\nEpoch: 12 | Epoch Time: 9m 59s\nEpoch: 13, Loss: 0.34568777680397034\nEpoch: 13, Loss: 0.3605706989765167\nEpoch: 13 | Epoch Time: 9m 59s\nEpoch: 14, Loss: 0.3241945207118988\nEpoch: 14, Loss: 0.29521676898002625\nEpoch: 14 | Epoch Time: 9m 59s\nEpoch: 15, Loss: 0.21949131786823273\nEpoch: 15, Loss: 0.21016576886177063\nEpoch: 15 | Epoch Time: 10m 11s\nEpoch: 16, Loss: 0.23567348718643188\nEpoch: 16, Loss: 0.26739874482154846\nEpoch: 16 | Epoch Time: 9m 59s\nEpoch: 17, Loss: 0.2194230705499649\nEpoch: 17, Loss: 0.19873543083667755\nEpoch: 17 | Epoch Time: 9m 59s\nEpoch: 18, Loss: 
0.1872090995311737\nEpoch: 18, Loss: 0.26092198491096497\nEpoch: 18 | Epoch Time: 9m 59s\nEpoch: 19, Loss: 0.18494686484336853\nEpoch: 19, Loss: 0.18824121356010437\nEpoch: 19 | Epoch Time: 9m 59s\nEpoch: 20, Loss: 0.19446420669555664\nEpoch: 20, Loss: 0.206057608127594\nEpoch: 20 | Epoch Time: 10m 12s\nEpoch: 21, Loss: 0.18267016112804413\nEpoch: 21, Loss: 0.1715538501739502\nEpoch: 21 | Epoch Time: 10m 0s\nEpoch: 22, Loss: 0.152441143989563\nEpoch: 22, Loss: 0.150435671210289\nEpoch: 22 | Epoch Time: 10m 0s\nEpoch: 23, Loss: 0.158951997756958\nEpoch: 23, Loss: 0.17140167951583862\nEpoch: 23 | Epoch Time: 9m 59s\nEpoch: 24, Loss: 0.097857266664505\nEpoch: 24, Loss: 0.1955243945121765\nEpoch: 24 | Epoch Time: 9m 59s\nEpoch: 25, Loss: 0.17591549456119537\nEpoch: 25, Loss: 0.17546427249908447\nEpoch: 25 | Epoch Time: 10m 12s\nEpoch: 26, Loss: 0.11558157950639725\nEpoch: 26, Loss: 0.14941653609275818\nEpoch: 26 | Epoch Time: 10m 0s\nEpoch: 27, Loss: 0.07351997494697571\nEpoch: 27, Loss: 0.14452993869781494\nEpoch: 27 | Epoch Time: 10m 0s\nEpoch: 28, Loss: 0.09547118842601776\nEpoch: 28, Loss: 0.12079210579395294\nEpoch: 28 | Epoch Time: 10m 0s\nEpoch: 29, Loss: 0.08220355212688446\nEpoch: 29, Loss: 0.07305309921503067\nEpoch: 29 | Epoch Time: 10m 0s\nEpoch: 30, Loss: 0.0687272697687149\nEpoch: 30, Loss: 0.08131875097751617\nEpoch: 30 | Epoch Time: 10m 12s\nEpoch: 31, Loss: 0.07797063142061234\nEpoch: 31, Loss: 0.06660052388906479\nEpoch: 31 | Epoch Time: 10m 0s\nEpoch: 32, Loss: 0.06829015910625458\nEpoch: 32, Loss: 0.08563121408224106\nEpoch: 32 | Epoch Time: 10m 0s\nEpoch: 33, Loss: 0.03990611433982849\nEpoch: 33, Loss: 0.04895392432808876\nEpoch: 33 | Epoch Time: 10m 0s\nEpoch: 34, Loss: 0.08336804807186127\nEpoch: 34, Loss: 0.05480656772851944\nEpoch: 34 | Epoch Time: 10m 0s\nEpoch: 35, Loss: 0.07358294725418091\nEpoch: 35, Loss: 0.07487856596708298\nEpoch: 35 | Epoch Time: 10m 12s\nEpoch: 36, Loss: 0.04918975010514259\nEpoch: 36, Loss: 0.08889202028512955\nEpoch: 
36 | Epoch Time: 10m 0s\nEpoch: 37, Loss: 0.0566418431699276\nEpoch: 37, Loss: 0.03676319494843483\nEpoch: 37 | Epoch Time: 10m 0s\nEpoch: 38, Loss: 0.024736404418945312\nEpoch: 38, Loss: 0.040765974670648575\nEpoch: 38 | Epoch Time: 10m 0s\nEpoch: 39, Loss: 0.019186152145266533\nEpoch: 39, Loss: 0.03654420003294945\nEpoch: 39 | Epoch Time: 10m 0s\nEpoch: 40, Loss: 0.029543470591306686\nEpoch: 40, Loss: 0.04859786853194237\nEpoch: 40 | Epoch Time: 10m 12s\nEpoch: 41, Loss: 0.026402462273836136\nEpoch: 41, Loss: 0.04164007306098938\nEpoch: 41 | Epoch Time: 10m 0s\nEpoch: 42, Loss: 0.03297251835465431\nEpoch: 42, Loss: 0.02565242350101471\nEpoch: 42 | Epoch Time: 10m 0s\nEpoch: 43, Loss: 0.039225444197654724\nEpoch: 43, Loss: 0.0622183196246624\nEpoch: 43 | Epoch Time: 10m 0s\nEpoch: 44, Loss: 0.046513594686985016\nEpoch: 44, Loss: 0.036620184779167175\nEpoch: 44 | Epoch Time: 10m 0s\nEpoch: 45, Loss: 0.03649183362722397\nEpoch: 45, Loss: 0.05073566362261772\nEpoch: 45 | Epoch Time: 10m 12s\nEpoch: 46, Loss: 0.04989836364984512\nEpoch: 46, Loss: 0.06523306667804718\nEpoch: 46 | Epoch Time: 10m 0s\nEpoch: 47, Loss: 0.012372500263154507\nEpoch: 47, Loss: 0.05289134383201599\nEpoch: 47 | Epoch Time: 10m 0s\nEpoch: 48, Loss: 0.031456951051950455\nEpoch: 48, Loss: 0.0388609804213047\nEpoch: 48 | Epoch Time: 10m 0s\nEpoch: 49, Loss: 0.027290072292089462\nEpoch: 49, Loss: 0.029819805175065994\nEpoch: 49 | Epoch Time: 10m 0s\nEpoch: 50, Loss: 0.018867217004299164\nEpoch: 50, Loss: 0.025691332295536995\nEpoch: 50 | Epoch Time: 10m 11s\n"
],
[
"# Load model\n# model = T5ForConditionalGeneration.from_pretrained(PRETRAINED_MODEL_NAME)\n# model = model.to(device)\n# optimizer = torch.optim.Adam(params = model.parameters(), lr=config.LEARNING_RATE)\n\n# model, optimizer, epoch, loss = load_model(config.LOAD_PATH)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"# Validation loop and saving the resulting file with predictions and acutals in a dataframe.\n# Saving the dataframe as predictions.csv\nprint('Now inferecing:')\nstart_time = time.time()\nraws, predictions, actuals = validate(tokenizer, model, device, val_loader)\nend_time = time.time()\nepoch_mins, epoch_secs = epoch_time(start_time, end_time)\nprint(f'Time: {epoch_mins}m {epoch_secs}s')\n\nfinal_df = pd.DataFrame({'raw_text': raws, 'ground_truth': actuals, 'generated_text': predictions})\n#final_df.to_csv(root + 'results/' + 'output' + model_version + '.csv')\nfinal_df.to_excel(root + 'results/' + 'output' + model_version + '.xlsx')\nprint('Output Files generated for review')",
"Now inferecing:\nvalid Completed 32\nvalid Completed 128\nvalid Completed 224\nvalid Completed 320\nvalid Completed 416\nvalid Completed 512\nvalid Completed 608\nvalid Completed 704\nvalid Completed 800\nvalid Completed 896\nvalid Completed 992\nTime: 3m 13s\nOutput Files generated for review\n"
],
[
"print(len(actuals))",
"1000\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec642ac34560277e2e2c26943f4236ff82002ce8 | 11,741 | ipynb | Jupyter Notebook | notebooks/algorithms.ipynb | real-slim-chadi/DSIRP | 1fab557ece1145d8f513cf4db15f4b6b976e1619 | [
"MIT"
]
| null | null | null | notebooks/algorithms.ipynb | real-slim-chadi/DSIRP | 1fab557ece1145d8f513cf4db15f4b6b976e1619 | [
"MIT"
]
| null | null | null | notebooks/algorithms.ipynb | real-slim-chadi/DSIRP | 1fab557ece1145d8f513cf4db15f4b6b976e1619 | [
"MIT"
]
| null | null | null | 23.112205 | 292 | 0.552082 | [
[
[
"# Algorithms",
"_____no_output_____"
],
[
"[Click here to run this chapter on Colab](https://colab.research.google.com/github/AllenDowney/DSIRP/blob/main/notebooks/algorithms.ipynb)",
"_____no_output_____"
],
[
"## Searching for anagrams\n\nIn this notebook we'll implement algorithms for two tasks:\n\n* Testing a pair of words to see if they are anagrams of each other, that is, if you can rearrange the letters in one word to spell the other.\n\n* Searching a list of words for all pairs that are anagrams of each other.\n\nThere is a point to these examples, which I will explain at the end.",
"_____no_output_____"
],
[
"**Exercise 1:** Write a function that takes two words and returns `True` if they are anagrams. Test your function with the examples below.",
"_____no_output_____"
]
],
[
[
"def is_anagram(word1, word2):\n return False",
"_____no_output_____"
],
[
"is_anagram('tachymetric', 'mccarthyite') # True",
"_____no_output_____"
],
[
"is_anagram('post', 'top') # False, letter not present",
"_____no_output_____"
],
[
"is_anagram('pott', 'top') # False, letter present but not enough copies",
"_____no_output_____"
],
[
"is_anagram('top', 'post') # False, letters left over at the end",
"_____no_output_____"
],
[
"is_anagram('topss', 'postt') # False",
"_____no_output_____"
]
],
[
[
"**Exercise 2:** Use `timeit` to see how fast your function is for these examples:",
"_____no_output_____"
]
],
[
[
"%timeit is_anagram('tops', 'spot')",
"_____no_output_____"
],
[
"%timeit is_anagram('tachymetric', 'mccarthyite')",
"_____no_output_____"
]
],
[
[
"NOTE: How can we compare algorithms running on different computers?",
"_____no_output_____"
],
[
"## Searching for anagram pairs",
"_____no_output_____"
],
[
"**Exercise 3:** Write a function that takes a word list and returns a list of all anagram pairs.",
"_____no_output_____"
]
],
[
[
"short_word_list = ['proudest', 'stop', 'pots', 'tops', 'sprouted']",
"_____no_output_____"
],
[
"def all_anagram_pairs(word_list):\n return []",
"_____no_output_____"
],
[
"all_anagram_pairs(short_word_list)",
"_____no_output_____"
]
],
[
[
"The following cell downloads a file containing a list of English words.",
"_____no_output_____"
]
],
[
[
"from os.path import basename, exists\n\ndef download(url):\n filename = basename(url)\n if not exists(filename):\n from urllib.request import urlretrieve\n local, _ = urlretrieve(url, filename)\n print('Downloaded ' + local)\n \ndownload('https://github.com/AllenDowney/DSIRP/raw/main/american-english')",
"_____no_output_____"
]
],
[
[
"The following function reads a file and returns a set of words (I used a set because after we convert words to lower case, there are some repeats.)",
"_____no_output_____"
]
],
[
[
"def read_words(filename):\n \"\"\"Read lines from a file and split them into words.\"\"\"\n res = set()\n for line in open(filename):\n for word in line.split():\n res.add(word.strip().lower())\n return res",
"_____no_output_____"
],
[
"word_list = read_words('american-english')\nlen(word_list)",
"_____no_output_____"
]
],
[
[
"**Exercise 4:** Loop through the word list and print all words that are anagrams of `stop`.",
"_____no_output_____"
],
[
"Now run `all_anagram_pairs` with the full `word_list`:",
"_____no_output_____"
]
],
[
[
"# pairs = all_anagram_pairs(word_list)",
"_____no_output_____"
]
],
[
[
"**Exercise 5:** While that's running, let's estimate how long it's going to take.",
"_____no_output_____"
],
[
"## A better algorithm\n\n**Exercise 6:** Write a better algorithm! Hint: make a dictionary. How much faster is your algorithm?",
"_____no_output_____"
]
],
[
[
"def all_anagram_lists(word_list):\n \"\"\"Finds all anagrams in a list of words.\n\n word_list: sequence of strings\n \"\"\"\n return {}",
"_____no_output_____"
],
[
"%time anagram_map = all_anagram_lists(word_list)",
"_____no_output_____"
],
[
"len(anagram_map)",
"_____no_output_____"
]
],
[
[
"## Summary\n\nWhat is the point of the examples in this notebook?\n\n* The different versions of `is_anagram` show that, when inputs are small, it is hard to say which algorithm will be the fastest. It often depends on details of the implementation. Anyway, the differences tend to be small, so it might not matter much in practice.\n\n* The different algorithms we used to search for anagram pairs show that, when inputs are large, we can often tell which algorithm will be fastest. And the difference between a fast algorithm and a slow one can be huge!",
"_____no_output_____"
],
[
"## Exercises\n\nBefore you work on these exercises, you might want to read the Python [Sorting How-To](https://docs.python.org/3/howto/sorting.html). It uses `lambda` to define an anonymous function, which [you can read about here](https://www.w3schools.com/python/python_lambda.asp).\n\n**Exercise 7:**\nMake a dictionary like `anagram_map` that contains only keys that map to a list with more than one element. You can use a `for` loop to make a new dictionary, or a [dictionary comprehension](https://www.freecodecamp.org/news/dictionary-comprehension-in-python-explained-with-examples/).",
"_____no_output_____"
],
[
"**Exercise 8:**\nFind the longest word with at least one anagram. Suggestion: use the `key` argument of `sort` or `sorted` ([see here](https://stackoverflow.com/questions/8966538/syntax-behind-sortedkey-lambda)).",
"_____no_output_____"
],
[
"**Exercise 9:**\nFind the largest list of words that are anagrams of each other.",
"_____no_output_____"
],
[
"**Exercise 10:**\nWrite a function that takes an integer `word_length` and finds the longest list of words with the given length that are anagrams of each other.",
"_____no_output_____"
],
[
"**Exercise 11:**\nAt this point we have a data structure that contains lists of words that are anagrams, but we have not actually enumerated all pairs.\nWrite a function that takes `anagram_map` and returns a list of all anagram pairs.\nHow many are there?",
"_____no_output_____"
],
[
"*Data Structures and Information Retrieval in Python*\n\nCopyright 2021 Allen Downey\n\nLicense: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
ec64307d96ed56bd3a82c4a003d3f5446b4390aa | 99,927 | ipynb | Jupyter Notebook | Lecture-Notes/2018/Day5.ipynb | unmeshvrije/python-for-beginners | d8943130bfd2499a458d92d5f6db97170fd53810 | [
"Apache-2.0"
]
| 7 | 2019-08-13T15:36:50.000Z | 2021-09-09T20:37:21.000Z | Lecture-Notes/2018/Day5.ipynb | unmeshvrije/python-for-beginners | d8943130bfd2499a458d92d5f6db97170fd53810 | [
"Apache-2.0"
]
| 2 | 2019-07-04T08:30:38.000Z | 2019-07-16T13:44:45.000Z | Lecture-Notes/2018/Day5.ipynb | unmeshvrije/python-for-beginners | d8943130bfd2499a458d92d5f6db97170fd53810 | [
"Apache-2.0"
]
| 4 | 2019-07-29T10:57:24.000Z | 2021-03-17T15:02:36.000Z | 116.059233 | 81,422 | 0.858617 | [
[
[
"x = 5 # This is global by default. you don't have to say global x\ndef fun():\n # Here, if you want to change x, then write\n global x\n x = 6\n print(\"something\")\n \nprint(x)\nfun()\nprint(x)",
"5\nsomething\n6\n"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"import random",
"_____no_output_____"
],
[
"help(random.randrange)",
"Help on method randrange in module random:\n\nrandrange(start, stop=None, step=1, _int=<class 'int'>) method of random.Random instance\n Choose a random item from range(start, stop[, step]).\n \n This fixes the problem with randint() which includes the\n endpoint; in Python this is usually not what you want.\n\n"
],
[
"help(random.random)",
"Help on built-in function random:\n\nrandom(...) method of random.Random instance\n random() -> x in the interval [0, 1).\n\n"
],
[
"help(round)",
"Help on built-in function round in module builtins:\n\nround(...)\n round(number[, ndigits]) -> number\n \n Round a number to a given precision in decimal digits (default 0 digits).\n This returns an int when called with one argument, otherwise the\n same type as the number. ndigits may be negative.\n\n"
],
[
"# To generate random numbers without 0 : Not so smart way\nfor i in range(5):\n number = round(random.random() * 25 , 2) # Gives me numbers from 0 to 25\n while True:\n if number != 0:\n break\n number = round(random.random() * 25, 2)\n print(number)",
"3.73\n10.7\n21.77\n5.37\n21.45\n"
],
[
"# To generate random numbers withotu 0 : A smarter way\nfor i in range(5):\n number = round(random.random()* 25,2) + float(1.0)\n print(number)",
"15.18\n1.43\n7.68\n21.58\n7.98\n"
],
[
"data = \"num1,num2,num3,num4,num5\\n\"\nfor n in range(20):\n row = \"\"\n for i in range(5):\n # str will properly display the float to 2 decimal places\n number = str(round(random.random()* 25,2)) #+ float(1.0)\n row += str(number)\n if i != 4:\n row += \",\"\n \n row += \"\\n\" # Add a new line after the row\n # row = row + \"\\n\"\n data += row # add row to the data\n\n# Here I have a string \"data\" that contains\n# 20 rows of 5 comma separated numbers\n#print (data)\n# with clause\n# open function\n# fout is the object of file\n# \"w\" write mode : will overwrite the file\nwith open (\"test.csv\", \"w\") as fout:\n fout.write(data)\n\n ",
"_____no_output_____"
],
[
"print (data)",
"num1,num2,num3,num4,num5\n8.86,13.97,22.17,22.64,8.28\n5.98,11.87,19.75,8.74,1.31\n22.89,3.14,4.36,4.17,12.73\n7.4,5.05,4.64,17.2,18.67\n1.63,7.71,3.57,1.59,19.48\n18.33,7.32,24.65,9.2,2.9\n6.52,20.93,6.88,21.71,14.63\n7.56,6.21,21.39,12.35,23.27\n15.37,18.57,23.04,0.98,21.4\n16.47,17.6,17.58,23.56,15.11\n7.15,22.63,2.27,23.26,22.02\n18.42,23.52,18.35,4.81,18.51\n19.14,0.1,20.61,13.87,18.39\n3.33,5.24,7.47,14.05,20.54\n0.55,0.17,19.81,20.88,15.72\n21.35,19.47,13.96,5.74,22.36\n22.09,16.71,14.39,17.2,15.2\n1.56,7.13,15.04,7.57,23.67\n13.12,23.27,7.18,9.99,18.88\n24.99,10.45,18.13,17.72,5.06\n\n"
],
[
"!pwd # Present Working Directory . It is not a Python command\n\n",
"/Users/unmesh/programming/python-for-beginners\r\n"
],
[
"# as \"pd\" allows us to be lazy\nimport pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv(\"test.csv\")",
"_____no_output_____"
],
[
"type(df)",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"plt.plot(df.get_values())\nplt.savefig(\"myplot.png\") # saves the file\nplt.show() # will show the plot",
"_____no_output_____"
]
],
[
[
"## Generators",
"_____no_output_____"
]
],
[
[
"numbers = range(5)",
"_____no_output_____"
],
[
"# This function will return different value \n# three times\n# you call the function with next()\ndef simple_generator():\n yield 1\n yield 2\n yield 3",
"_____no_output_____"
],
[
"# Assign the generator to mg object\nmg = simple_generator()\n# next(mg) will give you the next value yielded by the function\nnext(mg)",
"_____no_output_____"
],
[
"next(mg)",
"_____no_output_____"
],
[
"next(mg)",
"_____no_output_____"
],
[
"next(mg)",
"_____no_output_____"
],
[
"# Generator function\n# to generate fibonacci numbers\ndef next_fibonacci_number():\n yield 0\n yield 1\n first = 0\n second = 1\n while True:\n third = first + second\n first = second\n second = third\n yield third",
"_____no_output_____"
],
[
"mf = next_fibonacci_number()\nnext(mf)",
"_____no_output_____"
],
[
"next(mf)",
"_____no_output_____"
],
[
"print(next(mf), next(mf), next(mf), next(mf), next(mf))",
"5 8 13 21 34\n"
],
[
"def dont_write_such_a_function():\n print(\"Hello\")\n return 5\n print (\"I still want to do a lot with my life\")",
"_____no_output_____"
],
[
"x = dont_write_such_a_function()\nprint(x)",
"Hello\n5\n"
]
],
[
[
"## Function with variable number of arguments",
"_____no_output_____"
]
],
[
[
"# print() function takes variable number of arguments\n# nums is a number\n# kwargs is a dictionary\ndef average(*nums, **kwargs): # passed in a list\n sum = 0\n for n in nums: # n will have values from the list nums\n sum += n\n return sum / kwargs['divide']\n\n",
"_____no_output_____"
],
[
"# # passing two positional params\n# and one keyword parameter\naverage(3,4, divide=10) ",
"_____no_output_____"
],
[
"average(1,2,3,4,5)",
"_____no_output_____"
],
[
"average(1,2,3,4,5,6,7,8,9,10)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
ec6438f3d1e9309be84d5b0a262a1627a88ddd1d | 510,117 | ipynb | Jupyter Notebook | notebooks/practice/support_vector_machine.ipynb | chenyunzheng/opencv-machine-learning | 28510bb0393033fdc8f892c1909aed92aaac2db2 | [
"MIT"
]
| null | null | null | notebooks/practice/support_vector_machine.ipynb | chenyunzheng/opencv-machine-learning | 28510bb0393033fdc8f892c1909aed92aaac2db2 | [
"MIT"
]
| null | null | null | notebooks/practice/support_vector_machine.ipynb | chenyunzheng/opencv-machine-learning | 28510bb0393033fdc8f892c1909aed92aaac2db2 | [
"MIT"
]
| null | null | null | 717.464135 | 149,804 | 0.951344 | [
[
[
"# dataset made by scikit-learn\nfrom sklearn import datasets\nx, y = datasets.make_classification(n_samples=100, n_features=2, n_classes=2, n_redundant=0,random_state=7816)\nx.shape, y.shape",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.style.use('ggplot')\nplt.figure(figsize=(10,6))\nplt.scatter(x[:,0], x[:,1], c=y, s=30)\nplt.xlabel('feature 1')\nplt.ylabel('feature 2')",
"_____no_output_____"
],
[
"### preprocessing the data\nimport numpy as np\nx = x.astype(np.float32)\ny = y * 2 - 1",
"_____no_output_____"
],
[
"### split train & test data\nfrom sklearn import model_selection as ms\nx_train, x_test, y_train, y_test = ms.train_test_split(x, y, test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"x_train.shape, y_train.shape",
"_____no_output_____"
],
[
"### train SVM model using opencv\nimport cv2\n# create SVM model\nsvm = cv2.ml.SVM_create()\n# set kernel\nsvm.setKernel(cv2.ml.SVM_LINEAR)\n# train\nsvm.train(x_train, cv2.ml.ROW_SAMPLE, y_train)\n# predict\n_, y_pred = svm.predict(x_test)\n# score\nfrom sklearn import metrics\nmetrics.accuracy_score(y_test, y_pred)",
"_____no_output_____"
],
[
"import re\ndef validateEmail(email):\n flag = False\n if re.match(\"^.+\\\\@(\\\\[?)[a-zA-Z0-9\\\\-\\\\.]+\\\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\\\]?)$\", email) != None:\n flag = True\n return flag",
"_____no_output_____"
],
[
"validateEmail('[email protected]')",
"_____no_output_____"
],
[
"def plot_decision_boundary_test(svm, x_test, y_test):\n x_min, x_max = x_test[:, 0].min() - 1, x_test[:, 0].max() + 1\n y_min, y_max = x_test[:, 1].min() - 1, x_test[:, 1].max() + 1\n h = 0.02 # step size in mesh\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # use xx,yy to make hypothetical data points\n return xx, yy",
"_____no_output_____"
],
[
"xx, yy = plot_decision_boundary_test(svm, x_test, y_test);\n# yy[0,0]\n# _data = np.array([xx.ravel(), yy.ravel()]).T\n# print(_data[0])\ndd = np.c_[xx.ravel().astype(np.float32), yy.ravel().astype(np.float32)]\ndd[0]",
"_____no_output_____"
],
[
"def plot_decision_boundary(svm, x_test, y_test):\n x_min, x_max = x_test[:, 0].min() - 1, x_test[:, 0].max() + 1\n y_min, y_max = x_test[:, 1].min() - 1, x_test[:, 1].max() + 1\n h = 0.02 # step size in mesh\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # use xx,yy to make hypothetical data points\n x_hypo = np.c_[xx.ravel().astype(np.float32), yy.ravel().astype(np.float32)]\n _, zz = svm.predict(x_hypo)\n print(zz.shape)\n print(xx.shape)\n zz = zz.reshape(xx.shape)\n# plt.figure(figsize=(10,6))\n plt.contourf(xx,yy,zz,cmap=plt.cm.coolwarm, alpha=0.8)\n plt.scatter(x_test[:,0],x_test[:,1],c=y_test,s=200)",
"_____no_output_____"
],
[
"plot_decision_boundary(svm, x_test, y_test)",
"(96237, 1)\n(333, 289)\n"
],
[
"# compare the decision boundaries generated by different kernels\n# SVM_LINEAR, SVM_POLY, SVM_RBF, SVM_SIGMOID, SVM_INTER\nkernels = [cv2.ml.SVM_LINEAR, cv2.ml.SVM_RBF, cv2.ml.SVM_SIGMOID, cv2.ml.SVM_INTER]\nplt.figure(figsize=(12,8))\nfor idx,kernel in enumerate(kernels):\n svm = cv2.ml.SVM_create()\n svm.setKernel(kernel)\n svm.train(x_train, cv2.ml.ROW_SAMPLE, y_train)\n _, y_pred = svm.predict(x_test)\n accuracy = metrics.accuracy_score(y_test, y_pred)\n plt.subplot(2,2,idx+1)\n plot_decision_boundary(svm, x_test, y_test)\n plt.title('accuracy = %.2f' % accuracy)",
"(96237, 1)\n(333, 289)\n(96237, 1)\n(333, 289)\n(96237, 1)\n(333, 289)\n(96237, 1)\n(333, 289)\n"
],
[
"### detect pedestrains in images\ndef extract_tar(tarfilePath, extractdir):\n try:\n import tarfile\n except ImportError:\n raise ImportError('You do not have the tarfile installed. '\n 'Try unzipping the file outside of Python.')\n tar = tarfile.open(tarfilePath)\n tar.extractall(path=extractdir)\n tar.close()\n print('%s successfully extracted to %s' % (tarfilePath, extractdir))",
"_____no_output_____"
],
[
"tarfilePath = '../data/chapter6/pedestrians128x64.tar.gz'\nextractdir = '../data/chapter6/pedestrians128x64'\nextract_tar(tarfilePath, extractdir)",
"../data/chapter6/pedestrians128x64.tar.gz successfully extracted to ../data/chapter6/pedestrians128x64\n"
],
[
"# visualize some sample images - version1\nimport cv2\nfrom matplotlib import pyplot as plt\nplt.figure(figsize=(10,6))\nfor i in range(5):\n img = plt.imread('../data/chapter6/pedestrians128x64/pedestrians128x64/per00%d.ppm' % (i + 100))\n plt.subplot(1,5,i+1)\n plt.imshow(img)\n plt.axis('off')",
"_____no_output_____"
],
[
"# visualize some sample images - version2\nimport cv2\nimport matplotlib.pyplot as plt\n\nfor i in range(5):\n filename = '../data/chapter6/pedestrians128x64/pedestrians128x64/per0010%d.ppm' % i\n img = cv2.imread(filename)\n plt.subplot(1,5,i+1)\n plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n plt.axis('off')",
"_____no_output_____"
],
[
"### the histogram of oriented gradients (HOG) - edge directions\n# create HOG Descriptor\nwin_size = (48, 96)\nblock_size = (16, 16)\ncell_size = (8, 8)\ncell_stride = (8, 8)\nnum_bins = 9\nhog = cv2.HOGDescriptor(win_size,block_size,cell_stride,cell_size,num_bins)",
"_____no_output_____"
],
[
"tarfilePath = '../data/chapter6/pedestrians_neg.tar.gz'\nextractdir = '../data/chapter6/pedestrians_neg'\nextract_tar(tarfilePath, extractdir)",
"../data/chapter6/pedestrians_neg.tar.gz successfully extracted to ../data/chapter6/pedestrians_neg\n"
],
[
"# create the negative samples - non-pedestrians images\nimport os\nimport numpy as np\n\nnp.random.RandomState(42)\nnon_pedestrians_dir = '../data/chapter6/pedestrians_neg/pedestrians_neg'\nhroi = 128\nwroi = 64\nx_neg = []\nfor negfile in os.listdir(non_pedestrians_dir):\n imgpath = '%s/%s' % (non_pedestrians_dir,negfile)\n img = cv2.imread(imgpath)\n img = cv2.resize(img, (512, 512))\n for i in range(5):\n rand_y = np.random.randint(0, img.shape[0] - hroi)\n rand_x = np.random.randint(0, img.shape[1] - wroi)\n roi = img[rand_y : rand_y + hroi, rand_x : rand_x + wroi, :]\n if(i == 1):\n cv2.imwrite(imgpath + '_cut.jpg', roi)\n x_neg.append(hog.compute(roi, (64, 64)))",
"_____no_output_____"
],
[
"# create the positive samples - pedestrians images\nimport random\nrandom.seed(42)\nx_pos = []\nfor i in random.sample(range(900), 400):\n imgpath = '../data/chapter6/pedestrians128x64/pedestrians128x64/per%05d.ppm' % i\n img = cv2.imread(imgpath)\n if img is None:\n print('Could not find image %s' % imgpath)\n continue\n x_pos.append(hog.compute(img, (64, 64)))",
"Could not find image ../data/chapter6/pedestrians128x64/pedestrians128x64/per00000.ppm\n"
],
[
"# convert to float32 for opencv\nx_neg = np.array(x_neg, dtype=np.float32)\nx_pos = np.array(x_pos, dtype=np.float32)\nprint(x_neg.shape)\nprint(x_pos.shape)",
"(250, 1980, 1)\n(399, 1980, 1)\n"
],
[
"# label 1 for pedestrians and -1 for non-pedestrians\ny_pos = np.ones(x_pos.shape[0], dtype=np.int32)\ny_neg = -np.ones(x_neg.shape[0], dtype=np.int32)",
"_____no_output_____"
],
[
"# the whole x and y\nx = np.concatenate((x_pos, x_neg))\ny = np.concatenate((y_pos, y_neg))",
"_____no_output_____"
],
[
"# split train & test sets\nfrom sklearn import model_selection as ms\nx_train, x_test, y_train, y_test = ms.train_test_split(x, y, test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"x_train.shape",
"_____no_output_____"
],
[
"### implement the SVM\n# create and train SVM\nimport cv2\ndef train_svm(x_train, y_train):\n svm = cv2.ml.SVM_create()\n svm.train(x_train, cv2.ml.ROW_SAMPLE, y_train)\n return svm\n\nfrom sklearn import metrics\n# socre the SVM model\ndef score_svm(svm, x, y):\n _, y_pred = svm.predict(x)\n return metrics.accuracy_score(y, y_pred)",
"_____no_output_____"
],
[
"svm = train_svm(x_train, y_train)\nprint(score_svm(svm, x_train, y_train))\nprint(score_svm(svm, x_test, y_test))",
"1.0\n0.6461538461538462\n"
],
[
"### bootstrapping svm model - the maximum iteration = 3\nscore_train = []\nsocre_test = []\nfor i in range(3):\n svm = train_svm(x_train, y_train)\n score_train.append(score_svm(svm, x_train, y_train))\n socre_test.append(score_svm(svm, x_test, y_test))\n _, y_pred = svm.predict(x_test)\n diff = np.logical_and(y_pred.ravel() == 1, y_test.ravel() == -1)\n if not np.any(diff):\n print('no more false positives: done')\n break\n x_train = np.concatenate((x_train, x_test[diff, :]), axis=0)\n y_train = np.concatenate((y_train, y_test[diff]), axis=0)\n\nprint(score_train)\nprint(socre_test)",
"no more false positives: done\n[1.0, 1.0]\n[0.6461538461538462, 1.0]\n"
],
[
"### using opencv built-in SVM to detect pedestrians\nimport cv2\n\nimg_path = '../data/chapter6/pedestrian_test.jpg'\nimg = cv2.imread(img_path)\n\npdetect = cv2.HOGDescriptor_getDefaultPeopleDetector()\nhogdef = cv2.HOGDescriptor()\nhogdef.setSVMDetector(pdetect)\nfound, _ = hogdef.detectMultiScale(img)",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt\nfrom matplotlib import patches\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n# ax.axis('off')\n# show the bounding box\nfor f in found:\n ax.add_patch(patches.Rectangle((f[0], f[1]), f[2], f[3], color='y', linewidth=3, fill=False))",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6441395ac16ddf8fe1b3423450ae4b16dac55a | 1,524 | ipynb | Jupyter Notebook | Determinant_of_Matrix.ipynb | kenaceeee/Linear-Algebra | 10ee2e71a75c238224db31d015071c9008cc01c9 | [
"Apache-2.0"
]
| null | null | null | Determinant_of_Matrix.ipynb | kenaceeee/Linear-Algebra | 10ee2e71a75c238224db31d015071c9008cc01c9 | [
"Apache-2.0"
]
| null | null | null | Determinant_of_Matrix.ipynb | kenaceeee/Linear-Algebra | 10ee2e71a75c238224db31d015071c9008cc01c9 | [
"Apache-2.0"
]
| null | null | null | 24.983607 | 248 | 0.459318 | [
[
[
"<a href=\"https://colab.research.google.com/github/kenaceeee/Linear-Algebra-58019/blob/main/Determinant_of_Matrix.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nA = np.array([[1,2,-1],[4,6,-2],[-1,3,3]])\nprint(A)\nprint(round(np.linalg.det(A)))",
"[[ 1 2 -1]\n [ 4 6 -2]\n [-1 3 3]]\n-14\n"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
]
]
|
ec6449b8908a3f69ae055f6d9afad885f1f4e954 | 9,590 | ipynb | Jupyter Notebook | ccmi_2019_03_06/.ipynb_checkpoints/2019-03-06_05-03 UCSF ssGSEA-checkpoint.ipynb | g2nb/workshop-notebooks | 1e22f9569438dd509f3148959ca5b87d78ea5157 | [
"BSD-3-Clause"
]
| null | null | null | ccmi_2019_03_06/.ipynb_checkpoints/2019-03-06_05-03 UCSF ssGSEA-checkpoint.ipynb | g2nb/workshop-notebooks | 1e22f9569438dd509f3148959ca5b87d78ea5157 | [
"BSD-3-Clause"
]
| null | null | null | ccmi_2019_03_06/.ipynb_checkpoints/2019-03-06_05-03 UCSF ssGSEA-checkpoint.ipynb | g2nb/workshop-notebooks | 1e22f9569438dd509f3148959ca5b87d78ea5157 | [
"BSD-3-Clause"
]
| 1 | 2022-01-12T20:17:50.000Z | 2022-01-12T20:17:50.000Z | 35.783582 | 587 | 0.636079 | [
[
[
"# Single-sample GSEA projection (ssGSEA)",
"_____no_output_____"
],
[
"## Background",
"_____no_output_____"
],
[
"Traditional gene set enrichment analysis assesses the differential coordinate up- or down-regulation of a biological process or pathway between groups of samples belonging to two phenotypes. The ability to assess that enrichment in individual samples, especially independently of pre-assigned phenotype labels, provides the opportunity to analyze transcription data at a higher level, by using gene sets/pathways instead of genes, resulting in a much more biologically interpretable set of features. Single-sample Gene Set Enrichment Analysis (ssGSEA) Projection accomplishes this.",
"_____no_output_____"
],
[
"**ssGSEA projects a single sample’s gene expression profile from the space of single genes onto the space of gene sets**. It does this via the ssGSEA enrichment score, which represents the degree to which the genes in a particular gene set are coordinately up- or down- regulated within a sample. \n\nAny supervised or unsupervised machine learning technique or other statistical analysis can then be applied to the resulting projected dataset. The benefit is that the **ssGSEA projection transforms the data to a higher-level (pathways instead of genes) space representing a more biologically interpretable set of features on which analytic methods can be applied.**\n\nAnother benefit of ssGSEA projection is **dimensionality reduction**. Typically the number of gene sets employed in the enrichment analysis is substantially smaller than the number of genes targeted by a gene expression assay, and they are more robust and less noisy, resulting in significant benefits for downstream analysis.",
"_____no_output_____"
],
[
"## Before you begin",
"_____no_output_____"
],
[
"You must log in to a GenePattern server. In this notebook we will use **```GenePattern Cloud``` **\n\n<div class=\"alert alert-info\">\n<ul><li>Sign in to GenePattern by entering your username and password into the form below. </li></ul>\n</div>",
"_____no_output_____"
]
],
[
[
"# Requires GenePattern Notebook: pip install genepattern-notebook\nimport gp\nimport genepattern\n\n# Username and password removed for security reasons.\ngenepattern.display(genepattern.session.register(\"https://cloud.genepattern.org/gp\", \"\", \"\"))",
"_____no_output_____"
]
],
[
[
"## Project gene expression dataset into the space of oncogenic gene sets",
"_____no_output_____"
],
[
"We will use the GenePattern ssGSEAProjection analysis to transform the set of TCGA breast cancer samples into a dataset where each row corresponds to a pathway from the [MSigDB oncogenic gene sets collection](http://software.broadinstitute.org/gsea/msigdb/genesets.jsp?collection=C6), and each column is a sample. Each value in the new dataset will therefore represent the up- or downregulation of a pathway (row) within a sample (column).",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n<h3>Instructions</h3>\n\n<ol>\n<li>Insert a *GenePattern Analysis Cell* to run the ssGSEAProjection module.\n<ol>\n<li>Make sure this cell is selected by clicking once on it.</li>\n<li>In the menu above, select `Insert`, then `Insert Cell Below`.</li>\n<li>Turn that cell int a *GenePattern Analysis cell* (click on `Cell`, then select `Cell Type`, and select `GenePattern`).</li>\n<li>In the search menu that pops up type `ssgsea` and select `ssGSEAProjection`.</li><br>\n</ol>\n</li>\n<li>For the <strong><em>input gct file</em></strong> parameter, click and drag <a href=\"https://datasets.genepattern.org/data/ccmi_tutorial/2017-12-15/BRCA_HUGO_symbols.preprocessed.gct\" target=\"_blank\">BRCA_HUGO_symbols.preprocessed.gct</a> into the <em>"Enter Path or URL" </em>text box</li> \n<li>For the <strong><em>gene sets database files</em></strong> parameter, select <em>c6.all.v6.2.symbols.gmt [Oncogenic Signatures]</em>.</li>\n<li>Click <strong><em>Run</em></strong> on the analysis below.</li>\n</ol>\n\n</div>",
"_____no_output_____"
],
[
"## Visualize projected pathways as a heat map",
"_____no_output_____"
],
[
"We will use the GenePattern heat map viewer to visualize the resulting projection of genes into pathways.",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n<h3>Instructions</h3>\n1. Insert a *GenePattern Analysis Cell* to run the HeatMapViewer module.\n<ol>\n<li>Make sure this cell is selected by clicking once on it.</li>\n<li>In the menu above, select `Insert`, then `Insert Cell Below`.</li>\n<li>Turn that cell int a *GenePattern Analysis cell* (click on `Cell`, then select `Cell Type`, and select `GenePattern`).</li>\n<li>In the search menu that pops up type `ssgsea` and select `HeatMapViewer`.</li><br>\n</ol>\n</li>\n1. In the **dataset** parameter below, click on the dropdown and select `BRCA_HUGO_symbols.preprocessed.PROJ.gct`\n1. Click **Run**.",
"_____no_output_____"
]
],
[
[
"heatmapviewer_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.visualizer:00010')\nheatmapviewer_job_spec = heatmapviewer_task.make_job_spec()\nheatmapviewer_job_spec.set_parameter(\"dataset\", \"\")\ngenepattern.display(heatmapviewer_task)",
"_____no_output_____"
]
],
[
[
"## Project data onto hallmark pathways",
"_____no_output_____"
],
[
"[MSigDB Hallmark gene sets](http://software.broadinstitute.org/gsea/msigdb/genesets.jsp?collection=H) summarize and represent specific well-defined biological states or processes and display coherent expression. In this exercise you will project the expression dataset onto the hallmark gene set collection.",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n<h3>Instructions</h3>\n\n1. Create a new ssGSEA cell\n2. Populate it with the result dataset\n3. Select the **h.all.v6.2.symbols.gmt [Hallmarks]** gene sets database file\n4. Run the cell\n5. Create a new HeatMapVisualizer cell and visualize the analysis results in it\n\n**Hint**: if you need to re-run an analysis with some parameters changed, you can click on the gear icon in the job result panel (the panel with the title **Job ######** and select **Duplicate analysis**.\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
ec64965efe0431bfde903f67f6b5d29c42f263a4 | 133,070 | ipynb | Jupyter Notebook | light_weight_roberta_base.ipynb | nishipy/clrp | 2113dc6a1b8e0d42f670388ad25484dd8d3a7ad0 | [
"MIT"
]
| null | null | null | light_weight_roberta_base.ipynb | nishipy/clrp | 2113dc6a1b8e0d42f670388ad25484dd8d3a7ad0 | [
"MIT"
]
| null | null | null | light_weight_roberta_base.ipynb | nishipy/clrp | 2113dc6a1b8e0d42f670388ad25484dd8d3a7ad0 | [
"MIT"
]
| null | null | null | 44.88027 | 342 | 0.51125 | [
[
[
"<a href=\"https://colab.research.google.com/github/nishipy/clrp/blob/main/light_weight_roberta_base.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Overview\nThis is kernel is almost the same as [Lightweight Roberta solution in PyTorch](https://www.kaggle.com/andretugan/lightweight-roberta-solution-in-pytorch), but instead of \"roberta-base\", it starts from [Maunish's pre-trained model](https://www.kaggle.com/maunish/clrp-roberta-base).\n\nAcknowledgments: some ideas were taken from kernels by [Torch](https://www.kaggle.com/rhtsingh) and [Maunish](https://www.kaggle.com/maunish).\n\nIn addition, we use the [stratified_kfold train dataset](https://www.kaggle.com/takeshikobayashi/commonlit-train-datasetfor) training the model.",
"_____no_output_____"
],
[
"## Original notebook\n- Lightweight Roberta solution\n - https://www.kaggle.com/andretugan/pre-trained-roberta-solution-in-pytorch\n- pretraied with MLM\n - https://www.kaggle.com/maunish/clrp-pytorch-roberta-pretrain",
"_____no_output_____"
],
[
"# Prepare",
"_____no_output_____"
],
[
"## Checking GPU status",
"_____no_output_____"
]
],
[
[
"gpu_info = !nvidia-smi\ngpu_info = '\\n'.join(gpu_info)\nif gpu_info.find('failed') >= 0:\n print('Select the Runtime > \"Change runtime type\" menu to enable a GPU accelerator, ')\n print('and then re-execute this cell.')\nelse:\n print(gpu_info)",
"Sat Jul 3 14:07:57 2021 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 465.27 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n| N/A 35C P0 27W / 250W | 0MiB / 16280MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
]
],
[
[
"## Download dataset from kaggle",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
]
],
[
[
"### kaggle.json",
"_____no_output_____"
]
],
[
[
"!mkdir -p /root/.kaggle/\n!cp ./drive/MyDrive/kaggle/commonlit/kaggle.json ~/.kaggle/kaggle.json\n!chmod 600 ~/.kaggle/kaggle.json",
"_____no_output_____"
]
],
[
[
"### Competition dataset",
"_____no_output_____"
]
],
[
[
"!mkdir -p ../input/commonlitreadabilityprize/\n!kaggle competitions download -c commonlitreadabilityprize -p ../input/commonlitreadabilityprize/\n!cp -f ./drive/MyDrive/kaggle/commonlit/train_stratiKfold.csv.zip ../input/commonlitreadabilityprize/",
"Warning: Looks like you're using an outdated API Version, please consider updating (server 1.5.12 / client 1.5.4)\nDownloading train.csv.zip to ../input/commonlitreadabilityprize\n 0% 0.00/1.13M [00:00<?, ?B/s]\n100% 1.13M/1.13M [00:00<00:00, 79.0MB/s]\nDownloading sample_submission.csv to ../input/commonlitreadabilityprize\n 0% 0.00/108 [00:00<?, ?B/s]\n100% 108/108 [00:00<00:00, 108kB/s]\nDownloading test.csv to ../input/commonlitreadabilityprize\n 0% 0.00/6.79k [00:00<?, ?B/s]\n100% 6.79k/6.79k [00:00<00:00, 10.7MB/s]\n"
],
[
"!unzip -o ../input/commonlitreadabilityprize/train.csv.zip -d ../input/commonlitreadabilityprize/\n!unzip -o ../input/commonlitreadabilityprize/train_stratiKfold.csv.zip -d ../input/commonlitreadabilityprize/",
"Archive: ../input/commonlitreadabilityprize/train.csv.zip\n inflating: ../input/commonlitreadabilityprize/train.csv \nArchive: ../input/commonlitreadabilityprize/train_stratiKfold.csv.zip\n inflating: ../input/commonlitreadabilityprize/train_stratiKfold.csv \n"
],
[
"!ls ../input/commonlitreadabilityprize/",
"sample_submission.csv train.csv train_stratiKfold.csv\ntest.csv\t train.csv.zip train_stratiKfold.csv.zip\n"
]
],
[
[
"### Model pre-trained with MLM \n- Notebook\n - https://www.kaggle.com/maunish/clrp-pytorch-roberta-pretrain\n- Model data\n - https://www.kaggle.com/maunish/clrp-roberta-base",
"_____no_output_____"
]
],
[
[
"!mkdir -p ../input/commonlitreadabilityprize/pretrained-model/\n!kaggle datasets download maunish/clrp-roberta-base -p ../input/commonlitreadabilityprize/pretrained-model/",
"Downloading clrp-roberta-base.zip to ../input/commonlitreadabilityprize/pretrained-model\n100% 3.00G/3.01G [00:43<00:00, 76.7MB/s]\n100% 3.01G/3.01G [00:43<00:00, 74.5MB/s]\n"
],
[
"!unzip -o ../input/commonlitreadabilityprize/pretrained-model/clrp-roberta-base.zip -d ../input/commonlitreadabilityprize/pretrained-model/",
"Archive: ../input/commonlitreadabilityprize/pretrained-model/clrp-roberta-base.zip\n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/config.json \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/merges.txt \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/pytorch_model.bin \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/special_tokens_map.json \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/tokenizer_config.json \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/training_args.bin \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/vocab.json \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-600/config.json \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-600/optimizer.pt \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-600/pytorch_model.bin \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-600/scheduler.pt \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-600/trainer_state.json \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-600/training_args.bin \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-800/config.json \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-800/optimizer.pt \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-800/pytorch_model.bin \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-800/scheduler.pt \n inflating: 
../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-800/trainer_state.json \n inflating: ../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base_chk/checkpoint-800/training_args.bin \n inflating: ../input/commonlitreadabilityprize/pretrained-model/text.txt \n"
]
],
[
[
"# Install dependencies",
"_____no_output_____"
]
],
[
[
"!pip install transformers accelerate datasets",
"Collecting transformers\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/fd/1a/41c644c963249fd7f3836d926afa1e3f1cc234a1c40d80c5f03ad8f6f1b2/transformers-4.8.2-py3-none-any.whl (2.5MB)\n\u001b[K |████████████████████████████████| 2.5MB 12.8MB/s \n\u001b[?25hCollecting accelerate\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f7/fa/d173d923c953d930702066894abf128a7e5258c6f64cf088d2c5a83f46a3/accelerate-0.3.0-py3-none-any.whl (49kB)\n\u001b[K |████████████████████████████████| 51kB 9.2MB/s \n\u001b[?25hCollecting datasets\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/08/a2/d4e1024c891506e1cee8f9d719d20831bac31cb5b7416983c4d2f65a6287/datasets-1.8.0-py3-none-any.whl (237kB)\n\u001b[K |████████████████████████████████| 245kB 53.8MB/s \n\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.19.5)\nRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers) (20.9)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\nCollecting sacremoses\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/75/ee/67241dc87f266093c533a2d4d3d69438e57d7a90abb216fa076e7d475d4a/sacremoses-0.0.45-py3-none-any.whl (895kB)\n\u001b[K |████████████████████████████████| 901kB 46.3MB/s \n\u001b[?25hCollecting tokenizers<0.11,>=0.10.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d4/e2/df3543e8ffdab68f5acc73f613de9c2b155ac47f162e725dcac87c521c11/tokenizers-0.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (3.3MB)\n\u001b[K |████████████████████████████████| 3.3MB 20.3MB/s \n\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from 
transformers) (4.41.1)\nCollecting huggingface-hub==0.0.12\n Downloading https://files.pythonhosted.org/packages/2f/ee/97e253668fda9b17e968b3f97b2f8e53aa0127e8807d24a547687423fe0b/huggingface_hub-0.0.12-py3-none-any.whl\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from transformers) (3.13)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.0.12)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from transformers) (4.5.0)\nRequirement already satisfied: torch>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from accelerate) (1.9.0+cu102)\nCollecting pyaml>=20.4.0\n Downloading https://files.pythonhosted.org/packages/15/c4/1310a054d33abc318426a956e7d6df0df76a6ddfa9c66f6310274fb75d42/pyaml-20.4.0-py2.py3-none-any.whl\nRequirement already satisfied: pyarrow<4.0.0,>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (3.0.0)\nRequirement already satisfied: multiprocess in /usr/local/lib/python3.7/dist-packages (from datasets) (0.70.12.2)\nCollecting fsspec\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/0e/3a/666e63625a19883ae8e1674099e631f9737bd5478c4790e5ad49c5ac5261/fsspec-2021.6.1-py3-none-any.whl (115kB)\n\u001b[K |████████████████████████████████| 122kB 57.6MB/s \n\u001b[?25hRequirement already satisfied: dill in /usr/local/lib/python3.7/dist-packages (from datasets) (0.3.4)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from datasets) (1.1.5)\nCollecting xxhash\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/7d/4f/0a862cad26aa2ed7a7cd87178cbbfa824fc1383e472d63596a0d018374e7/xxhash-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl (243kB)\n\u001b[K |████████████████████████████████| 245kB 56.3MB/s \n\u001b[?25hRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) 
(3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2021.5.30)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->transformers) (2.4.7)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (7.1.2)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.15.0)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.0.1)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from huggingface-hub==0.0.12->transformers) (3.7.4.3)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->transformers) (3.4.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->datasets) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->datasets) (2.8.1)\nInstalling collected packages: sacremoses, tokenizers, huggingface-hub, transformers, pyaml, accelerate, fsspec, xxhash, datasets\nSuccessfully installed accelerate-0.3.0 datasets-1.8.0 fsspec-2021.6.1 huggingface-hub-0.0.12 pyaml-20.4.0 sacremoses-0.0.45 tokenizers-0.10.3 transformers-4.8.2 xxhash-2.0.2\n"
],
[
"import os\nimport math\nimport random\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nfrom transformers import AdamW\nfrom transformers import AutoTokenizer\nfrom transformers import AutoModel\nfrom transformers import AutoConfig\nfrom transformers import get_cosine_schedule_with_warmup\n\nfrom sklearn.model_selection import KFold\n\nimport gc\ngc.enable()",
"_____no_output_____"
]
],
[
[
"# Set constant",
"_____no_output_____"
]
],
[
[
"NUM_FOLDS = 5\nNUM_EPOCHS = 3\nBATCH_SIZE = 16\nMAX_LEN = 248\nEVAL_SCHEDULE = [(0.50, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1., 1)]\nROBERTA_PATH = \"../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/\"\nTOKENIZER_PATH = \"../input/commonlitreadabilityprize/pretrained-model/clrp_roberta_base/\"\n#ROBERTA_PATH = \"../input/clrp-roberta-base/clrp_roberta_base\"\n#TOKENIZER_PATH = \"../input/clrp-roberta-base/clrp_roberta_base\"\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"",
"_____no_output_____"
]
],
[
[
"# Define utility functions",
"_____no_output_____"
]
],
[
[
"def set_random_seed(random_seed):\n random.seed(random_seed)\n np.random.seed(random_seed)\n os.environ[\"PYTHONHASHSEED\"] = str(random_seed)\n\n torch.manual_seed(random_seed)\n torch.cuda.manual_seed(random_seed)\n torch.cuda.manual_seed_all(random_seed)\n\n torch.backends.cudnn.deterministic = True",
"_____no_output_____"
]
],
[
[
"train_dfには、Stratified kfold済みのデータセットを利用する。",
"_____no_output_____"
]
],
[
[
"#Use stratified k-fold train dataset\n#train_df = pd.read_csv(\"/kaggle/input/commonlitreadabilityprize/train.csv\")\ntrain_df = pd.read_csv(\"../input/commonlitreadabilityprize/train_stratiKfold.csv\")\n\n# Remove incomplete entries if any.\ntrain_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index,\n inplace=True)\ntrain_df.reset_index(drop=True, inplace=True)\n\ntest_df = pd.read_csv(\"../input/commonlitreadabilityprize/test.csv\")\nsubmission_df = pd.read_csv(\"../input/commonlitreadabilityprize/sample_submission.csv\")",
"_____no_output_____"
],
[
"#TokenizerはRoberta-baseと同じ\ntokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)",
"_____no_output_____"
]
],
[
[
"# Dataset",
"_____no_output_____"
]
],
[
[
"class LitDataset(Dataset):\n def __init__(self, df, inference_only=False):\n super().__init__()\n\n self.df = df \n self.inference_only = inference_only\n self.text = df.excerpt.tolist()\n #改行を消してみる。元のNotebookではここはコメントアウトされている\n self.text = [text.replace(\"\\n\", \" \") for text in self.text]\n \n if not self.inference_only:\n self.target = torch.tensor(df.target.values, dtype=torch.float32) \n \n self.encoded = tokenizer.batch_encode_plus(\n self.text,\n padding = 'max_length', \n max_length = MAX_LEN,\n truncation = True,\n return_attention_mask=True\n ) \n \n\n def __len__(self):\n return len(self.df)\n\n \n def __getitem__(self, index): \n input_ids = torch.tensor(self.encoded['input_ids'][index])\n attention_mask = torch.tensor(self.encoded['attention_mask'][index])\n \n if self.inference_only:\n return (input_ids, attention_mask) \n else:\n target = self.target[index]\n return (input_ids, attention_mask, target)",
"_____no_output_____"
]
],
[
[
"# Model\nThe model is inspired by the one from [Maunish](https://www.kaggle.com/maunish/clrp-roberta-svm).",
"_____no_output_____"
]
],
[
[
"class LitModel(nn.Module):\n def __init__(self):\n super().__init__()\n\n config = AutoConfig.from_pretrained(ROBERTA_PATH)\n #config.jsonに書いてある設定値を更新する\n config.update({\"output_hidden_states\":True, \n \"hidden_dropout_prob\": 0.0,\n \"layer_norm_eps\": 1e-7}) \n \n self.roberta = AutoModel.from_pretrained(ROBERTA_PATH, config=config) \n \n self.attention = nn.Sequential( \n nn.Linear(768, 512), \n nn.Tanh(), \n nn.Linear(512, 1),\n nn.Softmax(dim=1)\n ) \n\n self.regressor = nn.Sequential( \n nn.Linear(768, 1) \n )\n \n\n def forward(self, input_ids, attention_mask):\n roberta_output = self.roberta(input_ids=input_ids,\n attention_mask=attention_mask) \n\n # There are a total of 13 layers of hidden states.\n # 1 for the embedding layer, and 12 for the 12 Roberta layers.\n # We take the hidden states from the last Roberta layer.\n last_layer_hidden_states = roberta_output.hidden_states[-1]\n\n # The number of cells is MAX_LEN.\n # The size of the hidden state of each cell is 768 (for roberta-base).\n # In order to condense hidden states of all cells to a context vector,\n # we compute a weighted average of the hidden states of all cells.\n # We compute the weight of each cell, using the attention neural network.\n weights = self.attention(last_layer_hidden_states)\n \n # weights.shape is BATCH_SIZE x MAX_LEN x 1\n # last_layer_hidden_states.shape is BATCH_SIZE x MAX_LEN x 768 \n # Now we compute context_vector as the weighted average.\n # context_vector.shape is BATCH_SIZE x 768\n context_vector = torch.sum(weights * last_layer_hidden_states, dim=1) \n \n # Now we reduce the context vector to the prediction score.\n return self.regressor(context_vector)",
"_____no_output_____"
],
[
"#MSEで評価\ndef eval_mse(model, data_loader):\n \"\"\"Evaluates the mean squared error of the |model| on |data_loader|\"\"\"\n model.eval() \n mse_sum = 0\n\n with torch.no_grad():\n for batch_num, (input_ids, attention_mask, target) in enumerate(data_loader):\n input_ids = input_ids.to(DEVICE)\n attention_mask = attention_mask.to(DEVICE) \n target = target.to(DEVICE) \n \n pred = model(input_ids, attention_mask) \n\n mse_sum += nn.MSELoss(reduction=\"sum\")(pred.flatten(), target).item()\n \n\n return mse_sum / len(data_loader.dataset)",
"_____no_output_____"
],
[
"def predict(model, data_loader):\n \"\"\"Returns an np.array with predictions of the |model| on |data_loader|\"\"\"\n model.eval()\n\n result = np.zeros(len(data_loader.dataset)) \n index = 0\n \n with torch.no_grad():\n for batch_num, (input_ids, attention_mask) in enumerate(data_loader):\n input_ids = input_ids.to(DEVICE)\n attention_mask = attention_mask.to(DEVICE)\n \n pred = model(input_ids, attention_mask) \n\n result[index : index + pred.shape[0]] = pred.flatten().to(\"cpu\")\n index += pred.shape[0]\n\n return result",
"_____no_output_____"
],
[
"def train(model, model_path, train_loader, val_loader,\n optimizer, scheduler=None, num_epochs=NUM_EPOCHS): \n best_val_rmse = None\n best_epoch = 0\n step = 0\n last_eval_step = 0\n eval_period = EVAL_SCHEDULE[0][1] \n\n start = time.time()\n\n for epoch in range(num_epochs): \n val_rmse = None \n\n for batch_num, (input_ids, attention_mask, target) in enumerate(train_loader):\n input_ids = input_ids.to(DEVICE)\n attention_mask = attention_mask.to(DEVICE) \n target = target.to(DEVICE) \n\n optimizer.zero_grad()\n \n model.train()\n\n pred = model(input_ids, attention_mask)\n \n mse = nn.MSELoss(reduction=\"mean\")(pred.flatten(), target)\n \n mse.backward()\n\n optimizer.step()\n if scheduler:\n scheduler.step()\n \n if step >= last_eval_step + eval_period:\n # Evaluate the model on val_loader.\n elapsed_seconds = time.time() - start\n num_steps = step - last_eval_step\n print(f\"\\n{num_steps} steps took {elapsed_seconds:0.3} seconds\")\n last_eval_step = step\n \n val_rmse = math.sqrt(eval_mse(model, val_loader)) \n\n print(f\"Epoch: {epoch} batch_num: {batch_num}\", \n f\"val_rmse: {val_rmse:0.4}\")\n\n for rmse, period in EVAL_SCHEDULE:\n if val_rmse >= rmse:\n eval_period = period\n break \n \n if not best_val_rmse or val_rmse < best_val_rmse: \n best_val_rmse = val_rmse\n best_epoch = epoch\n torch.save(model.state_dict(), model_path)\n print(f\"New best_val_rmse: {best_val_rmse:0.4}\")\n else: \n print(f\"Still best_val_rmse: {best_val_rmse:0.4}\",\n f\"(from epoch {best_epoch})\") \n \n start = time.time()\n \n step += 1\n \n \n return best_val_rmse",
"_____no_output_____"
],
[
"def create_optimizer(model):\n named_parameters = list(model.named_parameters()) \n \n roberta_parameters = named_parameters[:197] \n attention_parameters = named_parameters[199:203]\n regressor_parameters = named_parameters[203:]\n \n attention_group = [params for (name, params) in attention_parameters]\n regressor_group = [params for (name, params) in regressor_parameters]\n\n parameters = []\n parameters.append({\"params\": attention_group})\n parameters.append({\"params\": regressor_group})\n\n for layer_num, (name, params) in enumerate(roberta_parameters):\n weight_decay = 0.0 if \"bias\" in name else 0.01\n\n lr = 2e-5\n\n if layer_num >= 69: \n lr = 5e-5\n\n if layer_num >= 133:\n lr = 1e-4\n\n parameters.append({\"params\": params,\n \"weight_decay\": weight_decay,\n \"lr\": lr})\n\n return AdamW(parameters)",
"_____no_output_____"
]
],
[
[
"## Run",
"_____no_output_____"
]
],
[
[
"gc.collect()\n\nSEED = 1000\nlist_val_rmse = []\n\nfor fold in range(NUM_FOLDS): \n print(f\"\\nFold {fold + 1}/{NUM_FOLDS}\")\n model_path = f\"model_{fold + 1}.pth\"\n \n set_random_seed(SEED + fold)\n\n #Stratified kfold train dataset用に修正\n train_dataset = LitDataset(train_df[train_df['kfold']!=fold]) \n val_dataset = LitDataset(train_df[train_df['kfold']==fold]) \n \n train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,\n drop_last=True, shuffle=True, num_workers=2) \n val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE,\n drop_last=False, shuffle=False, num_workers=2) \n \n set_random_seed(SEED + fold) \n \n model = LitModel().to(DEVICE)\n \n optimizer = create_optimizer(model) \n scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n num_training_steps=NUM_EPOCHS * len(train_loader),\n num_warmup_steps=50) \n \n list_val_rmse.append(train(model, model_path, train_loader,\n val_loader, optimizer, scheduler=scheduler))\n\n del model\n gc.collect()\n \n print(\"\\nPerformance estimates:\")\n print(list_val_rmse)\n print(\"Mean:\", np.array(list_val_rmse).mean())\n\n\n#kfold = KFold(n_splits=NUM_FOLDS, random_state=SEED, shuffle=True)\n#\n# for fold, (train_indices, val_indices) in enumerate(kfold.split(train_df)): \n# print(f\"\\nFold {fold + 1}/{NUM_FOLDS}\")\n# model_path = f\"model_{fold + 1}.pth\"\n \n# set_random_seed(SEED + fold)\n \n# train_dataset = LitDataset(train_df.loc[train_indices]) \n# val_dataset = LitDataset(train_df.loc[val_indices]) \n \n# train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,\n# drop_last=True, shuffle=True, num_workers=2) \n# val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE,\n# drop_last=False, shuffle=False, num_workers=2) \n \n# set_random_seed(SEED + fold) \n \n# model = LitModel().to(DEVICE)\n \n# optimizer = create_optimizer(model) \n# scheduler = get_cosine_schedule_with_warmup(\n# optimizer,\n# num_training_steps=NUM_EPOCHS * len(train_loader),\n# num_warmup_steps=50) \n \n# 
list_val_rmse.append(train(model, model_path, train_loader,\n# val_loader, optimizer, scheduler=scheduler))\n\n# del model\n# gc.collect()\n \n# print(\"\\nPerformance estimates:\")\n# print(list_val_rmse)\n# print(\"Mean:\", np.array(list_val_rmse).mean())\n ",
"\nFold 1/5\n"
]
],
[
[
"# Inference",
"_____no_output_____"
]
],
[
[
"test_dataset = LitDataset(test_df, inference_only=True)",
"_____no_output_____"
],
[
"all_predictions = np.zeros((len(list_val_rmse), len(test_df)))\n\ntest_dataset = LitDataset(test_df, inference_only=True)\ntest_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,\n drop_last=False, shuffle=False, num_workers=2)\n\nfor index in range(len(list_val_rmse)): \n model_path = f\"model_{index + 1}.pth\"\n print(f\"\\nUsing {model_path}\")\n \n model = LitModel()\n model.load_state_dict(torch.load(model_path)) \n model.to(DEVICE)\n \n all_predictions[index] = predict(model, test_loader)\n \n del model\n gc.collect()",
"\nUsing model_1.pth\n"
],
[
"predictions = all_predictions.mean(axis=0)\nsubmission_df.target = predictions\nprint(submission_df)\nsubmission_df.to_csv(\"submission.csv\", index=False)",
" id target\n0 c0f722661 -0.429293\n1 f0953f0a5 -0.629245\n2 0df072751 -0.310021\n3 04caf4e0c -2.507907\n4 0e63f8bea -1.677336\n5 12537fe78 -1.408924\n6 965e592c0 0.144062\n"
]
],
[
[
"# Upload data",
"_____no_output_____"
]
],
[
[
"!date +\"%Y%m%d%I%M%S\"",
"20210703041510\n"
],
[
"!mkdir -p ./output/\n!cp -f ./model* ./output/\n!cp -f ./drive/MyDrive/kaggle/commonlit/Lightweight-Roberta-base/dataset-metadata.json ./output/\n!sed -i -e \"s/lightweight-roberta-base/lightweight-roberta-base-`date +\"%Y%m%d%I%M%S\"`/\" ./output/dataset-metadata.json",
"_____no_output_____"
],
[
"!cat ./output/dataset-metadata.json",
"{\n \"licenses\": [\n {\n \"name\": \"CC0-1.0\"\n }\n ], \n \"id\": \"iamnishipy/lightweight-roberta-base-20210703041757\", \n \"title\": \"Lightweight-Roberta-base\"\n}"
],
[
"!mkdir -p ./output/\n!cp -f ./model* ./output/\n!cp -f ./drive/MyDrive/kaggle/commonlit/Lightweight-Roberta-base/dataset-metadata.json ./output/\n!sed -i -e \"s/lightweight-roberta-base/lightweight-roberta-base-`date +\"%Y%m%d%I%M%S\"`/\" ./output/dataset-metadata.json\n!kaggle datasets create -p ./output/",
"Starting upload for file model_3.pth\nWarning: Looks like you're using an outdated API Version, please consider updating (server 1.5.12 / client 1.5.4)\n100% 477M/477M [00:19<00:00, 25.6MB/s]\nUpload successful: model_3.pth (477MB)\nStarting upload for file model_5.pth\n100% 477M/477M [00:18<00:00, 26.4MB/s]\nUpload successful: model_5.pth (477MB)\nStarting upload for file model_4.pth\n100% 477M/477M [00:18<00:00, 26.8MB/s]\nUpload successful: model_4.pth (477MB)\nStarting upload for file model_2.pth\n100% 477M/477M [00:19<00:00, 25.9MB/s]\nUpload successful: model_2.pth (477MB)\nStarting upload for file model_1.pth\n100% 477M/477M [00:18<00:00, 27.1MB/s]\nUpload successful: model_1.pth (477MB)\nYour private Dataset is being created. Please check progress at /api/v1/datasets/status//iamnishipy/lightweight-roberta-base-20210703041831\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
ec6499f90afd6586e56a1e3c5e1975eda3555ac3 | 254,231 | ipynb | Jupyter Notebook | Jupyter/OLD/Real Graph Data - Cora.ipynb | cshen6/GraphNN | de12823b7275f611e1bc4ce71897f809d4fd9daa | [
"Apache-2.0"
]
| null | null | null | Jupyter/OLD/Real Graph Data - Cora.ipynb | cshen6/GraphNN | de12823b7275f611e1bc4ce71897f809d4fd9daa | [
"Apache-2.0"
]
| null | null | null | Jupyter/OLD/Real Graph Data - Cora.ipynb | cshen6/GraphNN | de12823b7275f611e1bc4ce71897f809d4fd9daa | [
"Apache-2.0"
]
| null | null | null | 72.492444 | 198 | 0.613399 | [
[
[
"import numpy as np\nimport pandas as pd\nimport time",
"_____no_output_____"
],
[
"Edges = pd.read_csv(\"https://raw.githubusercontent.com/cshen6/GraphNN/main/Data/Cora/cora%20edges.csv\",header=None)\nEdges",
"_____no_output_____"
],
[
"Edges = np.array(Edges)",
"_____no_output_____"
],
[
"Labels = pd.read_csv(\"https://raw.githubusercontent.com/cshen6/GraphNN/main/Data/Cora/cora%20node_labels.csv\",header=None)\nLabels.max()",
"_____no_output_____"
],
[
"y = np.array(Labels)",
"_____no_output_____"
],
[
"A = np.zeros((2709,2709))",
"_____no_output_____"
],
[
"for i in range (0,5429):\n A[Edges[i,0],Edges[i,1]]=1",
"_____no_output_____"
],
[
"sum(A[:,299])",
"_____no_output_____"
],
[
"A=np.delete(A,0,1)\nA=np.delete(A,0,0)\nA.shape",
"_____no_output_____"
]
],
[
[
"# Split Data",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import RepeatedKFold\nrkf = RepeatedKFold(n_splits=5, n_repeats=5, random_state=5)\nfor train_index, test_index in rkf.split(A):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train, X_test = A[train_index], A[test_index]\n y_train, y_test = y[train_index], y[test_index]",
"TRAIN: [ 0 1 3 ... 2705 2706 2707] TEST: [ 2 4 12 15 26 27 32 33 39 47 52 54 65 68\n 71 72 78 86 87 108 124 126 133 135 139 141 142 146\n 149 150 163 170 171 173 176 177 178 182 183 186 187 194\n 197 202 207 219 220 226 227 230 231 232 234 242 246 248\n 258 266 269 275 278 280 281 292 293 295 299 314 342 345\n 346 347 353 360 378 385 394 395 399 401 410 413 424 426\n 428 430 432 437 441 442 443 444 452 463 464 466 470 472\n 473 477 480 483 487 488 495 498 503 506 517 519 521 546\n 548 549 557 559 563 569 573 586 589 599 601 607 610 611\n 613 618 621 622 626 652 653 659 662 663 668 669 675 680\n 681 687 689 704 729 731 734 747 749 752 753 755 758 762\n 763 767 769 780 781 791 796 797 811 814 816 819 824 825\n 837 840 845 847 852 856 857 859 869 878 886 890 894 896\n 903 906 907 909 924 928 935 944 947 966 975 976 982 1002\n 1003 1015 1018 1023 1024 1033 1049 1057 1059 1062 1065 1070 1071 1073\n 1075 1081 1092 1096 1097 1103 1104 1106 1108 1110 1113 1116 1118 1123\n 1125 1128 1130 1141 1147 1149 1150 1154 1155 1163 1167 1170 1173 1175\n 1178 1180 1184 1186 1190 1191 1195 1196 1200 1203 1204 1212 1213 1215\n 1220 1235 1252 1269 1276 1289 1291 1293 1296 1297 1302 1307 1310 1312\n 1317 1320 1326 1330 1335 1340 1355 1356 1370 1376 1382 1387 1396 1406\n 1408 1423 1428 1437 1442 1445 1446 1451 1455 1462 1465 1474 1476 1478\n 1481 1483 1485 1487 1488 1489 1494 1507 1513 1523 1531 1538 1541 1545\n 1546 1548 1557 1559 1561 1563 1575 1580 1581 1585 1597 1598 1601 1605\n 1606 1608 1615 1619 1620 1631 1638 1641 1646 1654 1657 1663 1666 1667\n 1673 1675 1676 1683 1685 1701 1704 1718 1728 1730 1733 1738 1739 1746\n 1763 1767 1771 1773 1792 1796 1799 1804 1811 1816 1825 1833 1837 1842\n 1843 1844 1845 1847 1848 1857 1861 1879 1881 1884 1886 1892 1908 1910\n 1911 1915 1920 1923 1925 1936 1957 1959 1965 1972 1975 1986 1992 1996\n 2006 2008 2016 2019 2023 2024 2027 2028 2032 2036 2041 2043 2052 2055\n 2060 2061 2063 2065 2067 2085 2093 2108 2120 2132 2133 2134 2141 2145\n 2146 2151 
2152 2157 2159 2166 2169 2170 2171 2173 2174 2182 2183 2186\n 2195 2200 2201 2207 2210 2213 2234 2238 2244 2246 2249 2251 2259 2265\n 2268 2269 2273 2276 2280 2282 2284 2287 2288 2291 2294 2296 2297 2308\n 2314 2320 2327 2333 2339 2342 2344 2345 2351 2360 2367 2384 2389 2395\n 2396 2404 2406 2412 2416 2423 2432 2436 2440 2447 2452 2454 2465 2471\n 2472 2474 2478 2481 2483 2489 2493 2499 2503 2508 2511 2512 2513 2523\n 2536 2537 2542 2549 2554 2557 2562 2564 2567 2569 2573 2576 2577 2579\n 2586 2594 2603 2608 2609 2618 2621 2624 2626 2633 2634 2645 2646 2654\n 2657 2658 2670 2673 2674 2675 2684 2686 2693 2700]\nTRAIN: [ 2 3 4 ... 2704 2706 2707] TEST: [ 0 1 6 7 10 11 20 29 34 35 43 45 50 51\n 62 70 73 75 88 90 96 98 100 104 112 115 116 117\n 118 120 121 132 136 137 156 164 165 166 169 172 175 181\n 184 191 199 201 208 210 217 222 236 237 247 259 261 279\n 282 283 285 286 288 297 300 303 304 315 316 326 328 334\n 336 338 343 348 354 356 362 363 368 369 370 390 392 412\n 416 417 419 421 425 427 431 433 440 447 448 449 467 469\n 475 478 479 482 492 501 509 511 513 523 527 532 533 539\n 540 543 544 552 556 564 568 575 576 578 587 600 602 603\n 606 615 617 619 620 629 633 649 651 660 661 664 666 667\n 676 678 683 684 695 697 698 701 703 707 708 710 711 716\n 720 739 748 772 783 787 789 795 806 807 812 823 830 831\n 833 838 851 855 863 866 871 874 887 888 889 908 915 919\n 929 932 934 936 938 945 949 952 953 954 955 958 962 965\n 969 973 979 980 985 989 991 994 995 997 999 1000 1001 1008\n 1012 1013 1034 1036 1039 1046 1064 1067 1072 1074 1076 1083 1087 1090\n 1095 1098 1109 1114 1115 1127 1132 1134 1138 1142 1152 1157 1165 1183\n 1188 1214 1217 1218 1219 1228 1232 1236 1239 1240 1254 1257 1260 1262\n 1266 1271 1272 1274 1277 1288 1314 1316 1323 1324 1329 1332 1342 1343\n 1346 1348 1349 1353 1354 1358 1363 1373 1381 1383 1391 1397 1402 1404\n 1405 1411 1412 1415 1419 1420 1427 1432 1433 1434 1435 1441 1448 1449\n 1450 1456 1463 1468 1469 1486 1492 1517 1518 1519 1521 1528 
1532 1554\n 1562 1564 1566 1576 1584 1587 1591 1607 1610 1611 1613 1623 1628 1632\n 1639 1642 1643 1650 1652 1653 1656 1680 1691 1693 1700 1705 1709 1711\n 1712 1714 1717 1720 1721 1724 1727 1735 1742 1744 1747 1748 1750 1754\n 1756 1757 1759 1760 1762 1764 1768 1769 1781 1786 1790 1791 1795 1812\n 1813 1818 1819 1824 1827 1828 1829 1856 1858 1864 1865 1867 1877 1880\n 1885 1896 1899 1903 1932 1933 1938 1942 1946 1948 1955 1960 1961 1963\n 1976 1981 1994 1995 2001 2010 2017 2034 2035 2037 2040 2044 2051 2069\n 2074 2076 2082 2084 2090 2094 2095 2096 2098 2109 2111 2113 2128 2143\n 2147 2154 2158 2165 2175 2189 2192 2193 2197 2198 2199 2203 2211 2214\n 2218 2220 2222 2232 2236 2239 2241 2243 2257 2263 2270 2277 2278 2292\n 2293 2301 2305 2311 2312 2321 2322 2325 2326 2329 2332 2338 2353 2357\n 2358 2372 2375 2377 2378 2394 2399 2400 2408 2409 2410 2413 2418 2419\n 2422 2424 2427 2428 2433 2439 2444 2445 2451 2453 2458 2466 2476 2479\n 2480 2484 2485 2486 2488 2492 2494 2496 2500 2501 2506 2509 2515 2522\n 2526 2528 2533 2543 2546 2547 2552 2561 2565 2572 2580 2593 2601 2613\n 2614 2616 2622 2623 2629 2632 2635 2639 2647 2648 2649 2659 2662 2665\n 2678 2680 2681 2682 2683 2689 2690 2692 2699 2705]\nTRAIN: [ 0 1 2 ... 
2705 2706 2707] TEST: [ 3 8 9 21 23 37 40 46 57 58 60 61 76 77\n 79 80 81 84 85 92 95 99 101 106 107 119 123 130\n 138 145 148 151 153 159 161 179 189 195 196 198 203 209\n 212 213 218 221 228 235 238 240 253 254 265 267 277 284\n 301 302 308 311 322 323 327 335 337 339 344 358 361 366\n 371 376 379 383 384 386 388 396 398 402 403 404 436 438\n 446 453 456 457 458 461 462 468 489 496 504 514 515 529\n 531 534 545 550 551 561 562 565 570 574 577 579 581 583\n 591 592 616 624 628 631 635 636 642 648 654 656 657 671\n 672 674 690 691 699 702 714 723 728 732 733 735 737 746\n 756 757 759 766 773 777 779 782 785 788 790 793 798 800\n 802 815 817 822 827 828 832 834 836 839 842 849 861 862\n 864 867 870 891 893 901 902 910 917 923 931 939 943 948\n 957 959 963 964 968 970 977 978 987 988 993 996 1005 1009\n 1011 1014 1016 1017 1020 1021 1022 1027 1029 1030 1031 1035 1042 1047\n 1051 1052 1056 1058 1060 1061 1069 1082 1089 1091 1102 1105 1107 1117\n 1121 1124 1131 1143 1153 1158 1166 1171 1172 1174 1181 1187 1189 1192\n 1197 1198 1201 1207 1208 1223 1224 1227 1229 1231 1244 1245 1246 1250\n 1251 1259 1263 1264 1268 1270 1275 1279 1282 1283 1284 1286 1294 1298\n 1300 1305 1315 1318 1321 1328 1333 1334 1336 1338 1341 1344 1345 1352\n 1365 1367 1368 1384 1390 1394 1395 1398 1400 1403 1410 1414 1417 1422\n 1430 1431 1436 1438 1443 1447 1457 1460 1471 1472 1475 1484 1491 1496\n 1499 1508 1509 1510 1511 1516 1525 1526 1533 1534 1537 1539 1542 1543\n 1549 1550 1556 1558 1568 1579 1583 1595 1599 1602 1603 1614 1616 1621\n 1624 1636 1637 1647 1655 1658 1659 1660 1661 1674 1677 1678 1682 1686\n 1688 1698 1702 1703 1706 1707 1716 1719 1737 1741 1745 1753 1765 1766\n 1774 1777 1801 1805 1814 1815 1820 1821 1822 1823 1826 1830 1831 1832\n 1836 1838 1840 1841 1852 1871 1873 1875 1876 1878 1890 1891 1893 1901\n 1902 1905 1906 1907 1909 1912 1914 1919 1931 1934 1937 1940 1944 1951\n 1954 1958 1964 1968 1969 1970 1979 1990 1993 1998 2002 2007 2009 2011\n 2014 2015 2018 2025 2031 2049 2050 
2054 2059 2062 2073 2081 2088 2089\n 2092 2097 2099 2105 2106 2119 2131 2136 2139 2149 2155 2156 2163 2172\n 2177 2184 2188 2191 2204 2226 2242 2245 2250 2255 2256 2260 2266 2283\n 2285 2286 2289 2290 2298 2302 2307 2309 2315 2317 2318 2330 2335 2343\n 2346 2347 2350 2355 2364 2366 2369 2370 2374 2379 2382 2385 2392 2401\n 2402 2414 2417 2420 2421 2425 2426 2434 2438 2448 2449 2457 2463 2464\n 2470 2477 2490 2498 2502 2510 2516 2517 2518 2530 2532 2535 2540 2548\n 2551 2560 2590 2597 2599 2600 2604 2605 2607 2620 2628 2630 2631 2636\n 2640 2644 2676 2685 2687 2691 2694 2695 2697 2703]\nTRAIN: [ 0 1 2 ... 2703 2705 2706] TEST: [ 13 18 22 24 28 30 31 38 42 48 56 59 64 66\n 69 89 93 97 105 109 111 113 127 129 131 134 140 143\n 155 157 158 162 168 193 200 206 223 224 244 249 250 262\n 287 289 296 298 305 309 312 313 317 319 321 325 329 330\n 331 340 341 349 351 352 355 357 365 367 375 400 405 406\n 407 409 411 420 423 429 434 439 445 450 451 454 455 471\n 481 484 485 486 490 493 494 497 500 505 512 516 520 524\n 525 526 530 537 541 542 553 554 555 558 560 567 580 584\n 585 590 594 595 596 604 608 612 623 625 630 634 637 640\n 641 643 644 645 646 647 650 655 665 670 673 677 679 682\n 686 688 692 693 696 705 706 709 715 718 719 722 724 730\n 736 742 743 745 754 765 774 775 776 784 786 792 799 805\n 809 810 813 820 821 826 841 843 848 858 868 872 873 876\n 877 880 883 885 904 911 912 916 918 925 926 933 941 950\n 961 972 983 992 1004 1006 1007 1028 1038 1040 1041 1044 1048 1055\n 1063 1066 1077 1078 1079 1080 1084 1133 1144 1145 1146 1156 1161 1162\n 1164 1176 1177 1182 1193 1194 1202 1222 1225 1237 1243 1253 1256 1258\n 1265 1267 1273 1280 1285 1290 1292 1299 1303 1304 1306 1309 1319 1325\n 1327 1331 1347 1350 1351 1357 1360 1362 1364 1366 1369 1371 1377 1378\n 1379 1380 1385 1388 1389 1393 1401 1416 1418 1426 1439 1440 1444 1452\n 1458 1464 1466 1467 1473 1480 1482 1490 1493 1495 1501 1502 1503 1504\n 1505 1506 1512 1514 1522 1524 1529 1530 1535 1536 1544 1560 1569 
1571\n 1573 1578 1588 1589 1592 1596 1612 1617 1618 1622 1625 1626 1627 1629\n 1630 1634 1640 1644 1648 1662 1665 1668 1670 1672 1681 1684 1687 1689\n 1692 1695 1699 1708 1713 1722 1723 1729 1732 1736 1740 1749 1752 1755\n 1758 1761 1772 1775 1778 1780 1783 1784 1787 1788 1794 1797 1798 1803\n 1806 1809 1817 1834 1835 1839 1846 1851 1859 1863 1866 1868 1882 1887\n 1894 1895 1897 1904 1913 1922 1924 1929 1935 1939 1941 1949 1950 1952\n 1962 1967 1971 1974 1977 1978 1980 1983 1985 1987 1988 1991 1997 2005\n 2012 2020 2021 2022 2026 2030 2033 2038 2039 2056 2066 2070 2071 2078\n 2080 2083 2100 2101 2102 2107 2114 2117 2118 2123 2130 2135 2137 2150\n 2153 2164 2168 2176 2179 2180 2181 2187 2196 2202 2206 2208 2215 2216\n 2224 2227 2228 2229 2235 2237 2247 2248 2253 2262 2264 2281 2300 2303\n 2304 2306 2319 2323 2331 2337 2340 2341 2356 2361 2371 2381 2383 2390\n 2391 2397 2403 2405 2407 2411 2430 2431 2442 2460 2468 2475 2482 2497\n 2504 2505 2519 2520 2524 2525 2529 2534 2538 2541 2544 2550 2553 2563\n 2566 2570 2571 2574 2578 2582 2583 2584 2588 2592 2596 2602 2611 2615\n 2617 2619 2625 2627 2638 2641 2650 2652 2660 2661 2663 2666 2668 2671\n 2677 2679 2688 2696 2698 2701 2702 2704 2707]\nTRAIN: [ 0 1 2 ... 
2704 2705 2707] TEST: [ 5 14 16 17 19 25 36 41 44 49 53 55 63 67\n 74 82 83 91 94 102 103 110 114 122 125 128 144 147\n 152 154 160 167 174 180 185 188 190 192 204 205 211 214\n 215 216 225 229 233 239 241 243 245 251 252 255 256 257\n 260 263 264 268 270 271 272 273 274 276 290 291 294 306\n 307 310 318 320 324 332 333 350 359 364 372 373 374 377\n 380 381 382 387 389 391 393 397 408 414 415 418 422 435\n 459 460 465 474 476 491 499 502 507 508 510 518 522 528\n 535 536 538 547 566 571 572 582 588 593 597 598 605 609\n 614 627 632 638 639 658 685 694 700 712 713 717 721 725\n 726 727 738 740 741 744 750 751 760 761 764 768 770 771\n 778 794 801 803 804 808 818 829 835 844 846 850 853 854\n 860 865 875 879 881 882 884 892 895 897 898 899 900 905\n 913 914 920 921 922 927 930 937 940 942 946 951 956 960\n 967 971 974 981 984 986 990 998 1010 1019 1025 1026 1032 1037\n 1043 1045 1050 1053 1054 1068 1085 1086 1088 1093 1094 1099 1100 1101\n 1111 1112 1119 1120 1122 1126 1129 1135 1136 1137 1139 1140 1148 1151\n 1159 1160 1168 1169 1179 1185 1199 1205 1206 1209 1210 1211 1216 1221\n 1226 1230 1233 1234 1238 1241 1242 1247 1248 1249 1255 1261 1278 1281\n 1287 1295 1301 1308 1311 1313 1322 1337 1339 1359 1361 1372 1374 1375\n 1386 1392 1399 1407 1409 1413 1421 1424 1425 1429 1453 1454 1459 1461\n 1470 1477 1479 1497 1498 1500 1515 1520 1527 1540 1547 1551 1552 1553\n 1555 1565 1567 1570 1572 1574 1577 1582 1586 1590 1593 1594 1600 1604\n 1609 1633 1635 1645 1649 1651 1664 1669 1671 1679 1690 1694 1696 1697\n 1710 1715 1725 1726 1731 1734 1743 1751 1770 1776 1779 1782 1785 1789\n 1793 1800 1802 1807 1808 1810 1849 1850 1853 1854 1855 1860 1862 1869\n 1870 1872 1874 1883 1888 1889 1898 1900 1916 1917 1918 1921 1926 1927\n 1928 1930 1943 1945 1947 1953 1956 1966 1973 1982 1984 1989 1999 2000\n 2003 2004 2013 2029 2042 2045 2046 2047 2048 2053 2057 2058 2064 2068\n 2072 2075 2077 2079 2086 2087 2091 2103 2104 2110 2112 2115 2116 2121\n 2122 2124 2125 2126 2127 2129 2138 2140 
2142 2144 2148 2160 2161 2162\n 2167 2178 2185 2190 2194 2205 2209 2212 2217 2219 2221 2223 2225 2230\n 2231 2233 2240 2252 2254 2258 2261 2267 2271 2272 2274 2275 2279 2295\n 2299 2310 2313 2316 2324 2328 2334 2336 2348 2349 2352 2354 2359 2362\n 2363 2365 2368 2373 2376 2380 2386 2387 2388 2393 2398 2415 2429 2435\n 2437 2441 2443 2446 2450 2455 2456 2459 2461 2462 2467 2469 2473 2487\n 2491 2495 2507 2514 2521 2527 2531 2539 2545 2555 2556 2558 2559 2568\n 2575 2581 2585 2587 2589 2591 2595 2598 2606 2610 2612 2637 2642 2643\n 2651 2653 2655 2656 2664 2667 2669 2672 2706]\nTRAIN: [ 0 2 3 ... 2703 2705 2707] TEST: [ 1 7 22 27 33 40 45 49 51 63 67 71 74 75\n 77 84 85 105 111 113 119 125 136 144 152 158 177 182\n 193 195 206 207 211 217 222 225 227 229 233 236 238 242\n 249 261 263 265 270 271 279 285 289 302 305 309 314 318\n 319 332 336 340 348 350 368 369 375 380 386 392 393 397\n 398 405 410 413 424 425 429 434 440 450 453 455 456 457\n 458 467 468 487 490 491 498 500 505 513 522 530 532 534\n 541 545 547 559 561 562 563 564 569 576 578 584 586 594\n 595 605 608 610 615 616 628 633 640 642 643 646 651 654\n 656 658 663 670 674 682 685 689 693 707 710 721 735 742\n 752 755 756 757 761 767 768 769 772 775 791 792 796 798\n 808 809 815 816 833 836 837 846 848 849 855 869 875 880\n 883 890 891 892 898 901 902 908 909 916 928 933 935 938\n 939 940 944 947 960 963 971 973 974 978 992 1010 1020 1025\n 1039 1045 1047 1051 1057 1061 1072 1076 1078 1079 1082 1092 1093 1098\n 1099 1104 1105 1109 1110 1131 1133 1140 1142 1145 1157 1163 1164 1166\n 1168 1170 1172 1191 1207 1209 1211 1213 1221 1224 1225 1227 1230 1233\n 1242 1253 1260 1262 1266 1268 1272 1275 1281 1290 1292 1293 1296 1297\n 1298 1308 1310 1315 1319 1322 1340 1343 1347 1349 1353 1366 1367 1368\n 1375 1391 1392 1399 1407 1412 1416 1428 1430 1434 1444 1452 1456 1457\n 1458 1459 1467 1472 1473 1476 1482 1492 1496 1497 1503 1514 1529 1532\n 1536 1546 1547 1554 1555 1558 1566 1572 1575 1594 1601 1613 1614 1616\n 
1619 1624 1625 1627 1633 1638 1642 1643 1648 1656 1659 1660 1673 1678\n 1693 1694 1698 1701 1702 1707 1708 1712 1713 1717 1723 1734 1737 1738\n 1740 1760 1763 1773 1777 1781 1791 1795 1796 1798 1800 1803 1807 1810\n 1814 1816 1827 1851 1863 1867 1872 1878 1880 1885 1886 1895 1896 1904\n 1905 1913 1922 1924 1925 1931 1935 1938 1939 1942 1945 1948 1950 1952\n 1955 1959 1960 1961 1962 1964 1966 1970 1971 1974 1976 1978 1981 1991\n 1994 1999 2000 2002 2003 2013 2014 2020 2021 2022 2023 2027 2029 2030\n 2031 2035 2050 2059 2064 2081 2085 2116 2130 2136 2144 2146 2153 2158\n 2178 2188 2191 2197 2198 2210 2211 2215 2217 2218 2219 2225 2227 2244\n 2247 2250 2251 2252 2254 2256 2260 2262 2281 2282 2289 2300 2304 2305\n 2310 2311 2317 2324 2326 2327 2341 2347 2355 2356 2367 2368 2375 2380\n 2387 2392 2395 2404 2408 2410 2414 2418 2422 2423 2429 2431 2433 2434\n 2444 2447 2449 2450 2453 2454 2466 2468 2479 2481 2482 2489 2498 2511\n 2513 2515 2518 2519 2525 2527 2529 2530 2534 2536 2539 2544 2563 2566\n 2575 2582 2583 2586 2587 2590 2596 2600 2603 2608 2611 2615 2616 2620\n 2621 2622 2626 2627 2628 2632 2635 2637 2643 2644 2647 2649 2650 2651\n 2656 2671 2672 2676 2679 2683 2692 2702 2704 2706]\nTRAIN: [ 1 2 3 ... 
2705 2706 2707] TEST: [ 0 9 10 19 28 31 35 38 41 47 54 64 73 76\n 79 82 87 89 91 93 96 101 110 117 118 129 141 148\n 150 151 154 167 168 169 172 178 181 187 191 194 196 197\n 198 203 209 218 228 239 240 248 252 259 262 264 267 274\n 275 276 278 282 286 306 308 313 315 317 327 330 339 342\n 349 353 354 356 361 362 364 378 379 383 389 406 415 416\n 417 419 421 431 436 441 444 470 475 481 488 489 496 497\n 510 518 519 523 526 528 529 537 538 550 551 552 557 560\n 573 585 589 593 599 601 611 617 618 619 622 625 627 631\n 634 637 639 647 650 655 660 665 667 669 671 678 679 686\n 690 691 695 696 697 701 705 716 718 719 720 725 726 728\n 729 739 743 746 753 754 774 780 782 784 785 786 789 799\n 805 811 814 835 841 843 862 863 868 870 879 884 887 930\n 934 945 956 957 958 961 967 972 976 979 980 983 984 986\n 989 995 1001 1008 1009 1014 1019 1022 1026 1034 1037 1040 1041 1043\n 1054 1060 1062 1066 1068 1069 1080 1081 1084 1089 1091 1096 1108 1112\n 1114 1122 1127 1134 1138 1153 1158 1183 1184 1186 1190 1195 1201 1203\n 1210 1228 1229 1239 1240 1244 1245 1246 1248 1249 1265 1267 1271 1276\n 1277 1283 1291 1294 1301 1302 1307 1309 1323 1326 1330 1336 1345 1346\n 1350 1351 1354 1357 1363 1369 1377 1380 1383 1386 1387 1398 1400 1405\n 1406 1409 1423 1432 1436 1439 1442 1445 1451 1465 1469 1474 1483 1484\n 1485 1490 1493 1501 1509 1512 1513 1524 1527 1530 1535 1538 1540 1543\n 1549 1553 1556 1564 1565 1574 1577 1588 1590 1591 1597 1598 1602 1620\n 1621 1623 1628 1640 1641 1645 1653 1658 1662 1663 1668 1674 1679 1685\n 1689 1691 1692 1699 1705 1709 1710 1711 1716 1719 1724 1725 1731 1735\n 1736 1741 1756 1774 1780 1785 1786 1787 1789 1799 1801 1804 1808 1820\n 1821 1828 1830 1832 1838 1840 1844 1857 1860 1861 1865 1871 1876 1881\n 1890 1894 1902 1903 1915 1920 1926 1927 1932 1933 1940 1944 1947 1951\n 1957 1963 1983 1990 1998 2008 2018 2024 2042 2045 2048 2049 2051 2058\n 2060 2061 2068 2070 2076 2079 2084 2089 2090 2092 2095 2096 2099 2103\n 2109 2114 2120 2121 2122 2124 2125 
2126 2127 2129 2132 2133 2141 2145\n 2148 2150 2151 2156 2157 2161 2164 2165 2171 2176 2187 2189 2193 2200\n 2213 2232 2234 2236 2237 2238 2249 2253 2275 2276 2277 2283 2295 2297\n 2308 2309 2313 2319 2323 2329 2335 2338 2340 2342 2343 2349 2350 2357\n 2360 2361 2362 2365 2377 2386 2388 2393 2396 2405 2415 2416 2419 2426\n 2430 2439 2445 2446 2448 2451 2460 2465 2473 2474 2484 2499 2502 2508\n 2514 2523 2532 2537 2540 2542 2543 2547 2554 2557 2562 2564 2565 2568\n 2576 2577 2578 2579 2580 2585 2589 2598 2602 2618 2633 2638 2639 2646\n 2661 2667 2668 2669 2680 2684 2687 2689 2695 2696]\nTRAIN: [ 0 1 2 ... 2705 2706 2707] TEST: [ 6 8 11 14 16 17 23 24 29 30 39 42 43 53\n 61 62 65 86 90 95 97 98 102 103 107 114 122 128\n 133 135 139 140 142 147 149 165 173 174 175 176 179 180\n 184 185 188 190 204 213 214 216 219 231 232 234 245 246\n 250 255 266 268 272 283 292 300 303 304 310 311 321 325\n 326 328 329 334 335 337 344 352 359 365 372 373 385 390\n 394 396 399 400 402 403 418 422 423 427 433 437 442 445\n 446 447 449 460 466 476 483 484 486 499 501 504 508 509\n 511 512 527 539 542 549 565 571 572 574 575 583 588 592\n 596 600 603 612 614 621 623 629 630 636 641 653 661 668\n 684 703 708 712 715 722 724 732 740 758 763 764 770 779\n 781 788 795 802 803 804 807 818 823 824 825 830 831 838\n 839 840 850 853 864 865 866 871 872 877 882 886 889 896\n 899 903 907 911 912 917 922 923 924 925 936 937 948 953\n 955 959 970 975 982 987 993 994 997 999 1003 1011 1012 1013\n 1017 1023 1027 1028 1029 1032 1036 1053 1058 1059 1063 1065 1071 1073\n 1074 1075 1077 1083 1087 1088 1119 1121 1123 1124 1125 1128 1136 1137\n 1139 1143 1144 1147 1156 1162 1171 1185 1193 1196 1215 1217 1218 1223\n 1235 1236 1238 1241 1255 1256 1257 1261 1270 1274 1279 1280 1282 1285\n 1287 1288 1289 1305 1312 1321 1327 1331 1332 1333 1337 1338 1344 1360\n 1361 1362 1370 1371 1373 1376 1382 1396 1397 1402 1415 1417 1421 1422\n 1431 1443 1448 1454 1464 1466 1468 1475 1478 1486 1487 1498 1499 1500\n 1502 1511 
1526 1528 1537 1545 1550 1562 1563 1569 1582 1584 1585 1587\n 1589 1593 1595 1599 1604 1605 1606 1610 1617 1629 1632 1637 1655 1664\n 1667 1671 1672 1677 1681 1696 1703 1714 1718 1721 1722 1733 1743 1750\n 1752 1754 1758 1765 1767 1771 1772 1790 1793 1797 1805 1806 1811 1815\n 1819 1822 1823 1824 1825 1831 1834 1836 1839 1843 1853 1856 1868 1869\n 1870 1884 1889 1910 1911 1912 1914 1916 1921 1937 1941 1953 1954 1958\n 1973 1977 1980 1984 1986 1987 1988 2001 2009 2016 2019 2025 2026 2033\n 2037 2038 2054 2055 2065 2066 2067 2073 2078 2080 2082 2087 2091 2093\n 2100 2102 2107 2108 2115 2117 2118 2131 2137 2138 2142 2143 2147 2149\n 2152 2159 2167 2170 2179 2183 2206 2209 2224 2228 2230 2240 2257 2258\n 2261 2265 2268 2269 2278 2279 2285 2286 2292 2294 2296 2302 2306 2314\n 2315 2316 2318 2321 2328 2331 2332 2339 2345 2352 2373 2374 2376 2394\n 2400 2406 2409 2412 2425 2427 2432 2441 2442 2452 2455 2456 2458 2461\n 2463 2469 2470 2476 2477 2485 2486 2487 2492 2495 2496 2507 2512 2517\n 2522 2524 2528 2535 2538 2546 2550 2551 2553 2556 2558 2559 2561 2569\n 2570 2594 2595 2597 2599 2606 2607 2624 2645 2648 2652 2653 2663 2670\n 2674 2675 2682 2686 2688 2690 2691 2694 2697 2698]\n"
],
[
"rkf = RepeatedKFold(n_splits=5, n_repeats=10, random_state=5)\nfor train_index, test_index in rkf.split(A):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train, X_test = A[train_index], A[test_index]\n y_train, y_test = y[train_index], y[test_index]\n X1 = X_train.transpose()\n X_train, X_test = X1[train_index], X1[test_index]",
"TRAIN: [ 0 1 3 ... 2705 2706 2707] TEST: [ 2 4 12 15 26 27 32 33 39 47 52 54 65 68\n 71 72 78 86 87 108 124 126 133 135 139 141 142 146\n 149 150 163 170 171 173 176 177 178 182 183 186 187 194\n 197 202 207 219 220 226 227 230 231 232 234 242 246 248\n 258 266 269 275 278 280 281 292 293 295 299 314 342 345\n 346 347 353 360 378 385 394 395 399 401 410 413 424 426\n 428 430 432 437 441 442 443 444 452 463 464 466 470 472\n 473 477 480 483 487 488 495 498 503 506 517 519 521 546\n 548 549 557 559 563 569 573 586 589 599 601 607 610 611\n 613 618 621 622 626 652 653 659 662 663 668 669 675 680\n 681 687 689 704 729 731 734 747 749 752 753 755 758 762\n 763 767 769 780 781 791 796 797 811 814 816 819 824 825\n 837 840 845 847 852 856 857 859 869 878 886 890 894 896\n 903 906 907 909 924 928 935 944 947 966 975 976 982 1002\n 1003 1015 1018 1023 1024 1033 1049 1057 1059 1062 1065 1070 1071 1073\n 1075 1081 1092 1096 1097 1103 1104 1106 1108 1110 1113 1116 1118 1123\n 1125 1128 1130 1141 1147 1149 1150 1154 1155 1163 1167 1170 1173 1175\n 1178 1180 1184 1186 1190 1191 1195 1196 1200 1203 1204 1212 1213 1215\n 1220 1235 1252 1269 1276 1289 1291 1293 1296 1297 1302 1307 1310 1312\n 1317 1320 1326 1330 1335 1340 1355 1356 1370 1376 1382 1387 1396 1406\n 1408 1423 1428 1437 1442 1445 1446 1451 1455 1462 1465 1474 1476 1478\n 1481 1483 1485 1487 1488 1489 1494 1507 1513 1523 1531 1538 1541 1545\n 1546 1548 1557 1559 1561 1563 1575 1580 1581 1585 1597 1598 1601 1605\n 1606 1608 1615 1619 1620 1631 1638 1641 1646 1654 1657 1663 1666 1667\n 1673 1675 1676 1683 1685 1701 1704 1718 1728 1730 1733 1738 1739 1746\n 1763 1767 1771 1773 1792 1796 1799 1804 1811 1816 1825 1833 1837 1842\n 1843 1844 1845 1847 1848 1857 1861 1879 1881 1884 1886 1892 1908 1910\n 1911 1915 1920 1923 1925 1936 1957 1959 1965 1972 1975 1986 1992 1996\n 2006 2008 2016 2019 2023 2024 2027 2028 2032 2036 2041 2043 2052 2055\n 2060 2061 2063 2065 2067 2085 2093 2108 2120 2132 2133 2134 2141 2145\n 2146 2151 
2152 2157 2159 2166 2169 2170 2171 2173 2174 2182 2183 2186\n 2195 2200 2201 2207 2210 2213 2234 2238 2244 2246 2249 2251 2259 2265\n 2268 2269 2273 2276 2280 2282 2284 2287 2288 2291 2294 2296 2297 2308\n 2314 2320 2327 2333 2339 2342 2344 2345 2351 2360 2367 2384 2389 2395\n 2396 2404 2406 2412 2416 2423 2432 2436 2440 2447 2452 2454 2465 2471\n 2472 2474 2478 2481 2483 2489 2493 2499 2503 2508 2511 2512 2513 2523\n 2536 2537 2542 2549 2554 2557 2562 2564 2567 2569 2573 2576 2577 2579\n 2586 2594 2603 2608 2609 2618 2621 2624 2626 2633 2634 2645 2646 2654\n 2657 2658 2670 2673 2674 2675 2684 2686 2693 2700]\nTRAIN: [ 2 3 4 ... 2704 2706 2707] TEST: [ 0 1 6 7 10 11 20 29 34 35 43 45 50 51\n 62 70 73 75 88 90 96 98 100 104 112 115 116 117\n 118 120 121 132 136 137 156 164 165 166 169 172 175 181\n 184 191 199 201 208 210 217 222 236 237 247 259 261 279\n 282 283 285 286 288 297 300 303 304 315 316 326 328 334\n 336 338 343 348 354 356 362 363 368 369 370 390 392 412\n 416 417 419 421 425 427 431 433 440 447 448 449 467 469\n 475 478 479 482 492 501 509 511 513 523 527 532 533 539\n 540 543 544 552 556 564 568 575 576 578 587 600 602 603\n 606 615 617 619 620 629 633 649 651 660 661 664 666 667\n 676 678 683 684 695 697 698 701 703 707 708 710 711 716\n 720 739 748 772 783 787 789 795 806 807 812 823 830 831\n 833 838 851 855 863 866 871 874 887 888 889 908 915 919\n 929 932 934 936 938 945 949 952 953 954 955 958 962 965\n 969 973 979 980 985 989 991 994 995 997 999 1000 1001 1008\n 1012 1013 1034 1036 1039 1046 1064 1067 1072 1074 1076 1083 1087 1090\n 1095 1098 1109 1114 1115 1127 1132 1134 1138 1142 1152 1157 1165 1183\n 1188 1214 1217 1218 1219 1228 1232 1236 1239 1240 1254 1257 1260 1262\n 1266 1271 1272 1274 1277 1288 1314 1316 1323 1324 1329 1332 1342 1343\n 1346 1348 1349 1353 1354 1358 1363 1373 1381 1383 1391 1397 1402 1404\n 1405 1411 1412 1415 1419 1420 1427 1432 1433 1434 1435 1441 1448 1449\n 1450 1456 1463 1468 1469 1486 1492 1517 1518 1519 1521 1528 
1532 1554\n 1562 1564 1566 1576 1584 1587 1591 1607 1610 1611 1613 1623 1628 1632\n 1639 1642 1643 1650 1652 1653 1656 1680 1691 1693 1700 1705 1709 1711\n 1712 1714 1717 1720 1721 1724 1727 1735 1742 1744 1747 1748 1750 1754\n 1756 1757 1759 1760 1762 1764 1768 1769 1781 1786 1790 1791 1795 1812\n 1813 1818 1819 1824 1827 1828 1829 1856 1858 1864 1865 1867 1877 1880\n 1885 1896 1899 1903 1932 1933 1938 1942 1946 1948 1955 1960 1961 1963\n 1976 1981 1994 1995 2001 2010 2017 2034 2035 2037 2040 2044 2051 2069\n 2074 2076 2082 2084 2090 2094 2095 2096 2098 2109 2111 2113 2128 2143\n 2147 2154 2158 2165 2175 2189 2192 2193 2197 2198 2199 2203 2211 2214\n 2218 2220 2222 2232 2236 2239 2241 2243 2257 2263 2270 2277 2278 2292\n 2293 2301 2305 2311 2312 2321 2322 2325 2326 2329 2332 2338 2353 2357\n 2358 2372 2375 2377 2378 2394 2399 2400 2408 2409 2410 2413 2418 2419\n 2422 2424 2427 2428 2433 2439 2444 2445 2451 2453 2458 2466 2476 2479\n 2480 2484 2485 2486 2488 2492 2494 2496 2500 2501 2506 2509 2515 2522\n 2526 2528 2533 2543 2546 2547 2552 2561 2565 2572 2580 2593 2601 2613\n 2614 2616 2622 2623 2629 2632 2635 2639 2647 2648 2649 2659 2662 2665\n 2678 2680 2681 2682 2683 2689 2690 2692 2699 2705]\nTRAIN: [ 0 1 2 ... 
2705 2706 2707] TEST: [ 3 8 9 21 23 37 40 46 57 58 60 61 76 77\n 79 80 81 84 85 92 95 99 101 106 107 119 123 130\n 138 145 148 151 153 159 161 179 189 195 196 198 203 209\n 212 213 218 221 228 235 238 240 253 254 265 267 277 284\n 301 302 308 311 322 323 327 335 337 339 344 358 361 366\n 371 376 379 383 384 386 388 396 398 402 403 404 436 438\n 446 453 456 457 458 461 462 468 489 496 504 514 515 529\n 531 534 545 550 551 561 562 565 570 574 577 579 581 583\n 591 592 616 624 628 631 635 636 642 648 654 656 657 671\n 672 674 690 691 699 702 714 723 728 732 733 735 737 746\n 756 757 759 766 773 777 779 782 785 788 790 793 798 800\n 802 815 817 822 827 828 832 834 836 839 842 849 861 862\n 864 867 870 891 893 901 902 910 917 923 931 939 943 948\n 957 959 963 964 968 970 977 978 987 988 993 996 1005 1009\n 1011 1014 1016 1017 1020 1021 1022 1027 1029 1030 1031 1035 1042 1047\n 1051 1052 1056 1058 1060 1061 1069 1082 1089 1091 1102 1105 1107 1117\n 1121 1124 1131 1143 1153 1158 1166 1171 1172 1174 1181 1187 1189 1192\n 1197 1198 1201 1207 1208 1223 1224 1227 1229 1231 1244 1245 1246 1250\n 1251 1259 1263 1264 1268 1270 1275 1279 1282 1283 1284 1286 1294 1298\n 1300 1305 1315 1318 1321 1328 1333 1334 1336 1338 1341 1344 1345 1352\n 1365 1367 1368 1384 1390 1394 1395 1398 1400 1403 1410 1414 1417 1422\n 1430 1431 1436 1438 1443 1447 1457 1460 1471 1472 1475 1484 1491 1496\n 1499 1508 1509 1510 1511 1516 1525 1526 1533 1534 1537 1539 1542 1543\n 1549 1550 1556 1558 1568 1579 1583 1595 1599 1602 1603 1614 1616 1621\n 1624 1636 1637 1647 1655 1658 1659 1660 1661 1674 1677 1678 1682 1686\n 1688 1698 1702 1703 1706 1707 1716 1719 1737 1741 1745 1753 1765 1766\n 1774 1777 1801 1805 1814 1815 1820 1821 1822 1823 1826 1830 1831 1832\n 1836 1838 1840 1841 1852 1871 1873 1875 1876 1878 1890 1891 1893 1901\n 1902 1905 1906 1907 1909 1912 1914 1919 1931 1934 1937 1940 1944 1951\n 1954 1958 1964 1968 1969 1970 1979 1990 1993 1998 2002 2007 2009 2011\n 2014 2015 2018 2025 2031 2049 2050 
2054 2059 2062 2073 2081 2088 2089\n 2092 2097 2099 2105 2106 2119 2131 2136 2139 2149 2155 2156 2163 2172\n 2177 2184 2188 2191 2204 2226 2242 2245 2250 2255 2256 2260 2266 2283\n 2285 2286 2289 2290 2298 2302 2307 2309 2315 2317 2318 2330 2335 2343\n 2346 2347 2350 2355 2364 2366 2369 2370 2374 2379 2382 2385 2392 2401\n 2402 2414 2417 2420 2421 2425 2426 2434 2438 2448 2449 2457 2463 2464\n 2470 2477 2490 2498 2502 2510 2516 2517 2518 2530 2532 2535 2540 2548\n 2551 2560 2590 2597 2599 2600 2604 2605 2607 2620 2628 2630 2631 2636\n 2640 2644 2676 2685 2687 2691 2694 2695 2697 2703]\nTRAIN: [ 0 1 2 ... 2703 2705 2706] TEST: [ 13 18 22 24 28 30 31 38 42 48 56 59 64 66\n 69 89 93 97 105 109 111 113 127 129 131 134 140 143\n 155 157 158 162 168 193 200 206 223 224 244 249 250 262\n 287 289 296 298 305 309 312 313 317 319 321 325 329 330\n 331 340 341 349 351 352 355 357 365 367 375 400 405 406\n 407 409 411 420 423 429 434 439 445 450 451 454 455 471\n 481 484 485 486 490 493 494 497 500 505 512 516 520 524\n 525 526 530 537 541 542 553 554 555 558 560 567 580 584\n 585 590 594 595 596 604 608 612 623 625 630 634 637 640\n 641 643 644 645 646 647 650 655 665 670 673 677 679 682\n 686 688 692 693 696 705 706 709 715 718 719 722 724 730\n 736 742 743 745 754 765 774 775 776 784 786 792 799 805\n 809 810 813 820 821 826 841 843 848 858 868 872 873 876\n 877 880 883 885 904 911 912 916 918 925 926 933 941 950\n 961 972 983 992 1004 1006 1007 1028 1038 1040 1041 1044 1048 1055\n 1063 1066 1077 1078 1079 1080 1084 1133 1144 1145 1146 1156 1161 1162\n 1164 1176 1177 1182 1193 1194 1202 1222 1225 1237 1243 1253 1256 1258\n 1265 1267 1273 1280 1285 1290 1292 1299 1303 1304 1306 1309 1319 1325\n 1327 1331 1347 1350 1351 1357 1360 1362 1364 1366 1369 1371 1377 1378\n 1379 1380 1385 1388 1389 1393 1401 1416 1418 1426 1439 1440 1444 1452\n 1458 1464 1466 1467 1473 1480 1482 1490 1493 1495 1501 1502 1503 1504\n 1505 1506 1512 1514 1522 1524 1529 1530 1535 1536 1544 1560 1569 
1571\n 1573 1578 1588 1589 1592 1596 1612 1617 1618 1622 1625 1626 1627 1629\n 1630 1634 1640 1644 1648 1662 1665 1668 1670 1672 1681 1684 1687 1689\n 1692 1695 1699 1708 1713 1722 1723 1729 1732 1736 1740 1749 1752 1755\n 1758 1761 1772 1775 1778 1780 1783 1784 1787 1788 1794 1797 1798 1803\n 1806 1809 1817 1834 1835 1839 1846 1851 1859 1863 1866 1868 1882 1887\n 1894 1895 1897 1904 1913 1922 1924 1929 1935 1939 1941 1949 1950 1952\n 1962 1967 1971 1974 1977 1978 1980 1983 1985 1987 1988 1991 1997 2005\n 2012 2020 2021 2022 2026 2030 2033 2038 2039 2056 2066 2070 2071 2078\n 2080 2083 2100 2101 2102 2107 2114 2117 2118 2123 2130 2135 2137 2150\n 2153 2164 2168 2176 2179 2180 2181 2187 2196 2202 2206 2208 2215 2216\n 2224 2227 2228 2229 2235 2237 2247 2248 2253 2262 2264 2281 2300 2303\n 2304 2306 2319 2323 2331 2337 2340 2341 2356 2361 2371 2381 2383 2390\n 2391 2397 2403 2405 2407 2411 2430 2431 2442 2460 2468 2475 2482 2497\n 2504 2505 2519 2520 2524 2525 2529 2534 2538 2541 2544 2550 2553 2563\n 2566 2570 2571 2574 2578 2582 2583 2584 2588 2592 2596 2602 2611 2615\n 2617 2619 2625 2627 2638 2641 2650 2652 2660 2661 2663 2666 2668 2671\n 2677 2679 2688 2696 2698 2701 2702 2704 2707]\n"
],
[
"X_train.shape,X_test.shape",
"_____no_output_____"
]
],
[
[
"# LDA",
"_____no_output_____"
]
],
[
[
"from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nbegin = time.time()\nclf = LinearDiscriminantAnalysis()\nclf.fit(X_train, y_train.ravel())\nLDAacc = clf.score(X_test, y_test)\nprint(LDAacc)\ntime.sleep(1) \nend = time.time()\nprint(f\"Total runtime of the program is {end - begin}\")",
"0.6229205175600739\nTotal runtime of the program is 7.219120979309082\n"
]
],
[
[
"# SVD with LDA",
"_____no_output_____"
]
],
[
[
"from scipy import linalg\nbegin1 = time.time()\nU, s, VT = linalg.svd(A)\nnewx=[]\nfor c in range(1,10) :\n newx.append(U[:,0:c]*s[0:c]**0.5)\nclf2 = LinearDiscriminantAnalysis()\nSVDaccuracy=[]\nfor i in range(0,9):\n clf.fit(newx[i], y.ravel())\n SVDaccuracy.append(clf.score(newx[i], y.ravel()))\nprint(SVDaccuracy)\ntime.sleep(2) \nend1 = time.time()\nprint(f\"Total runtime of the program is {end1 - begin1}\")",
"[0.30465288035450516, 0.3068685376661743, 0.30908419497784345, 0.30908419497784345, 0.30908419497784345, 0.30908419497784345, 0.31166912850812406, 0.3124076809453471, 0.31388478581979323]\nTotal runtime of the program is 12.886657953262329\n"
]
],
[
[
"# MLPz",
"_____no_output_____"
]
],
[
[
"w = np.zeros((2167,8))\nfor x in range(0,2167):\n i=int(y_train[x])\n w[x][i]=1",
"_____no_output_____"
],
[
"z_train=np.matmul(X_train,w)\nz_test = np.matmul(X_test,w)",
"_____no_output_____"
],
[
"from sklearn.neural_network import MLPClassifier\nbegin2 = time.time()\nclf3 = MLPClassifier(random_state=2, max_iter=1000)\nclf3.fit(z_train, y_train.ravel())\nZYAcc = clf3.score(z_test, y_test)\nprint(ZYAcc)\ntime.sleep(3) \nend2 = time.time()\nprint(f\"Total runtime of the program is {end2 - begin2}\")",
"0.7486136783733827\nTotal runtime of the program is 9.596219062805176\n"
]
],
[
[
"# LDAz",
"_____no_output_____"
]
],
[
[
"begin3 = time.time()\nclf4 = LinearDiscriminantAnalysis()\nclf4.fit(z_train,y_train.ravel())\nZYLDA = clf4.score(z_test, y_test.ravel())\nprint(ZYLDA)\ntime.sleep(4) \nend3 = time.time()\nprint(f\"Total runtime of the program is {end3 - begin3}\")",
"0.7486136783733827\nTotal runtime of the program is 4.01516580581665\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec649c287a77347fa8d48571f21f2bfe06e61a1f | 15,829 | ipynb | Jupyter Notebook | Figures/Figure 4/Fig4D.ipynb | JasonACarter/Allelic_inclusion | 1c36bd8b8244af20cb4d858b747bcb37ea733cf9 | [
"Unlicense"
]
| 2 | 2020-01-20T04:57:18.000Z | 2022-02-06T17:02:37.000Z | Figures/Figure 4/Fig4D.ipynb | JasonACarter/Allelic_inclusion | 1c36bd8b8244af20cb4d858b747bcb37ea733cf9 | [
"Unlicense"
]
| null | null | null | Figures/Figure 4/Fig4D.ipynb | JasonACarter/Allelic_inclusion | 1c36bd8b8244af20cb4d858b747bcb37ea733cf9 | [
"Unlicense"
]
| null | null | null | 71.624434 | 9,128 | 0.778887 | [
[
[
"import os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport seaborn as sns",
"_____no_output_____"
],
[
"ab=pd.read_pickle('../../Data/AB.pkl')\naab=pd.read_pickle('../../Data/AAB.pkl')\nabb=pd.read_pickle('../../Data/ABB.pkl')",
"_____no_output_____"
],
[
"vdjdb=pd.DataFrame(np.genfromtxt('../../Data/VDJdb.txt',delimiter='\\t',dtype='unicode',autostrip=True))\nvdjdb.columns=vdjdb.iloc[0]\nvdjdb=vdjdb.iloc[1:]",
"_____no_output_____"
],
[
"def truncate_germline_names(data): \n \"\"\"\n Prepare gerlmine names s.t. the format matches the V/J region names provided names\n \"\"\"\n if 'A' in data:\n which='A'\n else:\n which='B'\n if '*' in data:\n value=data[data.find(which)+2:data.find('*')]\n else:\n value=data[data.find(which)+2:]\n return value",
"_____no_output_____"
],
[
"vdjdb.V=list(map(truncate_germline_names,vdjdb.V))\nvdjdb.J=list(map(truncate_germline_names,vdjdb.J))\nchain=[]\nfor x in vdjdb.Gene:\n if x=='TRA':\n chain.append('Alpha')\n elif x=='TRB':\n chain.append('Beta')\nvdjdb['chain']=chain\nvdjdb['Clonotype']=vdjdb.CDR3+vdjdb.chain+vdjdb.V\nvdjdb=vdjdb.drop_duplicates()",
"_____no_output_____"
],
[
"def prepare_dfs(df):\n \"\"\"\n Input full ab,aab, or abb df \n Output alpha and beta dfs with columns \"V\", \"J\" and (amino acid) \"CDR3\"\n \"\"\"\n if 'a2_V' in df.columns: #aab cell -> include both alpha chains\n alpha1=df[['a1_V','a1_J','a1_aaSeqCDR3','subject']]\n alpha2=df[['a2_V','a2_J','a2_aaSeqCDR3','subject']]\n alpha=np.vstack((alpha1,alpha2)) #combine alpha-alpha pairs into one df\n beta=np.array((df[['b1_V','b1_J','b1_aaSeqCDR3','subject']]))\n elif 'b2_V' in df.columns: #aab cell -> include both beta chains\n alpha=np.array((df[['a1_V','a1_J','a1_aaSeqCDR3','subject']]))\n beta1=df[['b1_V','b1_J','b1_aaSeqCDR3','subject']]\n beta2=df[['b2_V','b2_J','b2_aaSeqCDR3','subject']]\n beta=np.vstack((beta1,beta2)) #combine beta-beta pairs into one df\n else: #ab cell\n alpha=np.array((df[['a1_V','a1_J','a1_aaSeqCDR3','subject']]))\n beta=np.array((df[['b1_V','b1_J','b1_aaSeqCDR3','subject']])) \n combined=pd.DataFrame(alpha,columns=['V','J','CDR3','subject']).append(pd.DataFrame(beta,columns=['V','J','CDR3','subject']))\n combined['Chain']=np.hstack((['Alpha']*alpha.shape[0],['Beta']*beta.shape[0]))\n combined.V=list(map(truncate_germline_names,combined.V))\n combined.J=list(map(truncate_germline_names,combined.J))\n combined['Clonotype']=combined.CDR3+combined.Chain+combined.V\n return combined",
"_____no_output_____"
],
[
"ab=prepare_dfs(ab)\naab=prepare_dfs(aab)\nabb=prepare_dfs(abb)",
"_____no_output_____"
],
[
"for chain in ['Alpha','Beta']:\n a=len(np.intersect1d(aab[(aab.Chain==chain)].Clonotype,vdjdb.Clonotype))+len(np.intersect1d(abb[(abb.Chain==chain)].Clonotype,vdjdb.Clonotype))\n b=len(np.intersect1d(ab[(ab.Chain==chain)].Clonotype,vdjdb.Clonotype))\n c=aab[(aab.Chain==chain)].shape[0]+abb[(abb.Chain==chain)].shape[0]\n d=ab[(ab.Chain==chain)].shape[0]\n OR,pvalue=stats.fisher_exact([[a,b],[c,d]])\n print(f'{chain}: odds ratio= {OR} (p={pvalue}')",
"Alpha: odds ratio= 1.9047179276946624 (p=3.603144163250983e-23\nBeta: odds ratio= 1.719168207483494 (p=0.00014641788568560513\n"
],
[
"freqs=[]\nfor chain in ['Alpha','Beta']:\n for i in ab.subject.unique():\n for j,data in enumerate([ab,aab,abb]):\n if ab[(ab.subject==i) & (ab.Chain==chain)].shape[0]>10000:\n use=data[(data.subject==i) & (data.Chain==chain)]\n overlap=len(np.intersect1d(use.Clonotype,vdjdb.Clonotype))/use.shape[0]*100\n freqs.append([i,j,chain,overlap])\ndf=pd.DataFrame(freqs,columns=['subject','dataset','chain','y'])",
"_____no_output_____"
],
[
"a4_dims = (5,5)\nfig, ax = plt.subplots(figsize=a4_dims) \nsns.barplot(x='chain',y='y',hue='dataset',data=df,palette=['Gray','Dodgerblue','Salmon'],ci=68)\nsns.despine()\nplt.yticks(fontsize=20)\nplt.xticks(np.arange(2),[r'$\\alpha$',r'$\\beta$'],fontsize=25)\nplt.xlabel('')\nplt.ylabel('') \nplt.tight_layout()\nplt.legend('')\nplt.ylim([0,1.2])\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec64aa11b9f7de3d53f8dd838dd2170855c87b94 | 820,143 | ipynb | Jupyter Notebook | TamplateMatching.ipynb | bartoszptak/PuzzleSolver | 575ac1b35ff1e20289e5fc4ec78f684450f8ab17 | [
"MIT"
]
| 3 | 2019-03-04T19:27:39.000Z | 2019-03-12T21:03:44.000Z | TamplateMatching.ipynb | bartoszptak/PuzzleSolver | 575ac1b35ff1e20289e5fc4ec78f684450f8ab17 | [
"MIT"
]
| 4 | 2021-03-19T00:30:01.000Z | 2022-03-11T23:46:46.000Z | TamplateMatching.ipynb | bartoszptak/PuzzleSolver | 575ac1b35ff1e20289e5fc4ec78f684450f8ab17 | [
"MIT"
]
| 1 | 2019-03-26T19:05:35.000Z | 2019-03-26T19:05:35.000Z | 2,828.07931 | 535,052 | 0.962253 | [
[
[
"from Preprocessing import Preprocessing\nps = Preprocessing()\n\nimages = ['img/a0.png', 'img/a1.png']\nresults = ps.do_it(images)\nprint('Puzzles: {}'.format(len(results)))",
"Files: 2\nPuzzles: 54\n"
]
],
[
[
"# Próba match template",
"_____no_output_____"
]
],
[
[
"import cv2\nimport numpy as np\n\nimg = cv2.imread('img/aa.png')\n\nsize = [9, 6]\n\nrows,cols,ch = img.shape\n\npts1 = np.float32([[384-5,64-5],[270-5,1128+5],[1898+5,108-5],[1980+5,1120+5]])\npts2 = np.float32([[0,0],[0,600*size[1]],[600*size[0],0],[600*size[0],600*size[1]]])\n\nM = cv2.getPerspectiveTransform(pts1,pts2)\n\ndst = cv2.warpPerspective(img,M,(600*size[0],600*size[1]))\n\ndst = cv2.resize(dst, (0,0), None, fx=0.1, fy=0.1)\nps.draw(dst)\n\nprint(dst.shape)",
"_____no_output_____"
],
[
"correct = np.array([\n [30,50,43,39,52,6,7,13,19],\n [36,44,32,45,47,2,0,21,17],\n [37,38,41,40,48,24,5,4,10],\n [42,31,51,46,35,25,28,29,20],\n [49,53,33,34,26,12,3,15,8],\n [1,9,27,14,16,11,18,23,22]\n])",
"_____no_output_____"
],
[
"import copy\nbase = copy.deepcopy(dst)\nbase = adjust_gamma(base)\n\ntemplates = []\nfor el in results:\n loc = el[0][0].copy()\n\n loc = cv2.warpAffine(loc, cv2.getRotationMatrix2D((500,500), 270, 1.0), (1000, 1000))\n loc = adjust_gamma(loc)\n templates.append(loc)\n \nown_try = np.zeros(correct.shape)\n\nfor i, template in enumerate(templates):\n method = cv2.TM_CCOEFF_NORMED\n template = cv2.resize(template, (0,0), None, fx=0.1, fy=0.1)\n template = template[20:80, 20:80]\n\n c, w, h = template.shape[::-1]\n\n res = cv2.matchTemplate(base,template, method)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n top_left = min_loc\n else:\n top_left = max_loc\n\n bottom_right = (top_left[0] + w, top_left[1] + h)\n \n cy = top_left[0] + (h)//2\n cx = top_left[1] + (w)//2\n\n a = base.shape[0]//6\n b = base.shape[1]//9\n\n a, b = cy//a, cx//b\n\n if own_try[b,a] == 0:\n own_try[b,a] = i\n #cv2.rectangle(base, top_left, bottom_right, (255,255,0), 1)",
"_____no_output_____"
],
[
"own_try",
"_____no_output_____"
],
[
"print('Błąd: {:.3f} (poprawnych/wszystkich)'.format(np.count_nonzero(np.subtract(own_try,correct)==0)/(size[0]*size[1])))",
"Błąd: 0.444 (poprawnych/wszystkich)\n"
],
[
"def adjust_gamma(image, gamma=2.5):\n\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n\n return cv2.LUT(image, table)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.figure(figsize=(12,8))\n\nps.draw(base)",
"_____no_output_____"
],
[
"d = results[31][0][0].copy()\nfor el in results[31][1][0]:\n cv2.circle(d, (el[1], el[0]), 1, (255,255,0))\n cv2.circle(d, (el[1], el[0]+30), 1, (0,128,256))\n\nps.draw(d)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec64aa1d548236a36a380f8e0eecc619f016cafd | 27,927 | ipynb | Jupyter Notebook | 04 Build and operate machine learning solutions with Azure Databricks/mslearn-dp100/08 - Create a Pipeline.ipynb | raj713335/DP-100 | 11c2550975f0e9ecec3fa6502fdb5d54ac54c714 | [
"Apache-2.0"
]
| 1 | 2022-03-28T07:56:58.000Z | 2022-03-28T07:56:58.000Z | 04 Build and operate machine learning solutions with Azure Databricks/mslearn-dp100/08 - Create a Pipeline.ipynb | raj713335/DP-100 | 11c2550975f0e9ecec3fa6502fdb5d54ac54c714 | [
"Apache-2.0"
]
| null | null | null | 04 Build and operate machine learning solutions with Azure Databricks/mslearn-dp100/08 - Create a Pipeline.ipynb | raj713335/DP-100 | 11c2550975f0e9ecec3fa6502fdb5d54ac54c714 | [
"Apache-2.0"
]
| null | null | null | 40.182734 | 828 | 0.602285 | [
[
[
"# Create a Pipeline\n\nYou can perform the various steps required to ingest data, train a model, and register the model individually by using the Azure ML SDK to run script-based experiments. However, in an enterprise environment it is common to encapsulate the sequence of discrete steps required to build a machine learning solution into a *pipeline* that can be run on one or more compute targets; either on-demand by a user, from an automated build process, or on a schedule.\n\nIn this notebook, you'll bring together all of these elements to create a simple pipeline that pre-processes data and then trains and registers a model.",
"_____no_output_____"
],
[
"## Connect to your workspace\n\nTo get started, connect to your workspace.\n\n> **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.",
"_____no_output_____"
]
],
[
[
"import azureml.core\nfrom azureml.core import Workspace\n\n# Load the workspace from the saved config file\nws = Workspace.from_config()\nprint('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))",
"_____no_output_____"
]
],
[
[
"## Prepare data\n\nIn your pipeline, you'll use a dataset containing details of diabetes patients. Run the cell below to create this dataset (if you created it previously, the code will find the existing version)",
"_____no_output_____"
]
],
[
[
"from azureml.core import Dataset\n\ndefault_ds = ws.get_default_datastore()\n\nif 'diabetes dataset' not in ws.datasets:\n default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data\n target_path='diabetes-data/', # Put it in a folder path in the datastore\n overwrite=True, # Replace existing files of the same name\n show_progress=True)\n\n #Create a tabular dataset from the path on the datastore (this may take a short while)\n tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))\n\n # Register the tabular dataset\n try:\n tab_data_set = tab_data_set.register(workspace=ws, \n name='diabetes dataset',\n description='diabetes data',\n tags = {'format':'CSV'},\n create_new_version=True)\n print('Dataset registered.')\n except Exception as ex:\n print(ex)\nelse:\n print('Dataset already registered.')",
"_____no_output_____"
]
],
[
[
"## Create scripts for pipeline steps\n\nPipelines consist of one or more *steps*, which can be Python scripts, or specialized steps like a data transfer step that copies data from one location to another. Each step can run in its own compute context. In this exercise, you'll build a simple pipeline that contains two Python script steps: one to pre-process some training data, and another to use the pre-processed data to train and register a model.\n\nFirst, let's create a folder for the script files we'll use in the pipeline steps.",
"_____no_output_____"
]
],
[
[
"import os\n# Create a folder for the pipeline step files\nexperiment_folder = 'diabetes_pipeline'\nos.makedirs(experiment_folder, exist_ok=True)\n\nprint(experiment_folder)",
"_____no_output_____"
]
],
[
[
"Now let's create the first script, which will read data from the diabetes dataset and apply some simple pre-processing to remove any rows with missing data and normalize the numeric features so they're on a similar scale.\n\nThe script includes a argument named **--prepped-data**, which references the folder where the resulting data should be saved.",
"_____no_output_____"
]
],
[
[
"%%writefile $experiment_folder/prep_diabetes.py\n# Import libraries\nimport os\nimport argparse\nimport pandas as pd\nfrom azureml.core import Run\nfrom sklearn.preprocessing import MinMaxScaler\n\n# Get parameters\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input-data\", type=str, dest='raw_dataset_id', help='raw dataset')\nparser.add_argument('--prepped-data', type=str, dest='prepped_data', default='prepped_data', help='Folder for results')\nargs = parser.parse_args()\nsave_folder = args.prepped_data\n\n# Get the experiment run context\nrun = Run.get_context()\n\n# load the data (passed as an input dataset)\nprint(\"Loading Data...\")\ndiabetes = run.input_datasets['raw_data'].to_pandas_dataframe()\n\n# Log raw row count\nrow_count = (len(diabetes))\nrun.log('raw_rows', row_count)\n\n# remove nulls\ndiabetes = diabetes.dropna()\n\n# Normalize the numeric columns\nscaler = MinMaxScaler()\nnum_cols = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree']\ndiabetes[num_cols] = scaler.fit_transform(diabetes[num_cols])\n\n# Log processed rows\nrow_count = (len(diabetes))\nrun.log('processed_rows', row_count)\n\n# Save the prepped data\nprint(\"Saving Data...\")\nos.makedirs(save_folder, exist_ok=True)\nsave_path = os.path.join(save_folder,'data.csv')\ndiabetes.to_csv(save_path, index=False, header=True)\n\n# End the run\nrun.complete()",
"_____no_output_____"
]
],
[
[
"Now you can create the script for the second step, which will train a model. The script includes a argument named **--training-data**, which references the location where the prepared data was saved by the previous step.",
"_____no_output_____"
]
],
[
[
"%%writefile $experiment_folder/train_diabetes.py\n# Import libraries\nfrom azureml.core import Run, Model\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport joblib\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\nimport matplotlib.pyplot as plt\n\n# Get parameters\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--training-data\", type=str, dest='training_data', help='training data')\nargs = parser.parse_args()\ntraining_data = args.training_data\n\n# Get the experiment run context\nrun = Run.get_context()\n\n# load the prepared data file in the training folder\nprint(\"Loading Data...\")\nfile_path = os.path.join(training_data,'data.csv')\ndiabetes = pd.read_csv(file_path)\n\n# Separate features and labels\nX, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values\n\n# Split data into training set and test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)\n\n# Train adecision tree model\nprint('Training a decision tree model...')\nmodel = DecisionTreeClassifier().fit(X_train, y_train)\n\n# calculate accuracy\ny_hat = model.predict(X_test)\nacc = np.average(y_hat == y_test)\nprint('Accuracy:', acc)\nrun.log('Accuracy', np.float(acc))\n\n# calculate AUC\ny_scores = model.predict_proba(X_test)\nauc = roc_auc_score(y_test,y_scores[:,1])\nprint('AUC: ' + str(auc))\nrun.log('AUC', np.float(auc))\n\n# plot ROC curve\nfpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])\nfig = plt.figure(figsize=(6, 4))\n# Plot the diagonal 50% line\nplt.plot([0, 1], [0, 1], 'k--')\n# Plot the FPR and TPR achieved by our model\nplt.plot(fpr, tpr)\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC 
Curve')\nrun.log_image(name = \"ROC\", plot = fig)\nplt.show()\n\n# Save the trained model in the outputs folder\nprint(\"Saving model...\")\nos.makedirs('outputs', exist_ok=True)\nmodel_file = os.path.join('outputs', 'diabetes_model.pkl')\njoblib.dump(value=model, filename=model_file)\n\n# Register the model\nprint('Registering model...')\nModel.register(workspace=run.experiment.workspace,\n model_path = model_file,\n model_name = 'diabetes_model',\n tags={'Training context':'Pipeline'},\n properties={'AUC': np.float(auc), 'Accuracy': np.float(acc)})\n\n\nrun.complete()",
"_____no_output_____"
]
],
[
[
"## Prepare a compute environment for the pipeline steps\n\nIn this exercise, you'll use the same compute for both steps, but it's important to realize that each step is run independently; so you could specify different compute contexts for each step if appropriate.\n\nFirst, get the compute target you created in a previous lab (if it doesn't exist, it will be created).\n\n> **Important**: Change *your-compute-cluster* to the name of your compute cluster in the code below before running it! Cluster names must be globally unique names between 2 to 16 characters in length. Valid characters are letters, digits, and the - character.",
"_____no_output_____"
]
],
[
[
"from azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\n\ncluster_name = \"your-compute-cluster\"\n\ntry:\n # Check for existing compute target\n pipeline_cluster = ComputeTarget(workspace=ws, name=cluster_name)\n print('Found existing cluster, use it.')\nexcept ComputeTargetException:\n # If it doesn't already exist, create it\n try:\n compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2)\n pipeline_cluster = ComputeTarget.create(ws, cluster_name, compute_config)\n pipeline_cluster.wait_for_completion(show_output=True)\n except Exception as ex:\n print(ex)\n ",
"_____no_output_____"
]
],
[
[
"> **Note**: Compute instances and clusters are based on standard Azure virtual machine images. For this exercise, the *Standard_DS11_v2* image is recommended to achieve the optimal balance of cost and performance. If your subscription has a quota that does not include this image, choose an alternative image; but bear in mind that a larger image may incur higher cost and a smaller image may not be sufficient to complete the tasks. Alternatively, ask your Azure administrator to extend your quota.\n\nThe compute will require a Python environment with the necessary package dependencies installed.",
"_____no_output_____"
]
],
[
[
"%%writefile $experiment_folder/experiment_env.yml\nname: experiment_env\ndependencies:\n- python=3.6.2\n- scikit-learn\n- ipykernel\n- matplotlib\n- pandas\n- pip\n- pip:\n - azureml-defaults\n - pyarrow",
"_____no_output_____"
]
],
[
[
"Now that you have a Conda configuration file, you can create an environment and use it in the run configuration for the pipeline.",
"_____no_output_____"
]
],
[
[
"from azureml.core import Environment\nfrom azureml.core.runconfig import RunConfiguration\n\n# Create a Python environment for the experiment (from a .yml file)\nexperiment_env = Environment.from_conda_specification(\"experiment_env\", experiment_folder + \"/experiment_env.yml\")\n\n# Register the environment \nexperiment_env.register(workspace=ws)\nregistered_env = Environment.get(ws, 'experiment_env')\n\n# Create a new runconfig object for the pipeline\npipeline_run_config = RunConfiguration()\n\n# Use the compute you created above. \npipeline_run_config.target = pipeline_cluster\n\n# Assign the environment to the run configuration\npipeline_run_config.environment = registered_env\n\nprint (\"Run configuration created.\")",
"_____no_output_____"
]
],
[
[
"## Create and run a pipeline\n\nNow you're ready to create and run a pipeline.\n\nFirst you need to define the steps for the pipeline, and any data references that need to be passed between them. In this case, the first step must write the prepared data to a folder that can be read from by the second step. Since the steps will be run on remote compute (and in fact, could each be run on different compute), the folder path must be passed as a data reference to a location in a datastore within the workspace. The **OutputFileDatasetConfig** object is a special kind of data reference that is used for interim storage locations that can be passed between pipeline steps, so you'll create one and use at as the output for the first step and the input for the second step. Note that you need to pass it as a script argument so your code can access the datastore location referenced by the data reference.",
"_____no_output_____"
]
],
[
[
"from azureml.data import OutputFileDatasetConfig\nfrom azureml.pipeline.steps import PythonScriptStep\n\n# Get the training dataset\ndiabetes_ds = ws.datasets.get(\"diabetes dataset\")\n\n# Create an OutputFileDatasetConfig (temporary Data Reference) for data passed from step 1 to step 2\nprepped_data = OutputFileDatasetConfig(\"prepped_data\")\n\n# Step 1, Run the data prep script\nprep_step = PythonScriptStep(name = \"Prepare Data\",\n source_directory = experiment_folder,\n script_name = \"prep_diabetes.py\",\n arguments = ['--input-data', diabetes_ds.as_named_input('raw_data'),\n '--prepped-data', prepped_data],\n compute_target = pipeline_cluster,\n runconfig = pipeline_run_config,\n allow_reuse = True)\n\n# Step 2, run the training script\ntrain_step = PythonScriptStep(name = \"Train and Register Model\",\n source_directory = experiment_folder,\n script_name = \"train_diabetes.py\",\n arguments = ['--training-data', prepped_data.as_input()],\n compute_target = pipeline_cluster,\n runconfig = pipeline_run_config,\n allow_reuse = True)\n\nprint(\"Pipeline steps defined\")",
"_____no_output_____"
]
],
[
[
"OK, you're ready build the pipeline from the steps you've defined and run it as an experiment.",
"_____no_output_____"
]
],
[
[
"from azureml.core import Experiment\nfrom azureml.pipeline.core import Pipeline\nfrom azureml.widgets import RunDetails\n\n# Construct the pipeline\npipeline_steps = [prep_step, train_step]\npipeline = Pipeline(workspace=ws, steps=pipeline_steps)\nprint(\"Pipeline is built.\")\n\n# Create an experiment and run the pipeline\nexperiment = Experiment(workspace=ws, name = 'mslearn-diabetes-pipeline')\npipeline_run = experiment.submit(pipeline, regenerate_outputs=True)\nprint(\"Pipeline submitted for execution.\")\nRunDetails(pipeline_run).show()\npipeline_run.wait_for_completion(show_output=True)",
"_____no_output_____"
]
],
[
[
"A graphical representation of the pipeline experiment will be displayed in the widget as it runs. Keep an eye on the kernel indicator at the top right of the page, when it turns from **⚫** to **◯**, the code has finished running. You can also monitor pipeline runs in the **Experiments** page in [Azure Machine Learning studio](https://ml.azure.com).\n\nWhen the pipeline has finished, you can examine the metrics recorded by it's child runs.",
"_____no_output_____"
]
],
[
[
"for run in pipeline_run.get_children():\n print(run.name, ':')\n metrics = run.get_metrics()\n for metric_name in metrics:\n print('\\t',metric_name, \":\", metrics[metric_name])",
"_____no_output_____"
]
],
[
[
"Assuming the pipeline was successful, a new model should be registered with a *Training context* tag indicating it was trained in a pipeline. Run the following code to verify this.",
"_____no_output_____"
]
],
[
[
"from azureml.core import Model\n\nfor model in Model.list(ws):\n print(model.name, 'version:', model.version)\n for tag_name in model.tags:\n tag = model.tags[tag_name]\n print ('\\t',tag_name, ':', tag)\n for prop_name in model.properties:\n prop = model.properties[prop_name]\n print ('\\t',prop_name, ':', prop)\n print('\\n')",
"_____no_output_____"
]
],
[
[
"## Publish the pipeline\n\nAfter you've created and tested a pipeline, you can publish it as a REST service.",
"_____no_output_____"
]
],
[
[
"# Publish the pipeline from the run\npublished_pipeline = pipeline_run.publish_pipeline(\n name=\"diabetes-training-pipeline\", description=\"Trains diabetes model\", version=\"1.0\")\n\npublished_pipeline",
"_____no_output_____"
]
],
[
[
"Note that the published pipeline has an endpoint, which you can see in the **Endpoints** page (on the **Pipeline Endpoints** tab) in [Azure Machine Learning studio](https://ml.azure.com). You can also find its URI as a property of the published pipeline object:",
"_____no_output_____"
]
],
[
[
"rest_endpoint = published_pipeline.endpoint\nprint(rest_endpoint)",
"_____no_output_____"
]
],
[
[
"## Call the pipeline endpoint\n\nTo use the endpoint, client applications need to make a REST call over HTTP. This request must be authenticated, so an authorization header is required. A real application would require a service principal with which to be authenticated, but to test this out, we'll use the authorization header from your current connection to your Azure workspace, which you can get using the following code:",
"_____no_output_____"
]
],
[
[
"from azureml.core.authentication import InteractiveLoginAuthentication\n\ninteractive_auth = InteractiveLoginAuthentication()\nauth_header = interactive_auth.get_authentication_header()\nprint(\"Authentication header ready.\")",
"_____no_output_____"
]
],
[
[
"Now we're ready to call the REST interface. The pipeline runs asynchronously, so we'll get an identifier back, which we can use to track the pipeline experiment as it runs:",
"_____no_output_____"
]
],
[
[
"import requests\n\nexperiment_name = 'mslearn-diabetes-pipeline'\n\nrest_endpoint = published_pipeline.endpoint\nresponse = requests.post(rest_endpoint, \n headers=auth_header, \n json={\"ExperimentName\": experiment_name})\nrun_id = response.json()[\"Id\"]\nrun_id",
"_____no_output_____"
]
],
[
[
"Since you have the run ID, you can use it to wait for the run to complete.\n\n> **Note**: The pipeline should complete quickly, because each step was configured to allow output reuse. This was done primarily for convenience and to save time in this course. In reality, you'd likely want the first step to run every time in case the data has changed, and trigger the subsequent steps only if the output from step one changes.",
"_____no_output_____"
]
],
[
[
"from azureml.pipeline.core.run import PipelineRun\n\npublished_pipeline_run = PipelineRun(ws.experiments[experiment_name], run_id)\npublished_pipeline_run.wait_for_completion(show_output=True)",
"_____no_output_____"
]
],
[
[
"## Schedule the Pipeline\n\nSuppose the clinic for the diabetes patients collects new data each week, and adds it to the dataset. You could run the pipeline every week to retrain the model with the new data.",
"_____no_output_____"
]
],
[
[
"from azureml.pipeline.core import ScheduleRecurrence, Schedule\n\n# Submit the Pipeline every Monday at 00:00 UTC\nrecurrence = ScheduleRecurrence(frequency=\"Week\", interval=1, week_days=[\"Monday\"], time_of_day=\"00:00\")\nweekly_schedule = Schedule.create(ws, name=\"weekly-diabetes-training\", \n description=\"Based on time\",\n pipeline_id=published_pipeline.id, \n experiment_name='mslearn-diabetes-pipeline', \n recurrence=recurrence)\nprint('Pipeline scheduled.')",
"_____no_output_____"
]
],
[
[
"You can retrieve the schedules that are defined in the workspace like this:",
"_____no_output_____"
]
],
[
[
"schedules = Schedule.list(ws)\nschedules",
"_____no_output_____"
]
],
[
[
"You can check the latest run like this:",
"_____no_output_____"
]
],
[
[
"pipeline_experiment = ws.experiments.get('mslearn-diabetes-pipeline')\nlatest_run = list(pipeline_experiment.get_runs())[0]\n\nlatest_run.get_details()",
"_____no_output_____"
]
],
[
[
"This is a simple example, designed to demonstrate the principle. In reality, you could build more sophisticated logic into the pipeline steps - for example, evaluating the model against some test data to calculate a performance metric like AUC or accuracy, comparing the metric to that of any previously registered versions of the model, and only registering the new model if it performs better.\n\nYou can use the [Azure Machine Learning extension for Azure DevOps](https://marketplace.visualstudio.com/items?itemName=ms-air-aiagility.vss-services-azureml) to combine Azure ML pipelines with Azure DevOps pipelines (yes, it *is* confusing that they have the same name!) and integrate model retraining into a *continuous integration/continuous deployment (CI/CD)* process. For example you could use an Azure DevOps *build* pipeline to trigger an Azure ML pipeline that trains and registers a model, and when the model is registered it could trigger an Azure Devops *release* pipeline that deploys the model as a web service, along with the application or service that consumes the model.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec64ad06485b2a9e621a11af89803c5a45050d24 | 5,553 | ipynb | Jupyter Notebook | HX_sciann.ipynb | smyng91/DeepHX | 0b3a891a0e4e5ede0c1af46353c5b2d38935e7b7 | [
"Apache-2.0"
]
| null | null | null | HX_sciann.ipynb | smyng91/DeepHX | 0b3a891a0e4e5ede0c1af46353c5b2d38935e7b7 | [
"Apache-2.0"
]
| null | null | null | HX_sciann.ipynb | smyng91/DeepHX | 0b3a891a0e4e5ede0c1af46353c5b2d38935e7b7 | [
"Apache-2.0"
]
| null | null | null | 40.532847 | 1,242 | 0.562759 | [
[
[
"\nimport numpy as np \nimport sciann as sn \nimport matplotlib.pyplot as plt \nimport scipy.io\nfrom sciann.utils.math import diff, sign, sin\n\n\nR = 0.1\nVc = 1\nVh = 1\n\nx = sn.Variable(\"x\", dtype='float64')\ny = sn.Variable(\"y\", dtype='float64')\nt = sn.Variable(\"t\", dtype='float64')\n\ntheta_w = sn.Functional(\"theta_w\", [t], 4*[20], 'tanh')\ntheta_h = sn.Functional(\"theta_h\", [x, t], 8*[20], 'tanh')\ntheta_c = sn.Functional(\"theta_c\", [y, t], 8*[20], 'tanh')\n\ntheta_w_t = sn.diff(theta_w, t)\ntheta_h_t = sn.diff(theta_h, t); theta_h_x = sn.diff(theta_h, x); \ntheta_c_t = sn.diff(theta_c, t); theta_c_y = sn.diff(theta_c, y); \n\n# define governing equations\nL1 = theta_h_t - R/Vh*(theta_w-theta_h-theta_h_x)\nL2 = theta_c_t - 1/Vc*(theta_w-theta_c-theta_c_y)\nL3 = theta_w_t - theta_c - R*theta_h + (1+R)*theta_w\n# initial conditions\nTOL = 0.001\nC1 = (1-sign(t - TOL)) * (theta_w)\nC2 = (1-sign(t - TOL)) * (theta_h)\nC3 = (1-sign(t - TOL)) * (theta_c)\n# boundary conditions\nC4 = (1-sign(x - TOL)) * (theta_h - 1.)\nC5 = (1-sign(y - TOL)) * (theta_c_y)\nC6 = (1+sign(x - ( 1-TOL))) * (theta_h_x)\nC7 = (1+sign(y - ( 1-TOL))) * (theta_c)\n\n# Define the optimization model (set of inputs and constraints)\nm = sn.SciModel([x, y, t], [L1, L2, L3, C1, C2, C3, C4, C5, C6, C7])\nx_data, y_data, t_data = np.meshgrid(\n np.linspace(0, 1, 100), \n np.linspace(0, 1, 100),\n np.linspace(0, 1, 100),\n)\n\nh = m.train([x_data, y_data, t_data], 10*['zero'], \n epochs=10000,\n shuffle=True,\n batch_size=100,\n verbose=1,\n learning_rate=1e-3,\n reduce_lr_after=100,\n stop_loss_value=1e-8\n )\n\nx_test, y_test, t_test = np.meshgrid(\n np.linspace(0, 1, 200), \n np.linspace(0, 1, 200), \n np.linspace(0, 1, 200)\n)\ntheta_h_pred = theta_h.eval(m, [x_test, y_test, t_test])\ntheta_c_pred = theta_c.eval(m, [x_test, y_test, t_test])\ntheta_w_pred = theta_w.eval(m, [x_test, y_test, t_test])\n\nfig = plt.figure(figsize=(3, 4))\nplt.pcolor(x_test, t_test, theta_h_pred, 
cmap='seismic')\nplt.xlabel('x')\nplt.ylabel('t')\n\nplt.colorbar()\n\n# model.save_weights('trained-navier-stokes.hdf5')\n",
"---------------------- SCIANN 0.6.0.4 ---------------------- \nFor details, check out our review paper and the documentation at: \n + \"https://arxiv.org/abs/2005.08803\", \n + \"https://www.sciann.com\". \n\n"
]
]
]
| [
"code"
]
| [
[
"code"
]
]
|
ec64c05a9bae1fee72d335e31e2212f24ddc01f8 | 204,052 | ipynb | Jupyter Notebook | notebooks/lda_chapters.ipynb | D2KLab/ToModAPI | 6562c39a451d626ab4bfe6b3b47eda625b6afd8c | [
"Apache-2.0"
]
| 25 | 2020-11-19T16:34:32.000Z | 2022-03-03T02:57:39.000Z | notebooks/lda_chapters.ipynb | D2KLab/Topic-Model-API | 6562c39a451d626ab4bfe6b3b47eda625b6afd8c | [
"Apache-2.0"
]
| 10 | 2020-04-10T07:54:02.000Z | 2020-10-07T07:16:23.000Z | notebooks/lda_chapters.ipynb | D2KLab/Topic-Model-API | 6562c39a451d626ab4bfe6b3b47eda625b6afd8c | [
"Apache-2.0"
]
| 1 | 2021-05-24T10:34:43.000Z | 2021-05-24T10:34:43.000Z | 54.867437 | 50,928 | 0.599896 | [
[
[
"!sudo apt update -y\n!pip install nltk\n!pip install gensim\n!sudo apt-get install default-jre -y",
"Get:1 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\nGet:2 http://archive.ubuntu.com/ubuntu bionic InRelease [242 kB]\nGet:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]33m\nGet:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]\nGet:5 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [19.2 kB]\nGet:6 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [761 kB]\nGet:7 http://archive.ubuntu.com/ubuntu bionic/restricted amd64 Packages [13.5 kB]\nGet:8 http://archive.ubuntu.com/ubuntu bionic/main amd64 Packages [1,344 kB] \u001b[0m\u001b[33m\u001b[33m\nGet:9 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [6,781 B]3m\u001b[33m\u001b[33m\nGet:10 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [795 kB]\nGet:11 http://archive.ubuntu.com/ubuntu bionic/multiverse amd64 Packages [186 kB]0m\u001b[33m\u001b[33m\u001b[33m\nGet:12 http://archive.ubuntu.com/ubuntu bionic/universe amd64 Packages [11.3 MB][0m\u001b[33m\u001b[33m\nGet:13 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [1,057 kB]33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\u001b[33m\nGet:14 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [10.5 kB]33m\u001b[33m\nGet:15 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1,322 kB][33m\nGet:16 http://archive.ubuntu.com/ubuntu bionic-updates/restricted amd64 Packages [32.7 kB]33m\u001b[33m\u001b[33m\nGet:17 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [4,244 B]33m\nGet:18 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [2,496 B]3m\nFetched 17.4 MB in 19s (906 kB/s) 
\u001b[0m\u001b[33m\u001b[33m\nReading package lists... Done\nBuilding dependency tree \nReading state information... Done\n65 packages can be upgraded. Run 'apt list --upgradable' to see them.\nCollecting nltk\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f6/1d/d925cfb4f324ede997f6d47bea4d9babba51b49e87a767c170b77005889d/nltk-3.4.5.zip (1.5MB)\n\u001b[K |████████████████████████████████| 1.5MB 858kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from nltk) (1.12.0)\nBuilding wheels for collected packages: nltk\n Building wheel for nltk (setup.py) ... \u001b[?25ldone\n\u001b[?25h Created wheel for nltk: filename=nltk-3.4.5-cp37-none-any.whl size=1449906 sha256=8e0e261ba3de912f88cce75cbd8052d7b5bac8c5d9ed4cbb045d137ef4b8ec3a\n Stored in directory: /home/jovyan/.cache/pip/wheels/96/86/f6/68ab24c23f207c0077381a5e3904b2815136b879538a24b483\nSuccessfully built nltk\nInstalling collected packages: nltk\nSuccessfully installed nltk-3.4.5\nCollecting gensim\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/44/93/c6011037f24e3106d13f3be55297bf84ece2bf15b278cc4776339dc52db5/gensim-3.8.1-cp37-cp37m-manylinux1_x86_64.whl (24.2MB)\n\u001b[K |████████████████████████████████| 24.2MB 908kB/s eta 0:00:01 |█████████████████████████▌ | 19.3MB 1.2MB/s eta 0:00:04\n\u001b[?25hRequirement already satisfied: scipy>=0.18.1 in /opt/conda/lib/python3.7/site-packages (from gensim) (1.3.1)\nCollecting smart-open>=1.8.1 (from gensim)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/0c/09/735f2786dfac9bbf39d244ce75c0313d27d4962e71e0774750dc809f2395/smart_open-1.9.0.tar.gz (70kB)\n\u001b[K |████████████████████████████████| 71kB 1.2MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: numpy>=1.11.3 in /opt/conda/lib/python3.7/site-packages (from gensim) (1.17.2)\nRequirement already satisfied: six>=1.5.0 in /opt/conda/lib/python3.7/site-packages (from gensim) (1.12.0)\nCollecting 
boto>=2.32 (from smart-open>=1.8.1->gensim)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/23/10/c0b78c27298029e4454a472a1919bde20cb182dab1662cec7f2ca1dcc523/boto-2.49.0-py2.py3-none-any.whl (1.4MB)\n\u001b[K |████████████████████████████████| 1.4MB 146kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: requests in /opt/conda/lib/python3.7/site-packages (from smart-open>=1.8.1->gensim) (2.22.0)\nCollecting boto3 (from smart-open>=1.8.1->gensim)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f9/01/1c749dc1bca8dda969f5fe0ba16fa6d24c6bd96572d118f790773c54a636/boto3-1.10.45-py2.py3-none-any.whl (128kB)\n\u001b[K |████████████████████████████████| 133kB 912kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: idna<2.9,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests->smart-open>=1.8.1->gensim) (2.8)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests->smart-open>=1.8.1->gensim) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests->smart-open>=1.8.1->gensim) (1.25.3)\nRequirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests->smart-open>=1.8.1->gensim) (2019.6.16)\nCollecting botocore<1.14.0,>=1.13.45 (from boto3->smart-open>=1.8.1->gensim)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/96/22/9f8201d900956e57a9811e1b1c91c9f76c87487c76f636c2df1ce8379c38/botocore-1.13.45-py2.py3-none-any.whl (5.9MB)\n\u001b[K |████████████████████████████████| 5.9MB 1.2MB/s eta 0:00:01\n\u001b[?25hCollecting s3transfer<0.3.0,>=0.2.0 (from boto3->smart-open>=1.8.1->gensim)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/16/8a/1fc3dba0c4923c2a76e1ff0d52b305c44606da63f718d14d3231e21c51b0/s3transfer-0.2.1-py2.py3-none-any.whl (70kB)\n\u001b[K |████████████████████████████████| 71kB 1.3MB/s eta 
0:00:01\n\u001b[?25hCollecting jmespath<1.0.0,>=0.7.1 (from boto3->smart-open>=1.8.1->gensim)\n Downloading https://files.pythonhosted.org/packages/83/94/7179c3832a6d45b266ddb2aac329e101367fbdb11f425f13771d27f225bb/jmespath-0.9.4-py2.py3-none-any.whl\nRequirement already satisfied: python-dateutil<3.0.0,>=2.1; python_version >= \"2.7\" in /opt/conda/lib/python3.7/site-packages (from botocore<1.14.0,>=1.13.45->boto3->smart-open>=1.8.1->gensim) (2.8.0)\nCollecting docutils<0.16,>=0.10 (from botocore<1.14.0,>=1.13.45->boto3->smart-open>=1.8.1->gensim)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/22/cd/a6aa959dca619918ccb55023b4cb151949c64d4d5d55b3f4ffd7eee0c6e8/docutils-0.15.2-py3-none-any.whl (547kB)\n\u001b[K |████████████████████████████████| 552kB 637kB/s eta 0:00:01\n\u001b[?25hBuilding wheels for collected packages: smart-open\n Building wheel for smart-open (setup.py) ... \u001b[?25ldone\n\u001b[?25h Created wheel for smart-open: filename=smart_open-1.9.0-cp37-none-any.whl size=73088 sha256=3cd52af2cbd23b8196ac97ccbb0569d5f723b33937b8ca16467194e60dd8e29c\n Stored in directory: /home/jovyan/.cache/pip/wheels/ab/10/93/5cff86f5b721d77edaecc29959b1c60d894be1f66d91407d28\nSuccessfully built smart-open\nInstalling collected packages: boto, docutils, jmespath, botocore, s3transfer, boto3, smart-open, gensim\nSuccessfully installed boto-2.49.0 boto3-1.10.45 botocore-1.13.45 docutils-0.15.2 gensim-3.8.1 jmespath-0.9.4 s3transfer-0.2.1 smart-open-1.9.0\nReading package lists... Done\nBuilding dependency tree \nReading state information... 
Done\nThe following additional packages will be installed:\n ca-certificates-java default-jre-headless java-common libatk-wrapper-java\n libatk-wrapper-java-jni libfontenc1 libpcsclite1 libxmuu1 libxtst6\n libxxf86dga1 openjdk-11-jre openjdk-11-jre-headless x11-utils\nSuggested packages:\n pcscd libnss-mdns fonts-ipafont-gothic fonts-ipafont-mincho\n fonts-wqy-microhei | fonts-wqy-zenhei fonts-indic mesa-utils\nThe following NEW packages will be installed:\n ca-certificates-java default-jre default-jre-headless java-common\n libatk-wrapper-java libatk-wrapper-java-jni libfontenc1 libpcsclite1\n libxmuu1 libxtst6 libxxf86dga1 openjdk-11-jre openjdk-11-jre-headless\n x11-utils\n0 upgraded, 14 newly installed, 0 to remove and 65 not upgraded.\nNeed to get 37.9 MB of archives.\nAfter this operation, 172 MB of additional disk space will be used.\nGet:1 http://archive.ubuntu.com/ubuntu bionic/main amd64 libxxf86dga1 amd64 2:1.1.4-1 [13.7 kB]\nGet:2 http://archive.ubuntu.com/ubuntu bionic/main amd64 libxmuu1 amd64 2:1.1.2-2 [9,674 B]\nGet:3 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 java-common all 0.68ubuntu1~18.04.1 [14.5 kB]\nGet:4 http://archive.ubuntu.com/ubuntu bionic/main amd64 libpcsclite1 amd64 1.8.23-1 [21.3 kB]\nGet:5 http://archive.ubuntu.com/ubuntu bionic/main amd64 libxtst6 amd64 2:1.2.3-1 [12.8 kB]\nGet:6 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 openjdk-11-jre-headless amd64 11.0.5+10-0ubuntu1.1~18.04 [37.5 MB]\nGet:7 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 default-jre-headless amd64 2:1.11-68ubuntu1~18.04.1 [10.9 kB]\nGet:8 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 ca-certificates-java all 20180516ubuntu1~18.04.1 [12.2 kB]\nGet:9 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 openjdk-11-jre amd64 11.0.5+10-0ubuntu1.1~18.04 [34.4 kB]\nGet:10 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 default-jre amd64 2:1.11-68ubuntu1~18.04.1 [1,076 B]\nGet:11 
http://archive.ubuntu.com/ubuntu bionic/main amd64 libfontenc1 amd64 1:1.1.3-1 [13.9 kB]\nGet:12 http://archive.ubuntu.com/ubuntu bionic/main amd64 x11-utils amd64 7.7+3build1 [196 kB]\nGet:13 http://archive.ubuntu.com/ubuntu bionic/main amd64 libatk-wrapper-java all 0.33.3-20ubuntu0.1 [34.7 kB]\nGet:14 http://archive.ubuntu.com/ubuntu bionic/main amd64 libatk-wrapper-java-jni amd64 0.33.3-20ubuntu0.1 [28.3 kB]\nFetched 37.9 MB in 39s (967 kB/s) \ndebconf: delaying package configuration, since apt-utils is not installed\nSelecting previously unselected package libxxf86dga1:amd64.\n(Reading database ... 119234 files and directories currently installed.)\nPreparing to unpack .../00-libxxf86dga1_2%3a1.1.4-1_amd64.deb ...\nUnpacking libxxf86dga1:amd64 (2:1.1.4-1) ...\nSelecting previously unselected package libxmuu1:amd64.\nPreparing to unpack .../01-libxmuu1_2%3a1.1.2-2_amd64.deb ...\nUnpacking libxmuu1:amd64 (2:1.1.2-2) ...\nSelecting previously unselected package java-common.\nPreparing to unpack .../02-java-common_0.68ubuntu1~18.04.1_all.deb ...\nUnpacking java-common (0.68ubuntu1~18.04.1) ...\nSelecting previously unselected package libpcsclite1:amd64.\nPreparing to unpack .../03-libpcsclite1_1.8.23-1_amd64.deb ...\nUnpacking libpcsclite1:amd64 (1.8.23-1) ...\nSelecting previously unselected package libxtst6:amd64.\nPreparing to unpack .../04-libxtst6_2%3a1.2.3-1_amd64.deb ...\nUnpacking libxtst6:amd64 (2:1.2.3-1) ...\nSelecting previously unselected package openjdk-11-jre-headless:amd64.\nPreparing to unpack .../05-openjdk-11-jre-headless_11.0.5+10-0ubuntu1.1~18.04_amd64.deb ...\nUnpacking openjdk-11-jre-headless:amd64 (11.0.5+10-0ubuntu1.1~18.04) ...\nSelecting previously unselected package default-jre-headless.\nPreparing to unpack .../06-default-jre-headless_2%3a1.11-68ubuntu1~18.04.1_amd64.deb ...\nUnpacking default-jre-headless (2:1.11-68ubuntu1~18.04.1) ...\nSelecting previously unselected package ca-certificates-java.\nPreparing to unpack 
.../07-ca-certificates-java_20180516ubuntu1~18.04.1_all.deb ...\nUnpacking ca-certificates-java (20180516ubuntu1~18.04.1) ...\nSelecting previously unselected package openjdk-11-jre:amd64.\nPreparing to unpack .../08-openjdk-11-jre_11.0.5+10-0ubuntu1.1~18.04_amd64.deb ...\nUnpacking openjdk-11-jre:amd64 (11.0.5+10-0ubuntu1.1~18.04) ...\nSelecting previously unselected package default-jre.\nPreparing to unpack .../09-default-jre_2%3a1.11-68ubuntu1~18.04.1_amd64.deb ...\nUnpacking default-jre (2:1.11-68ubuntu1~18.04.1) ...\nSelecting previously unselected package libfontenc1:amd64.\nPreparing to unpack .../10-libfontenc1_1%3a1.1.3-1_amd64.deb ...\nUnpacking libfontenc1:amd64 (1:1.1.3-1) ...\nSelecting previously unselected package x11-utils.\nPreparing to unpack .../11-x11-utils_7.7+3build1_amd64.deb ...\nUnpacking x11-utils (7.7+3build1) ...\nSelecting previously unselected package libatk-wrapper-java.\nPreparing to unpack .../12-libatk-wrapper-java_0.33.3-20ubuntu0.1_all.deb ...\nUnpacking libatk-wrapper-java (0.33.3-20ubuntu0.1) ...\nSelecting previously unselected package libatk-wrapper-java-jni:amd64.\nPreparing to unpack .../13-libatk-wrapper-java-jni_0.33.3-20ubuntu0.1_amd64.deb ...\nUnpacking libatk-wrapper-java-jni:amd64 (0.33.3-20ubuntu0.1) ...\nProcessing triggers for mime-support (3.60ubuntu1) ...\nSetting up libpcsclite1:amd64 (1.8.23-1) ...\nSetting up java-common (0.68ubuntu1~18.04.1) ...\nSetting up libxmuu1:amd64 (2:1.1.2-2) ...\nSetting up libxtst6:amd64 (2:1.2.3-1) ...\nSetting up libxxf86dga1:amd64 (2:1.1.4-1) ...\nProcessing triggers for libc-bin (2.27-3ubuntu1) ...\nSetting up libfontenc1:amd64 (1:1.1.3-1) ...\nProcessing triggers for ca-certificates (20180409) ...\nUpdating certificates in /etc/ssl/certs...\n0 added, 0 removed; done.\nRunning hooks in /etc/ca-certificates/update.d...\ndone.\nProcessing triggers for hicolor-icon-theme (0.17-2) ...\nSetting up x11-utils (7.7+3build1) ...\nSetting up libatk-wrapper-java (0.33.3-20ubuntu0.1) 
...\nSetting up libatk-wrapper-java-jni:amd64 (0.33.3-20ubuntu0.1) ...\nSetting up openjdk-11-jre-headless:amd64 (11.0.5+10-0ubuntu1.1~18.04) ...\nupdate-alternatives: using /usr/lib/jvm/java-11-openjdk-amd64/bin/rmid to provide /usr/bin/rmid (rmid) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-11-openjdk-amd64/bin/java to provide /usr/bin/java (java) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-11-openjdk-amd64/bin/keytool to provide /usr/bin/keytool (keytool) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-11-openjdk-amd64/bin/jjs to provide /usr/bin/jjs (jjs) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-11-openjdk-amd64/bin/pack200 to provide /usr/bin/pack200 (pack200) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-11-openjdk-amd64/bin/rmiregistry to provide /usr/bin/rmiregistry (rmiregistry) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-11-openjdk-amd64/bin/unpack200 to provide /usr/bin/unpack200 (unpack200) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-11-openjdk-amd64/lib/jexec to provide /usr/bin/jexec (jexec) in auto mode\nSetting up default-jre-headless (2:1.11-68ubuntu1~18.04.1) ...\nSetting up openjdk-11-jre:amd64 (11.0.5+10-0ubuntu1.1~18.04) ...\nSetting up ca-certificates-java (20180516ubuntu1~18.04.1) ...\nhead: cannot open '/etc/ssl/certs/java/cacerts' for reading: No such file or directory\nAdding debian:QuoVadis_Root_CA_3_G3.pem\nAdding debian:DigiCert_Global_Root_G2.pem\nAdding debian:SwissSign_Silver_CA_-_G2.pem\nAdding debian:Amazon_Root_CA_4.pem\nAdding debian:AC_RAIZ_FNMT-RCM.pem\nAdding debian:SecureTrust_CA.pem\nAdding debian:Trustis_FPS_Root_CA.pem\nAdding debian:Entrust.net_Premium_2048_Secure_Server_CA.pem\nAdding debian:SecureSign_RootCA11.pem\nAdding debian:COMODO_RSA_Certification_Authority.pem\nAdding debian:GlobalSign_Root_CA_-_R3.pem\nAdding debian:QuoVadis_Root_CA_1_G3.pem\nAdding 
debian:Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem\nAdding debian:SSL.com_EV_Root_Certification_Authority_ECC.pem\nAdding debian:SZAFIR_ROOT_CA2.pem\nAdding debian:DigiCert_High_Assurance_EV_Root_CA.pem\nAdding debian:Deutsche_Telekom_Root_CA_2.pem\nAdding debian:QuoVadis_Root_CA.pem\nAdding debian:T-TeleSec_GlobalRoot_Class_2.pem\nAdding debian:Microsec_e-Szigno_Root_CA_2009.pem\nAdding debian:SwissSign_Gold_CA_-_G2.pem\nAdding debian:DigiCert_Assured_ID_Root_G3.pem\nAdding debian:Certigna.pem\nAdding debian:GlobalSign_Root_CA_-_R2.pem\nAdding debian:ISRG_Root_X1.pem\nAdding debian:AffirmTrust_Commercial.pem\nAdding debian:Atos_TrustedRoot_2011.pem\nAdding debian:AffirmTrust_Premium_ECC.pem\nAdding debian:VeriSign_Class_3_Public_Primary_Certification_Authority_-_G5.pem\nAdding debian:GeoTrust_Primary_Certification_Authority_-_G2.pem\nAdding debian:CFCA_EV_ROOT.pem\nAdding debian:EE_Certification_Centre_Root_CA.pem\nAdding debian:VeriSign_Class_3_Public_Primary_Certification_Authority_-_G4.pem\nAdding debian:TÜRKTRUST_Elektronik_Sertifika_Hizmet_Sağlayıcısı_H5.pem\nAdding debian:OISTE_WISeKey_Global_Root_GA_CA.pem\nAdding debian:Staat_der_Nederlanden_EV_Root_CA.pem\nAdding debian:Staat_der_Nederlanden_Root_CA_-_G2.pem\nAdding debian:Go_Daddy_Class_2_CA.pem\nAdding debian:Certplus_Class_2_Primary_CA.pem\nAdding debian:Cybertrust_Global_Root.pem\nAdding debian:Actalis_Authentication_Root_CA.pem\nAdding debian:Taiwan_GRCA.pem\nAdding debian:QuoVadis_Root_CA_3.pem\nAdding debian:Buypass_Class_3_Root_CA.pem\nAdding debian:SSL.com_Root_Certification_Authority_RSA.pem\nAdding debian:DigiCert_Trusted_Root_G4.pem\nAdding debian:OpenTrust_Root_CA_G1.pem\nAdding debian:Buypass_Class_2_Root_CA.pem\nAdding debian:DigiCert_Global_Root_G3.pem\nAdding debian:Starfield_Class_2_CA.pem\nAdding debian:Chambers_of_Commerce_Root_-_2008.pem\nAdding debian:COMODO_Certification_Authority.pem\n"
],
[
"import pandas as pd\nimport numpy as np\nimport pickle\nfrom os import path\nimport re\nimport nltk\nimport gensim\nimport time\nimport matplotlib.pyplot as plt\nnltk.download('stopwords')\nnltk.download('wordnet')",
"[nltk_data] Downloading package stopwords to /home/jovyan/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n[nltk_data] Downloading package wordnet to /home/jovyan/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n"
],
[
"import pickle\nfrom os import path\nwith open(path.expanduser('~/work/data/talk_meta.pkl'), \"rb\") as input_file:\n talk_meta = pickle.load(input_file)\n \nwith open(path.expanduser('~/work/data/talk_sub.pkl'), \"rb\") as input_file:\n talk_sub = pickle.load(input_file)",
"_____no_output_____"
],
[
"talk_sub[(talk_sub.talk_id == 1) & (talk_sub.id==1)]['transcript'].values",
"_____no_output_____"
],
[
"talk_sub.groupby('talk_id').count().sort_values('transcript')",
"_____no_output_____"
],
[
"lem = nltk.stem.WordNetLemmatizer()\ndef prepare_text(text):\n text = re.sub(r'\\((.*?)\\)', ' ', text)\n text = re.sub(r'\\d+', '', text)\n text = nltk.tokenize.RegexpTokenizer(r'\\w+').tokenize(text.lower())\n text = [w for w in text if w not in nltk.corpus.stopwords.words('english')]\n text = [lem.lemmatize(w) for w in text]\n text = [w for w in text if len(w)>=3]\n return text",
"_____no_output_____"
],
[
"extra_keywords = []\ntalk_pre = talk_sub.copy()\ntalk_pre['transcript'] = talk_pre['transcript'].apply(lambda x: prepare_text(x))\ntalk_pre['transcript'] = talk_pre['transcript'].apply(lambda x: ' '.join(x))",
"_____no_output_____"
],
[
"data_words = talk_pre['transcript'].apply(lambda x: x.split()).values\n\nid2word = gensim.corpora.Dictionary(data_words)\nid2word.filter_n_most_frequent(20)\n\n\ncorpus = [id2word.doc2bow(text) for text in data_words]",
"_____no_output_____"
],
[
"id2word.token2id",
"_____no_output_____"
],
[
"def filter_text(text, tokens):\n filtered_text = []\n for word in text.split():\n if word in tokens:\n filtered_text.append(word)\n return filtered_text",
"_____no_output_____"
],
[
"talk_pre['transcript'] = talk_pre.transcript.apply(lambda x: filter_text(x, id2word.token2id))\ntalk_pre['transcript'] = talk_pre.transcript.apply(lambda x: ' '.join(x))",
"_____no_output_____"
],
[
"talk_pre['length'] = talk_pre['transcript'].apply(lambda x: len(x.split()))",
"_____no_output_____"
],
[
"talk_pre = talk_pre[talk_pre.length >= 5]",
"_____no_output_____"
],
[
"len(id2word)",
"_____no_output_____"
],
[
"data_words = talk_pre['transcript'].apply(lambda x: x.split()).values\n\nid2word = gensim.corpora.Dictionary(data_words)\n\ncorpus = [id2word.doc2bow(text) for text in data_words]",
"_____no_output_____"
],
[
"len(id2word)",
"_____no_output_____"
],
[
"with open(path.expanduser('~/work/data/talk_pre.pkl'), 'wb') as output:\n pickle.dump(talk_pre, output, pickle.HIGHEST_PROTOCOL)",
"_____no_output_____"
],
[
"with open(path.expanduser('~/work/data/talk_pre.pkl'), \"rb\") as input_file:\n talk_pre = pickle.load(input_file)",
"_____no_output_____"
],
[
"talk_pre['length'].min()",
"_____no_output_____"
],
[
"def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):\n \"\"\"\n Compute c_v coherence for various number of topics\n\n Parameters:\n ----------\n dictionary : Gensim dictionary\n corpus : Gensim corpus\n texts : List of input texts\n limit : Max num of topics\n\n Returns:\n -------\n model_list : List of LDA topic models\n coherence_values : Coherence values corresponding to the LDA model with respective number of topics\n \"\"\"\n coherence_values = []\n model_list = []\n for num_topics in range(start, limit, step):\n start = time.time()\n mallet_path = path.expanduser('~/work/modules/mallet-2.0.8/bin/mallet')\n model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=id2word, random_seed = 5, alpha = 0.1, iterations = 800, optimize_interval = 10)\n dur = time.time() - start\n print(\"Time Elapsed\",dur,'s')\n model_list.append(model)\n coherencemodel = gensim.models.CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_per_topic = coherencemodel.get_coherence_per_topic()\n coherence_values.append((min(coherence_per_topic),np.mean(coherence_per_topic)))\n print('No. Topics ', num_topics,' - Mean Coherence ',np.mean(coherence_per_topic),' - Min Coherence ',min(coherence_per_topic))\n\n return model_list, coherence_values",
"_____no_output_____"
],
[
"limit=66\nstart=5\nstep=5\n# Can take a long time to run.\nmodel_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_words, start=start, limit=limit, step=step)",
"Time Elapsed 109.08038473129272 s\nNo. Topics 5 - Mean Coherence 0.45678589932822466 - Min Coherence 0.3818414899064245\nTime Elapsed 133.3303542137146 s\nNo. Topics 10 - Mean Coherence 0.49149079011935254 - Min Coherence 0.40242422414715645\nTime Elapsed 150.06712174415588 s\nNo. Topics 15 - Mean Coherence 0.5200666809904619 - Min Coherence 0.40917280671113965\nTime Elapsed 152.27088117599487 s\nNo. Topics 20 - Mean Coherence 0.5222271483209874 - Min Coherence 0.3709827697437483\nTime Elapsed 158.40461993217468 s\nNo. Topics 25 - Mean Coherence 0.5349924339857826 - Min Coherence 0.42738794499061095\nTime Elapsed 173.73261857032776 s\nNo. Topics 30 - Mean Coherence 0.5161328839678657 - Min Coherence 0.3749419431018066\nTime Elapsed 185.2719020843506 s\nNo. Topics 35 - Mean Coherence 0.5362783406217556 - Min Coherence 0.2899994791925109\nTime Elapsed 176.05911684036255 s\nNo. Topics 40 - Mean Coherence 0.5319178976421575 - Min Coherence 0.37050387101085286\nTime Elapsed 180.17695331573486 s\nNo. Topics 45 - Mean Coherence 0.5268677901465988 - Min Coherence 0.2769223810119311\nTime Elapsed 183.63627934455872 s\nNo. Topics 50 - Mean Coherence 0.5321272684899013 - Min Coherence 0.3696117276255807\nTime Elapsed 215.98038363456726 s\nNo. Topics 55 - Mean Coherence 0.5236254925730359 - Min Coherence 0.23946437881848065\nTime Elapsed 214.92591381072998 s\nNo. Topics 60 - Mean Coherence 0.5165729040001038 - Min Coherence 0.30302497342257084\nTime Elapsed 191.7466721534729 s\nNo. Topics 65 - Mean Coherence 0.5179176025091674 - Min Coherence 0.2694756302351193\n"
],
[
"# Show graph\nplt.figure(figsize=(15,10))\nx = range(start, limit, step)\ncoherence_means, coherence_mins = zip(*coherence_values)\nplt.plot(x, coherence_means)\nplt.plot(x, coherence_mins)\nplt.xlabel(\"Num Topics\")\nplt.ylabel(\"Coherence score\")\nplt.legend(['Minimum Coherence', 'Mean Coherence'], loc='best')\nplt.show()",
"_____no_output_____"
],
[
"num_topics = 25\nstart = time.time()\nmallet_path = path.expanduser('~/work/modules/mallet-2.0.8/bin/mallet') # update this path\n# prefix = path.expanduser('~/work/jupyter_notebooks/notebooks/mallet-dep/')\nmodel = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=id2word, random_seed = 5, alpha = 0.1, iterations = 800, optimize_interval = 10)\n\ndur = time.time() - start\n\nprint(\"Time Elapsed\",dur,'s')\n\ncoherence_model_ldamallet = gensim.models.CoherenceModel(model=model, texts=data_words, dictionary=id2word, coherence='c_v')\ncoherence_ldamallet = coherence_model_ldamallet.get_coherence()\nprint('\\nCoherence Score: ', coherence_ldamallet, '\\n\\n')\n\ncoherence_ldamallet_per_topic = coherence_model_ldamallet.get_coherence_per_topic()\n# Show Topics\nfor i in np.argsort(coherence_ldamallet_per_topic):\n print('Topic', i, '- Coherence', coherence_ldamallet_per_topic[i], '\\n' ,model.print_topic(i, topn=10),'\\n')",
"Time Elapsed 163.69370222091675 s\n\nCoherence Score: 0.5349924339857826 \n\n\nTopic 21 - Coherence 0.42738794499061095 \n 0.019*\"art\" + 0.012*\"film\" + 0.012*\"show\" + 0.011*\"image\" + 0.011*\"work\" + 0.010*\"artist\" + 0.010*\"kind\" + 0.009*\"movie\" + 0.009*\"made\" + 0.009*\"picture\" \n\nTopic 5 - Coherence 0.43114296442641925 \n 0.085*\"brain\" + 0.013*\"neuron\" + 0.010*\"body\" + 0.010*\"memory\" + 0.007*\"sleep\" + 0.007*\"system\" + 0.007*\"cell\" + 0.007*\"human\" + 0.007*\"signal\" + 0.007*\"area\" \n\nTopic 20 - Coherence 0.44130449832094953 \n 0.018*\"question\" + 0.015*\"good\" + 0.011*\"problem\" + 0.011*\"lot\" + 0.010*\"answer\" + 0.010*\"kind\" + 0.009*\"number\" + 0.007*\"study\" + 0.007*\"work\" + 0.007*\"find\" \n\nTopic 2 - Coherence 0.447509440291355 \n 0.019*\"god\" + 0.013*\"book\" + 0.011*\"great\" + 0.010*\"religion\" + 0.010*\"story\" + 0.009*\"century\" + 0.008*\"man\" + 0.008*\"culture\" + 0.008*\"history\" + 0.007*\"called\" \n\nTopic 8 - Coherence 0.45037682707226423 \n 0.029*\"life\" + 0.012*\"feel\" + 0.011*\"story\" + 0.010*\"love\" + 0.009*\"change\" + 0.008*\"experience\" + 0.008*\"human\" + 0.006*\"live\" + 0.006*\"future\" + 0.006*\"talk\" \n\nTopic 15 - Coherence 0.4515220031690628 \n 0.026*\"music\" + 0.024*\"sound\" + 0.023*\"word\" + 0.022*\"language\" + 0.012*\"hear\" + 0.012*\"song\" + 0.011*\"play\" + 0.011*\"voice\" + 0.008*\"english\" + 0.008*\"ted\" \n\nTopic 14 - Coherence 0.45301164934125726 \n 0.021*\"game\" + 0.016*\"robot\" + 0.013*\"video\" + 0.011*\"play\" + 0.009*\"move\" + 0.009*\"computer\" + 0.008*\"show\" + 0.007*\"machine\" + 0.007*\"light\" + 0.006*\"camera\" \n\nTopic 6 - Coherence 0.45577345309992606 \n 0.022*\"data\" + 0.015*\"information\" + 0.013*\"computer\" + 0.012*\"internet\" + 0.012*\"phone\" + 0.008*\"medium\" + 0.008*\"online\" + 0.007*\"book\" + 0.007*\"google\" + 0.006*\"technology\" \n\nTopic 7 - Coherence 0.45656346269897796 \n 0.014*\"hand\" + 0.012*\"back\" + 0.011*\"put\" + 
0.008*\"guy\" + 0.008*\"head\" + 0.007*\"eye\" + 0.007*\"good\" + 0.005*\"yeah\" + 0.005*\"face\" + 0.005*\"big\" \n\nTopic 22 - Coherence 0.4747643128733972 \n 0.018*\"technology\" + 0.015*\"work\" + 0.013*\"idea\" + 0.013*\"problem\" + 0.011*\"system\" + 0.010*\"human\" + 0.009*\"design\" + 0.008*\"kind\" + 0.008*\"change\" + 0.006*\"create\" \n\nTopic 11 - Coherence 0.5052255631864517 \n 0.042*\"cell\" + 0.016*\"body\" + 0.011*\"material\" + 0.010*\"structure\" + 0.008*\"molecule\" + 0.008*\"blood\" + 0.008*\"tissue\" + 0.007*\"skin\" + 0.007*\"inside\" + 0.007*\"put\" \n\nTopic 4 - Coherence 0.5187873796550649 \n 0.024*\"day\" + 0.013*\"back\" + 0.011*\"thought\" + 0.010*\"started\" + 0.009*\"life\" + 0.008*\"home\" + 0.008*\"friend\" + 0.008*\"wanted\" + 0.008*\"story\" + 0.007*\"family\" \n\nTopic 0 - Coherence 0.536639193531724 \n 0.024*\"war\" + 0.008*\"police\" + 0.008*\"country\" + 0.008*\"violence\" + 0.007*\"military\" + 0.007*\"refugee\" + 0.006*\"prison\" + 0.006*\"soldier\" + 0.006*\"weapon\" + 0.006*\"conflict\" \n\nTopic 18 - Coherence 0.5387134976614774 \n 0.016*\"water\" + 0.015*\"earth\" + 0.011*\"ice\" + 0.010*\"ocean\" + 0.010*\"planet\" + 0.009*\"mar\" + 0.008*\"sea\" + 0.008*\"mile\" + 0.007*\"mountain\" + 0.007*\"foot\" \n\nTopic 9 - Coherence 0.5416671640778915 \n 0.025*\"food\" + 0.023*\"water\" + 0.020*\"energy\" + 0.011*\"oil\" + 0.010*\"plant\" + 0.009*\"carbon\" + 0.008*\"fuel\" + 0.008*\"eat\" + 0.007*\"lot\" + 0.007*\"power\" \n\nTopic 19 - Coherence 0.5696474644351069 \n 0.019*\"human\" + 0.019*\"gene\" + 0.015*\"dna\" + 0.014*\"specie\" + 0.010*\"virus\" + 0.010*\"genome\" + 0.009*\"animal\" + 0.009*\"genetic\" + 0.008*\"evolution\" + 0.008*\"ant\" \n\nTopic 1 - Coherence 0.5745453288176896 \n 0.047*\"city\" + 0.028*\"car\" + 0.024*\"building\" + 0.012*\"place\" + 0.011*\"street\" + 0.011*\"space\" + 0.008*\"road\" + 0.008*\"york\" + 0.008*\"built\" + 0.006*\"community\" \n\nTopic 10 - Coherence 0.5902639305658107 \n 
0.024*\"money\" + 0.023*\"dollar\" + 0.021*\"company\" + 0.015*\"business\" + 0.012*\"cost\" + 0.012*\"market\" + 0.009*\"million\" + 0.009*\"job\" + 0.008*\"pay\" + 0.008*\"work\" \n\nTopic 24 - Coherence 0.6108931773725613 \n 0.076*\"woman\" + 0.029*\"men\" + 0.028*\"child\" + 0.020*\"girl\" + 0.015*\"baby\" + 0.014*\"mother\" + 0.013*\"young\" + 0.013*\"family\" + 0.012*\"sex\" + 0.010*\"black\" \n\nTopic 16 - Coherence 0.6111444838005902 \n 0.032*\"country\" + 0.029*\"percent\" + 0.019*\"africa\" + 0.011*\"state\" + 0.011*\"million\" + 0.011*\"china\" + 0.010*\"population\" + 0.009*\"india\" + 0.009*\"today\" + 0.009*\"number\" \n\nTopic 12 - Coherence 0.6273791440365895 \n 0.049*\"school\" + 0.032*\"kid\" + 0.028*\"student\" + 0.020*\"child\" + 0.018*\"teacher\" + 0.016*\"education\" + 0.011*\"high\" + 0.011*\"learning\" + 0.011*\"class\" + 0.010*\"university\" \n\nTopic 23 - Coherence 0.6432008044636636 \n 0.021*\"animal\" + 0.016*\"tree\" + 0.015*\"fish\" + 0.014*\"forest\" + 0.012*\"ocean\" + 0.012*\"specie\" + 0.008*\"bird\" + 0.008*\"coral\" + 0.007*\"water\" + 0.007*\"shark\" \n\nTopic 17 - Coherence 0.6482671250098597 \n 0.023*\"patient\" + 0.022*\"cancer\" + 0.020*\"disease\" + 0.019*\"health\" + 0.016*\"drug\" + 0.015*\"doctor\" + 0.012*\"care\" + 0.011*\"medical\" + 0.010*\"hospital\" + 0.010*\"treatment\" \n\nTopic 13 - Coherence 0.6744581853530904 \n 0.024*\"universe\" + 0.016*\"light\" + 0.015*\"planet\" + 0.015*\"star\" + 0.014*\"space\" + 0.013*\"earth\" + 0.010*\"galaxy\" + 0.009*\"sun\" + 0.009*\"particle\" + 0.008*\"energy\" \n\nTopic 3 - Coherence 0.6946218513927718 \n 0.015*\"government\" + 0.014*\"state\" + 0.012*\"country\" + 0.010*\"political\" + 0.009*\"law\" + 0.008*\"power\" + 0.008*\"public\" + 0.007*\"democracy\" + 0.007*\"change\" + 0.007*\"society\" \n\n"
],
[
"def format_topics_sentences(ldamodel, corpus, texts):\n # Init output\n sent_topics_df = pd.DataFrame()\n\n # Get main topic in each document\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n # Get the Dominant topic, Perc Contribution and Keywords for each document\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0: # => dominant topic\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \", \".join([word for word, prop in wp])\n sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']\n\n # Add original text to the end of the output\n contents = pd.Series(texts)\n sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)\n return(sent_topics_df)",
"_____no_output_____"
],
[
"df_topic_sents_keywords = format_topics_sentences(ldamodel=model, corpus=corpus, texts=talk_pre['transcript'].values)\n\n# Format\ndf_dominant_topic = df_topic_sents_keywords.reset_index()\ndf_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n\n# Show\ndf_dominant_topic.head(10)",
"_____no_output_____"
],
[
"# Group top 5 sentences under each topic\nsent_topics_sorteddf_mallet = pd.DataFrame()\n\nsent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n\nfor i, grp in sent_topics_outdf_grpd:\n sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet, \n grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], \n axis=0)\n\n# Reset Index \nsent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)\n\n# Format\nsent_topics_sorteddf_mallet.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Text\"]\n\n# Show\nsent_topics_sorteddf_mallet",
"_____no_output_____"
],
[
"# Number of Documents for Each Topic\ntopic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()\n\n# Percentage of Documents for Each Topic\ntopic_contribution = round(topic_counts/topic_counts.sum(), 4)\n\n# Topic Number and Keywords\ntopic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]\n\n# Concatenate Column wise\ndf_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)\n\n# Change Column names\ndf_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']\n\n# Show\ndf_dominant_topics",
"_____no_output_____"
],
[
"df_topic_sents_keywords = format_topics_sentences(ldamodel=model, corpus=corpus, texts=talk_pre['transcript'].values)\n\n# Format\ndf_dominant_topic = df_topic_sents_keywords.reset_index()\ndf_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n\n# Show\ndf_dominant_topic.head(10)",
"_____no_output_____"
],
[
"'''\nTime Elapsed 163.69370222091675 s\n\nCoherence Score: 0.5349924339857826 \n\n\nTopic 21 - Coherence 0.42738794499061095 \n 0.019*\"art\" + 0.012*\"film\" + 0.012*\"show\" + 0.011*\"image\" + 0.011*\"work\" + 0.010*\"artist\" + 0.010*\"kind\" + 0.009*\"movie\" + 0.009*\"made\" + 0.009*\"picture\" \n\nTopic 5 - Coherence 0.43114296442641925 \n 0.085*\"brain\" + 0.013*\"neuron\" + 0.010*\"body\" + 0.010*\"memory\" + 0.007*\"sleep\" + 0.007*\"system\" + 0.007*\"cell\" + 0.007*\"human\" + 0.007*\"signal\" + 0.007*\"area\" \n\nTopic 20 - Coherence 0.44130449832094953 \n 0.018*\"question\" + 0.015*\"good\" + 0.011*\"problem\" + 0.011*\"lot\" + 0.010*\"answer\" + 0.010*\"kind\" + 0.009*\"number\" + 0.007*\"study\" + 0.007*\"work\" + 0.007*\"find\" \n\nTopic 2 - Coherence 0.447509440291355 \n 0.019*\"god\" + 0.013*\"book\" + 0.011*\"great\" + 0.010*\"religion\" + 0.010*\"story\" + 0.009*\"century\" + 0.008*\"man\" + 0.008*\"culture\" + 0.008*\"history\" + 0.007*\"called\" \n\nTopic 8 - Coherence 0.45037682707226423 \n 0.029*\"life\" + 0.012*\"feel\" + 0.011*\"story\" + 0.010*\"love\" + 0.009*\"change\" + 0.008*\"experience\" + 0.008*\"human\" + 0.006*\"live\" + 0.006*\"future\" + 0.006*\"talk\" \n\nTopic 15 - Coherence 0.4515220031690628 \n 0.026*\"music\" + 0.024*\"sound\" + 0.023*\"word\" + 0.022*\"language\" + 0.012*\"hear\" + 0.012*\"song\" + 0.011*\"play\" + 0.011*\"voice\" + 0.008*\"english\" + 0.008*\"ted\" \n\nTopic 14 - Coherence 0.45301164934125726 \n 0.021*\"game\" + 0.016*\"robot\" + 0.013*\"video\" + 0.011*\"play\" + 0.009*\"move\" + 0.009*\"computer\" + 0.008*\"show\" + 0.007*\"machine\" + 0.007*\"light\" + 0.006*\"camera\" \n\nTopic 6 - Coherence 0.45577345309992606 \n 0.022*\"data\" + 0.015*\"information\" + 0.013*\"computer\" + 0.012*\"internet\" + 0.012*\"phone\" + 0.008*\"medium\" + 0.008*\"online\" + 0.007*\"book\" + 0.007*\"google\" + 0.006*\"technology\" \n\nTopic 7 - Coherence 0.45656346269897796 \n 0.014*\"hand\" + 0.012*\"back\" + 
0.011*\"put\" + 0.008*\"guy\" + 0.008*\"head\" + 0.007*\"eye\" + 0.007*\"good\" + 0.005*\"yeah\" + 0.005*\"face\" + 0.005*\"big\" \n\nTopic 22 - Coherence 0.4747643128733972 \n 0.018*\"technology\" + 0.015*\"work\" + 0.013*\"idea\" + 0.013*\"problem\" + 0.011*\"system\" + 0.010*\"human\" + 0.009*\"design\" + 0.008*\"kind\" + 0.008*\"change\" + 0.006*\"create\" \n\nTopic 11 - Coherence 0.5052255631864517 \n 0.042*\"cell\" + 0.016*\"body\" + 0.011*\"material\" + 0.010*\"structure\" + 0.008*\"molecule\" + 0.008*\"blood\" + 0.008*\"tissue\" + 0.007*\"skin\" + 0.007*\"inside\" + 0.007*\"put\" \n\nTopic 4 - Coherence 0.5187873796550649 \n 0.024*\"day\" + 0.013*\"back\" + 0.011*\"thought\" + 0.010*\"started\" + 0.009*\"life\" + 0.008*\"home\" + 0.008*\"friend\" + 0.008*\"wanted\" + 0.008*\"story\" + 0.007*\"family\" \n\nTopic 0 - Coherence 0.536639193531724 \n 0.024*\"war\" + 0.008*\"police\" + 0.008*\"country\" + 0.008*\"violence\" + 0.007*\"military\" + 0.007*\"refugee\" + 0.006*\"prison\" + 0.006*\"soldier\" + 0.006*\"weapon\" + 0.006*\"conflict\" \n\nTopic 18 - Coherence 0.5387134976614774 \n 0.016*\"water\" + 0.015*\"earth\" + 0.011*\"ice\" + 0.010*\"ocean\" + 0.010*\"planet\" + 0.009*\"mar\" + 0.008*\"sea\" + 0.008*\"mile\" + 0.007*\"mountain\" + 0.007*\"foot\" \n\nTopic 9 - Coherence 0.5416671640778915 \n 0.025*\"food\" + 0.023*\"water\" + 0.020*\"energy\" + 0.011*\"oil\" + 0.010*\"plant\" + 0.009*\"carbon\" + 0.008*\"fuel\" + 0.008*\"eat\" + 0.007*\"lot\" + 0.007*\"power\" \n\nTopic 19 - Coherence 0.5696474644351069 \n 0.019*\"human\" + 0.019*\"gene\" + 0.015*\"dna\" + 0.014*\"specie\" + 0.010*\"virus\" + 0.010*\"genome\" + 0.009*\"animal\" + 0.009*\"genetic\" + 0.008*\"evolution\" + 0.008*\"ant\" \n\nTopic 1 - Coherence 0.5745453288176896 \n 0.047*\"city\" + 0.028*\"car\" + 0.024*\"building\" + 0.012*\"place\" + 0.011*\"street\" + 0.011*\"space\" + 0.008*\"road\" + 0.008*\"york\" + 0.008*\"built\" + 0.006*\"community\" \n\nTopic 10 - Coherence 0.5902639305658107 
\n 0.024*\"money\" + 0.023*\"dollar\" + 0.021*\"company\" + 0.015*\"business\" + 0.012*\"cost\" + 0.012*\"market\" + 0.009*\"million\" + 0.009*\"job\" + 0.008*\"pay\" + 0.008*\"work\" \n\nTopic 24 - Coherence 0.6108931773725613 \n 0.076*\"woman\" + 0.029*\"men\" + 0.028*\"child\" + 0.020*\"girl\" + 0.015*\"baby\" + 0.014*\"mother\" + 0.013*\"young\" + 0.013*\"family\" + 0.012*\"sex\" + 0.010*\"black\" \n\nTopic 16 - Coherence 0.6111444838005902 \n 0.032*\"country\" + 0.029*\"percent\" + 0.019*\"africa\" + 0.011*\"state\" + 0.011*\"million\" + 0.011*\"china\" + 0.010*\"population\" + 0.009*\"india\" + 0.009*\"today\" + 0.009*\"number\" \n\nTopic 12 - Coherence 0.6273791440365895 \n 0.049*\"school\" + 0.032*\"kid\" + 0.028*\"student\" + 0.020*\"child\" + 0.018*\"teacher\" + 0.016*\"education\" + 0.011*\"high\" + 0.011*\"learning\" + 0.011*\"class\" + 0.010*\"university\" \n\nTopic 23 - Coherence 0.6432008044636636 \n 0.021*\"animal\" + 0.016*\"tree\" + 0.015*\"fish\" + 0.014*\"forest\" + 0.012*\"ocean\" + 0.012*\"specie\" + 0.008*\"bird\" + 0.008*\"coral\" + 0.007*\"water\" + 0.007*\"shark\" \n\nTopic 17 - Coherence 0.6482671250098597 \n 0.023*\"patient\" + 0.022*\"cancer\" + 0.020*\"disease\" + 0.019*\"health\" + 0.016*\"drug\" + 0.015*\"doctor\" + 0.012*\"care\" + 0.011*\"medical\" + 0.010*\"hospital\" + 0.010*\"treatment\" \n\nTopic 13 - Coherence 0.6744581853530904 \n 0.024*\"universe\" + 0.016*\"light\" + 0.015*\"planet\" + 0.015*\"star\" + 0.014*\"space\" + 0.013*\"earth\" + 0.010*\"galaxy\" + 0.009*\"sun\" + 0.009*\"particle\" + 0.008*\"energy\" \n\nTopic 3 - Coherence 0.6946218513927718 \n 0.015*\"government\" + 0.014*\"state\" + 0.012*\"country\" + 0.010*\"political\" + 0.009*\"law\" + 0.008*\"power\" + 0.008*\"public\" + 0.007*\"democracy\" + 0.007*\"change\" + 0.007*\"society\" \n\n'''\ndf_dominant_topic[(df_dominant_topic.Dominant_Topic == 7) & (df_dominant_topic.Topic_Perc_Contrib > 
0.1)].sort_values('Topic_Perc_Contrib')[['Topic_Perc_Contrib','Keywords','Text']].values",
"_____no_output_____"
],
[
"talk_pre_extra_keywords[talk_pre_extra_keywords.transcript == 'second result surprise people ant actually switch task ant task whole life example extra food everybody else midden worker stop midden work get food become forager nest maintenance worker become forager patroller become forager every transition possible show works like say food collect patroller midden worker nest maintenance worker change forage patrol create disturbance extra patroller need nest maintenance worker switch patrol nest maintenance work need example bunch toothpick nobody ever switch back nest maintenance get nest maintenance worker inside nest foraging acts sink ant inside nest act source finally look like ant deciding moment moment whether active']",
"_____no_output_____"
],
[
"# Group top 5 sentences under each topic\nsent_topics_sorteddf_mallet = pd.DataFrame()\n\nsent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n\nfor i, grp in sent_topics_outdf_grpd:\n sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet, \n grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], \n axis=0)\n\n# Reset Index \nsent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)\n\n# Format\nsent_topics_sorteddf_mallet.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Text\"]\n\n# Show\nsent_topics_sorteddf_mallet",
"_____no_output_____"
],
[
"sent_topics_sorteddf_mallet['Coherence'] = sent_topics_sorteddf_mallet['Topic_Num'].apply(lambda x: coherence_ldamallet_per_topic[int(x)])",
"_____no_output_____"
],
[
"sent_topics_sorteddf_mallet.to_csv(path.expanduser('~/work/tests/topics.csv'))",
"_____no_output_____"
],
[
"sent_topics_sorteddf_mallet",
"_____no_output_____"
],
[
"f = open(path.expanduser('~/work/data/data.txt'),\"w+\")\nfor text in talk_pre['transcript']:\n f.write(text+'\\n')\nf.close()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec64c1beb77d9d10cd87a9152b56b511a18ddcc9 | 87,381 | ipynb | Jupyter Notebook | testlik.ipynb | kapserge/AgorizeMaster | 01094c2b5c890d8362eabcff34c2e544893d7acd | [
"WTFPL"
]
| null | null | null | testlik.ipynb | kapserge/AgorizeMaster | 01094c2b5c890d8362eabcff34c2e544893d7acd | [
"WTFPL"
]
| null | null | null | testlik.ipynb | kapserge/AgorizeMaster | 01094c2b5c890d8362eabcff34c2e544893d7acd | [
"WTFPL"
]
| null | null | null | 49.173326 | 7,542 | 0.45915 | [
[
[
"<a href=\"https://colab.research.google.com/github/kapserge/AgorizeMaster/blob/master/testlik.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom google.colab import files\nuploaded = files.upload()\n",
"_____no_output_____"
],
[
"df = pd.read_json('sample.json', lines=True)\n#print(df.to_string()) \ndff = pd.DataFrame(df)\ndff",
"_____no_output_____"
],
[
"multi= df.loc[(df['device'] == 'TABLET') & (df['pricePublisherUusd'] <= 700)]\nmulti",
"_____no_output_____"
],
[
"count_imp= df.groupby('device').size()\ncount_imp",
"_____no_output_____"
],
[
"gp = df.groupby('device')\nfor device, group in gp:\n print(device)\n print(\"**********************\")\n print(group)\n print()",
"DESKTOP\n**********************\n uid ... priceAdvertiserUusd\n0 e0b81eb5f6594aa2 ... 243.0\n6 37634004b622054d ... 111.0\n10 4283eceaf67ae0ae ... 18.0\n13 dd6a9423eaea5b54 ... NaN\n15 429762e5b7d69d02 ... NaN\n.. ... ... ...\n365 93c05c5280b7f6a9 ... 134.0\n366 ac2cdc18f8dfd69b ... 135.0\n372 b8b24ce2a0f1d23f ... 139.0\n376 2cb6512e91a6035c ... NaN\n377 da3ff852aed9f8b4 ... NaN\n\n[116 rows x 9 columns]\n\nMOBILE\n**********************\n uid ... priceAdvertiserUusd\n1 396d0ea901cb620a ... NaN\n2 c09e8771b6dedeba ... 1248.0\n3 64a498c6b069a348 ... 114.0\n4 c9ccc24529e386b0 ... 187.0\n5 48ed754cc18d64ba ... 375.0\n.. ... ... ...\n369 89a56fa23c84ba49 ... 562.0\n370 1053ecc41f44331c ... 375.0\n371 79f4d7ad229420d2 ... 356.0\n373 855fe05bbdba5f31 ... 270.0\n374 52699e217ff351db ... 941.0\n\n[250 rows x 9 columns]\n\nTABLET\n**********************\n uid ... priceAdvertiserUusd\n12 46d1d0447279c14d ... NaN\n39 07b7b195b48eb550 ... NaN\n75 071f616621a783df ... 291.0\n114 777c37d9d3dcdc4c ... 109.0\n156 4a08f5b5059f7842 ... NaN\n187 f8d2e062442f63fb ... 176.0\n198 32b07f0a802e5216 ... 288.0\n238 3e486395840be742 ... NaN\n321 d9e1c77955b18838 ... 262.0\n356 a6bc56d2e80438fc ... 700.0\n362 eedbfd442cbe9898 ... 345.0\n375 1c7e29b4661da4af ... NaN\n\n[12 rows x 9 columns]\n\n"
],
[
"select_mois = df[df[\"country\"] == \"MX\"]\nselect_device = select_mois[select_mois[\"device\"] == \"DESKTOP\"]\nselect_device",
"_____no_output_____"
],
[
"group_df = df.groupby('device').size()\ngroup_df",
"_____no_output_____"
],
[
"maxValues = df[df['priceAdvertiserUusd'] == df['priceAdvertiserUusd'].max()]\nmaxValues",
"_____no_output_____"
],
[
"def max_entier(x):\n return int(max(x))\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ngroups = df.groupby(['country']).size()\ngroups.plot.bar()",
"_____no_output_____"
],
[
"nbr = df.groupby('device').count().sort_values(by='pricePublisherUusd', ascending=False)\n\nnbr.head()",
"_____no_output_____"
],
[
"sup_inf = df[df[\"pricePublisherUusd\"] < 702.0]\n\nsup_inf",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"df.isnull().any()",
"_____no_output_____"
],
[
"df.columns[df.isnull().any()]",
"_____no_output_____"
],
[
"df.isnull().sum()",
"_____no_output_____"
],
[
"df.index[df.isnull().any(axis=1)]",
"_____no_output_____"
],
[
"df.iloc[1,:].isnull().sum()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec64c3683306aa7290935aa602b4e0a3aba50de1 | 110,339 | ipynb | Jupyter Notebook | studies/profile_assimilation_obs/assess_AMA_sounding.ipynb | lassman135/assessment | 450f16b08e182a1aee1ab5c69dfe91215ef2fcba | [
"Apache-2.0"
]
| 2 | 2019-10-25T17:10:11.000Z | 2020-03-26T17:35:56.000Z | studies/profile_assimilation_obs/assess_AMA_sounding.ipynb | lassman135/assessment | 450f16b08e182a1aee1ab5c69dfe91215ef2fcba | [
"Apache-2.0"
]
| 49 | 2019-02-25T21:24:34.000Z | 2021-11-22T21:34:07.000Z | studies/profile_assimilation_obs/assess_AMA_sounding.ipynb | lassman135/assessment | 450f16b08e182a1aee1ab5c69dfe91215ef2fcba | [
"Apache-2.0"
]
| 10 | 2019-02-22T19:13:36.000Z | 2021-06-04T01:49:59.000Z | 98.693202 | 75,520 | 0.777821 | [
[
[
"%load_ext autoreload\n%autoreload 2",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"%matplotlib inline\nimport os, sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport xarray",
"_____no_output_____"
],
[
"# manually add a2e-mmc repos to PYTHONPATH if needed\nmodule_path = os.path.join(os.environ['HOME'],'tools','a2e-mmc')\nif module_path not in sys.path:\n sys.path.append(module_path)",
"_____no_output_____"
],
[
"from mmctools.plotting import plot_profile\nfrom mmctools.helper_functions import calc_wind, T_to_Tv, theta",
"_____no_output_____"
]
],
[
[
"Define some physical constants",
"_____no_output_____"
]
],
[
[
"import matplotlib as mpl\nmpl.rcParams['xtick.labelsize'] = 16\nmpl.rcParams['ytick.labelsize'] = 16\nmpl.rcParams['axes.labelsize'] = 16",
"_____no_output_____"
]
],
[
[
"# SWIFT profile assimilation + obs study: Assess Amarillo sounding\nwritten by [Dries Allaerts](mailto:[email protected])\n\nAssess Amarillo sounding data which is used to specify the initial potential temperature profile. This notebook depends on:\n- WRF data, generated in `wrf_to_sowfa.ipynb`,\n- TTU tower data, generated in `process_TTU_tower.ipynb`",
"_____no_output_____"
],
[
"Main directories on Eagle",
"_____no_output_____"
]
],
[
[
"datadir = '/projects/mmc/dallaert/profile_assimilation_obs/'\nreferencedir = '/projects/mmc/dallaert/referenceData/'",
"_____no_output_____"
]
],
[
[
"## Load observation data",
"_____no_output_____"
]
],
[
[
"all_data = 'TTU_obs_QC_20131108.csv'",
"_____no_output_____"
],
[
"df = pd.read_csv(os.path.join(referencedir,all_data),parse_dates=True,index_col=['datetime','height'])",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df = df.xs('2013-11-08 12:00:00',level='datetime')",
"_____no_output_____"
],
[
"radar_scan0 = df.loc[df['scan_type']==0].copy()\nradar_scan1 = df.loc[df['scan_type']==1].copy()",
"_____no_output_____"
]
],
[
[
"## Load Amarillo sounding data",
"_____no_output_____"
],
[
"Load sounding data from Amarillo airport observation station (data downloaded from http://weather.uwyo.edu/cgi-bin/sounding?region=naconf&TYPE=TEXT%3ALIST&YEAR=2013&MONTH=11&FROM=0800&TO=1000&STNM=72363)",
"_____no_output_____"
]
],
[
[
"datetime = '2013-11-08 12:00:00'\nsoundingdata = 'sounding.AMA/AMA_'+datetime.replace(' ','_')\nama_sounding = np.loadtxt(referencedir+soundingdata,skiprows=8)",
"_____no_output_____"
],
[
"zsurface = 1099\nknot_to_mpers = 0.514444",
"_____no_output_____"
],
[
"data = {}\ndata['height'] = ama_sounding[:,1]-zsurface\ndata['thetav'] = ama_sounding[:,10]\ndata['wspd'] = ama_sounding[:,7] * knot_to_mpers\ndata['wdir'] = ama_sounding[:,6]\nama = pd.DataFrame(data=data)\nama['datetime'] = pd.to_datetime(datetime)\nama.set_index(['datetime','height'],inplace=True)",
"_____no_output_____"
],
[
"ama.head()",
"_____no_output_____"
]
],
[
[
"## Load TTU tower data",
"_____no_output_____"
],
[
"Load tower data for surface boundary conditions. The dataset is created within the notebook \"process_TTU_tower.ipynb\" which can be found in the [a2e-mmc/assessment repository](https://github.com/a2e-mmc/assessment)",
"_____no_output_____"
]
],
[
[
"towerdata = 'TTU_tilt_corrected_20131108-09_10min.csv'",
"_____no_output_____"
],
[
"tower = pd.read_csv(os.path.join(referencedir,towerdata),parse_dates=True,index_col=['datetime','height'])",
"_____no_output_____"
],
[
"tower.head()",
"_____no_output_____"
],
[
"tower.columns",
"_____no_output_____"
],
[
"tower['Tv'] = T_to_Tv(tower['T'],p=tower['p'],RH=tower['RH'])\ntower['thetav'] = theta(tower['Tv'],tower['p'])",
"_____no_output_____"
]
],
[
[
"## Load WRF data",
"_____no_output_____"
]
],
[
[
"WRFdata = 'WRF_20131108-09.nc'\nwrf = xarray.open_dataset(os.path.join(referencedir,WRFdata)).to_dataframe()",
"_____no_output_____"
],
[
"# Convert to standard names\nwrf.rename({'U':'u','V':'v','W':'w','UST':'u*','theta':'thetav'},\n axis='columns',inplace=True)",
"_____no_output_____"
],
[
"# Compute wind speed and wind direction\nwrf['wspd'], wrf['wdir'] = calc_wind(wrf)",
"_____no_output_____"
],
[
"wrf.head()",
"_____no_output_____"
]
],
[
[
"# Plot sounding data",
"_____no_output_____"
]
],
[
[
"fig,ax = plot_profile(\n datasets={\n 'AMA sounding':ama,\n 'WRF mesoscale':wrf.xs('2013-11-08 12:00:00',level='Time'),\n 'TTU tower':tower.xs('2013-11-08 12:00:00',level='datetime'),\n 'TTU radar (scan type 0)':radar_scan0,\n 'TTU radar (scan type 1)':radar_scan1,\n },\n fields=['wspd','wdir','thetav'],\n heightlimits=(0,2000),\n fieldlimits={\n 'wspd':(0,25),\n 'wdir':(200,260),\n 'thetav':(280,310),\n },\n datasetkwargs={'TTU tower':{'linestyle':'','marker':'^',},\n 'TTU radar (scan type 0)':{'linestyle':'','marker':'x'},\n 'TTU radar (scan type 1)':{'linestyle':'','marker':'+'},\n 'AMA sounding':{'linestyle':'-','marker':'o'},\n 'WRF mesoscale':{'linestyle':'--'},\n },\n mfc='none',markersize=7\n)\n\n#Place legend at the top\nhandles, labels = ax[0].get_legend_handles_labels()\nax[2].get_legend().remove()\n\nleg = fig.legend(handles, labels, loc = 'lower left', bbox_to_anchor = (.125,.91,.775,0.0),\n borderaxespad=0,ncol=3,fontsize=13,\n bbox_transform = plt.gcf().transFigure,mode='expand')\nleg.get_frame().set_edgecolor('k')\n\n#Save figure\nfigname = 'ama_sounding'\nplt.savefig(os.path.join(datadir,'Figures',figname+'.png'),bbox_inches='tight')\nplt.savefig(os.path.join(datadir,'Figures',figname+'.eps'),bbox_inches='tight')\nplt.savefig(os.path.join(datadir,'Figures',figname+'.pdf'),bbox_inches='tight')",
"Warning: field \"thetav\" not available in dataset TTU radar (scan type 0)\nWarning: field \"thetav\" not available in dataset TTU radar (scan type 1)\n"
]
],
[
[
"Note: Can we find sounding data for Midland, Texas, which is about 100 km south of Lubbock (Lubbock is in the middle between Amarillo and Midland). Considering the wind is coming from the southwest, Midland might be even more representative. In any case, it would be good to check that Midland sounding is comparable to Amarillo and wrf data",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec64cb8bdf8c48dd0a38579475ccf26d20cb5e62 | 5,489 | ipynb | Jupyter Notebook | Verification/Adult_Default.ipynb | joymallyac/Fairway | 6aa11e4a182e7271059a473cd6857f91d1668e7f | [
"BSD-2-Clause"
]
| 6 | 2020-07-15T06:33:05.000Z | 2022-03-22T14:02:09.000Z | Verification/Adult_Default.ipynb | joymallyac/Fairway | 6aa11e4a182e7271059a473cd6857f91d1668e7f | [
"BSD-2-Clause"
]
| 1 | 2020-07-15T06:34:43.000Z | 2020-07-15T06:34:43.000Z | Verification/Adult_Default.ipynb | joymallyac/Fairway | 6aa11e4a182e7271059a473cd6857f91d1668e7f | [
"BSD-2-Clause"
]
| 1 | 2020-07-15T06:39:31.000Z | 2020-07-15T06:39:31.000Z | 35.642857 | 146 | 0.599381 | [
[
[
"## Here I am checking for how many datapoints prediction changes after switching the value of protected attribute (Default )\n\nimport pandas as pd\nimport random,time,csv\nimport numpy as np\nimport math,copy,os\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nfrom sklearn import tree\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nimport sklearn.metrics as metrics\n\nimport sys\nsys.path.append(os.path.abspath('..'))\n\nfrom Measure import measure_final_score,calculate_recall,calculate_far,calculate_precision,calculate_accuracy",
"_____no_output_____"
],
[
"## Load dataset\nfrom sklearn import preprocessing\ndataset_orig = pd.read_csv('../dataset/adult.data.csv')\n\n## Drop NULL values\ndataset_orig = dataset_orig.dropna()\n\n## Drop categorical features\ndataset_orig = dataset_orig.drop(['workclass','fnlwgt','education','marital-status','occupation','relationship','native-country'],axis=1)\n\n## Change symbolics to numerics\ndataset_orig['sex'] = np.where(dataset_orig['sex'] == ' Male', 1, 0)\ndataset_orig['race'] = np.where(dataset_orig['race'] != ' White', 0, 1)\ndataset_orig['Probability'] = np.where(dataset_orig['Probability'] == ' <=50K', 0, 1)\n\n\n## Discretize age\ndataset_orig['age'] = np.where(dataset_orig['age'] >= 70, 70, dataset_orig['age'])\ndataset_orig['age'] = np.where((dataset_orig['age'] >= 60 ) & (dataset_orig['age'] < 70), 60, dataset_orig['age'])\ndataset_orig['age'] = np.where((dataset_orig['age'] >= 50 ) & (dataset_orig['age'] < 60), 50, dataset_orig['age'])\ndataset_orig['age'] = np.where((dataset_orig['age'] >= 40 ) & (dataset_orig['age'] < 50), 40, dataset_orig['age'])\ndataset_orig['age'] = np.where((dataset_orig['age'] >= 30 ) & (dataset_orig['age'] < 40), 30, dataset_orig['age'])\ndataset_orig['age'] = np.where((dataset_orig['age'] >= 20 ) & (dataset_orig['age'] < 30), 20, dataset_orig['age'])\ndataset_orig['age'] = np.where((dataset_orig['age'] >= 10 ) & (dataset_orig['age'] < 10), 10, dataset_orig['age'])\ndataset_orig['age'] = np.where(dataset_orig['age'] < 10, 0, dataset_orig['age'])\n\n\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\ndataset_orig = pd.DataFrame(scaler.fit_transform(dataset_orig),columns = dataset_orig.columns)\n\ndataset_orig.shape\ndataset_orig.columns",
"_____no_output_____"
],
[
"## Divide into train,validation,test\n# dataset_orig_train, dataset_orig_test = train_test_split(dataset_orig, test_size=0.2, random_state = 0, shuffle = True)\ndataset_orig_train, dataset_orig_test = train_test_split(dataset_orig, test_size=0.2, shuffle = True)\n\nX_train, y_train = dataset_orig_train.loc[:, dataset_orig_train.columns != 'Probability'], dataset_orig_train['Probability']\nX_test , y_test = dataset_orig_test.loc[:, dataset_orig_test.columns != 'Probability'], dataset_orig_test['Probability']",
"_____no_output_____"
],
[
"# Train LSR model\nclf = LogisticRegression(C=1.0, penalty='l2', solver='liblinear', max_iter=100)\nclf.fit(X_train, y_train)",
"_____no_output_____"
],
[
"# Create new test by switching the value of prottected attribute\nsame , not_same = 0,0\nfor index,row in dataset_orig_test.iterrows():\n row_ = [row.values[0:len(row.values)-1]] \n y_normal = clf.predict(row_)\n # Here protected attribute value gets switched\n if row_[0][3] == 0: ## index of Sex is 3, Race is 2\n row_[0][3] = 1\n else:\n row_[0][3] = 0 \n y_reverse = clf.predict(row_)\n if y_normal[0] != y_reverse[0]:\n not_same += 1\n else:\n same += 1\nprint(same , not_same)\n \n",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec64ccae6f9c60fe98f3059a58db0f6f7d11fa16 | 659,158 | ipynb | Jupyter Notebook | notebooks/3_processing_data.ipynb | AaronScherf/wheat_yield_prediction_gee | a7d8f2da667dd5c34233ddd8504a1dd21d667a56 | [
"MIT"
]
| 1 | 2022-01-29T17:17:11.000Z | 2022-01-29T17:17:11.000Z | notebooks/3_processing_data.ipynb | hulaba/wheat_yield_prediction_gee | a7d8f2da667dd5c34233ddd8504a1dd21d667a56 | [
"MIT"
]
| null | null | null | notebooks/3_processing_data.ipynb | hulaba/wheat_yield_prediction_gee | a7d8f2da667dd5c34233ddd8504a1dd21d667a56 | [
"MIT"
]
| 1 | 2022-01-29T17:16:33.000Z | 2022-01-29T17:16:33.000Z | 659,158 | 659,158 | 0.84857 | [
[
[
"## Processing Data and Creating Export Objects",
"_____no_output_____"
]
],
[
[
"# Install Packages\n\n\n# Import Packages\nimport os\nfrom pathlib import Path\nimport shutil\nimport pandas as pd\nimport re\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"# Mount User's Drive\n# Copy and paste the passkey from your Google account\n# You should use the same account that is operating the Colab file\n# Ignore if you aren't accessing this file in Google Colab\n\nfrom google.colab import drive\ndrive.mount('/content/drive', force_remount=True)",
"Mounted at /content/drive\n"
],
[
"parent_dir_name = 'GxE with GEE'\n\nfor dirpath, subdirs, files in os.walk('/content'):\n if parent_dir_name in subdirs:\n parent_path = dirpath + \"/\" + parent_dir_name\n\nparent_path",
"_____no_output_____"
],
[
"# Set Data Path\n## Change the destination to your Drive directory containing the folder 'raw_data.zip'\ndata_path_end = '/Colab Workspace/Data'\nos.chdir(parent_path + data_path_end)\n\n# Print the current working directory\nos.getcwd()",
"_____no_output_____"
],
[
"# os.chdir('C:/Users/theaa/Downloads')",
"_____no_output_____"
]
],
[
[
"## Import Aggregated Data from 1_Import",
"_____no_output_____"
]
],
[
[
"data = pd.read_pickle('aggregated_data.pkl')\ndata.drop_duplicates(subset=['UNIQUE_ID'],inplace=True)\nin_shape = data.shape\nin_shape",
"_____no_output_____"
],
[
"data.columns=data.columns.str.upper()\ndata.head()",
"_____no_output_____"
]
],
[
[
"## Counting Missing Data and Dropping Overly Missing Columns",
"_____no_output_____"
]
],
[
[
"data = data.replace({'nan': np.nan, 'NAN':np.nan, 'NA':np.nan})\ndata['GRAIN_YIELD'] = pd.to_numeric(data['GRAIN_YIELD'],errors='coerce')\n\ndata.isna().sum().sort_values(ascending=True)[0:30]",
"_____no_output_____"
],
[
"data.dropna(how='all',inplace=True)\ndata.shape",
"_____no_output_____"
],
[
"missing_list = data.isna().sum()\nmissing_list['HARVEST_FINISHING_DATE']",
"_____no_output_____"
]
],
[
[
"### Testing Missing Values for Randomness",
"_____no_output_____"
]
],
[
[
"data['Has_Yield'] = data['GRAIN_YIELD'].notnull()\ndata['Has_Yield'].unique()",
"_____no_output_____"
],
[
"def check_random(data,test_var, ancillary_var) :\n data['Has_Var'] = data[test_var].notnull()\n has_var_desc = data[data['Has_Var']==True][ancillary_var].describe()\n miss_var_desc = data[data['Has_Var']==False][ancillary_var].describe()\n a = data[data['Has_Var']==True][ancillary_var].dropna()\n b = data[data['Has_Var']==False][ancillary_var].dropna()\n ttest_result = stats.ttest_ind(a,b)\n data.drop(columns = ['Has_Var'],inplace=True)\n print(\"Summary of {} for Rows with {}\".format(ancillary_var, test_var))\n print(has_var_desc)\n print(\"Summary of {} for Rows Missing {}\".format(ancillary_var, test_var))\n print(miss_var_desc)\n print(\"T-Test Results\")\n print(ttest_result)",
"_____no_output_____"
],
[
"check_random(data=data,test_var='GRAIN_YIELD',ancillary_var='ALTITUDE')",
"Summary of ALTITUDE for Rows with GRAIN_YIELD\ncount 183109.000000\nmean 526.231594\nstd 658.411371\nmin -224.000000\n25% 83.000000\n50% 250.000000\n75% 650.000000\nmax 3391.000000\nName: ALTITUDE, dtype: float64\nSummary of ALTITUDE for Rows Missing GRAIN_YIELD\ncount 3059.000000\nmean 692.946061\nstd 895.934683\nmin 0.000000\n25% 72.000000\n50% 320.000000\n75% 850.000000\nmax 3050.000000\nName: ALTITUDE, dtype: float64\nT-Test Results\nTtest_indResult(statistic=-13.792765181006207, pvalue=2.9592205736516972e-43)\n"
],
[
"check_random(data=data,test_var='GRAIN_YIELD',ancillary_var='LONG_COORD')",
"Summary of LONG_COORD for Rows with GRAIN_YIELD\ncount 183109.000000\nmean 24.532251\nstd 61.995533\nmin -122.389999\n25% -5.380000\n50% 36.259998\n75% 73.059998\nmax 175.369995\nName: LONG_COORD, dtype: float64\nSummary of LONG_COORD for Rows Missing GRAIN_YIELD\ncount 3059.000000\nmean 0.591192\nstd 65.966492\nmin -122.389999\n25% -59.400002\n50% 14.250000\n75% 45.110001\nmax 165.300003\nName: LONG_COORD, dtype: float64\nT-Test Results\nTtest_indResult(statistic=21.163253154012274, pvalue=2.7296009904468205e-99)\n"
],
[
"check_random(data=data,test_var='GRAIN_YIELD',ancillary_var='LAT_COORD')",
"Summary of LAT_COORD for Rows with GRAIN_YIELD\ncount 183109.000000\nmean 20.612999\nstd 23.147266\nmin -43.380001\n25% 19.090000\n50% 29.000000\n75% 34.520000\nmax 59.400002\nName: LAT_COORD, dtype: float64\nSummary of LAT_COORD for Rows Missing GRAIN_YIELD\ncount 3059.000000\nmean 7.242839\nstd 31.622749\nmin -38.320000\n25% -26.480000\n50% 9.030000\n75% 39.230000\nmax 55.029999\nName: LAT_COORD, dtype: float64\nT-Test Results\nTtest_indResult(statistic=31.47238321622934, pvalue=7.730114816941718e-217)\n"
],
[
"check_random(data=data,test_var='HARVEST_FINISHING_DATE',ancillary_var='ALTITUDE')",
"Summary of ALTITUDE for Rows with HARVEST_FINISHING_DATE\ncount 151520.000000\nmean 528.915846\nstd 661.991085\nmin -224.000000\n25% 91.000000\n50% 265.000000\n75% 640.000000\nmax 3391.000000\nName: ALTITUDE, dtype: float64\nSummary of ALTITUDE for Rows Missing HARVEST_FINISHING_DATE\ncount 34648.000000\nmean 529.211903\nstd 669.203947\nmin 0.000000\n25% 45.000000\n50% 241.000000\n75% 660.000000\nmax 3391.000000\nName: ALTITUDE, dtype: float64\nT-Test Results\nTtest_indResult(statistic=-0.07494804597351477, pvalue=0.9402561293957447)\n"
],
[
"check_random(data=data,test_var='HARVEST_FINISHING_DATE',ancillary_var='LAT_COORD')",
"Summary of LAT_COORD for Rows with HARVEST_FINISHING_DATE\ncount 151520.000000\nmean 20.177988\nstd 23.660246\nmin -43.380001\n25% 18.170000\n50% 29.000000\n75% 34.250000\nmax 59.400002\nName: LAT_COORD, dtype: float64\nSummary of LAT_COORD for Rows Missing HARVEST_FINISHING_DATE\ncount 34648.000000\nmean 21.341383\nstd 22.041737\nmin -38.200001\n25% 18.170000\n50% 29.000000\n75% 36.430000\nmax 59.400002\nName: LAT_COORD, dtype: float64\nT-Test Results\nTtest_indResult(statistic=-8.328982204718423, pvalue=8.208404925550464e-17)\n"
],
[
"check_random(data=data,test_var='HARVEST_FINISHING_DATE',ancillary_var='LONG_COORD')",
"Summary of LONG_COORD for Rows with HARVEST_FINISHING_DATE\ncount 151520.000000\nmean 25.075661\nstd 62.136841\nmin -122.389999\n25% -6.300000\n50% 36.560001\n75% 73.070000\nmax 175.369995\nName: LONG_COORD, dtype: float64\nSummary of LONG_COORD for Rows Missing HARVEST_FINISHING_DATE\ncount 34648.000000\nmean 20.048719\nstd 61.966213\nmin -115.040001\n25% -4.450000\n50% 35.480000\n75% 70.279999\nmax 143.520004\nName: LONG_COORD, dtype: float64\nT-Test Results\nTtest_indResult(statistic=13.60320213582165, pvalue=4.015006695639614e-42)\n"
],
[
"check_random(data=data,test_var='LONG_COORD',ancillary_var='ALTITUDE')",
"Summary of ALTITUDE for Rows with LONG_COORD\ncount 186168.000000\nmean 528.970946\nstd 663.337625\nmin -224.000000\n25% 83.000000\n50% 258.000000\n75% 650.000000\nmax 3391.000000\nName: ALTITUDE, dtype: float64\nSummary of ALTITUDE for Rows Missing LONG_COORD\ncount 0.0\nmean NaN\nstd NaN\nmin NaN\n25% NaN\n50% NaN\n75% NaN\nmax NaN\nName: ALTITUDE, dtype: float64\nT-Test Results\nTtest_indResult(statistic=nan, pvalue=nan)\n"
],
[
"data['Harvest_Finish_Missing'] = data['HARVEST_FINISHING_DATE'].notnull() == False\ndata[data['Harvest_Finish_Missing'] == True]['HARVEST_STARTING_DATE'].isna().sum()",
"_____no_output_____"
],
[
"data[data['Harvest_Finish_Missing'] == True]['SOWING_DATE'].isna().sum()",
"_____no_output_____"
],
[
"check_random(data=data,test_var='HARVEST_FINISHING_DATE',ancillary_var='GRAIN_YIELD')",
"Summary of GRAIN_YIELD for Rows with HARVEST_FINISHING_DATE\ncount 149479.000000\nmean 4.344084\nstd 2.207203\nmin -0.074000\n25% 2.750000\n50% 4.059000\n75% 5.677000\nmax 17.823000\nName: GRAIN_YIELD, dtype: float64\nSummary of GRAIN_YIELD for Rows Missing HARVEST_FINISHING_DATE\ncount 33880.000000\nmean 4.430586\nstd 2.351689\nmin 0.010000\n25% 2.639000\n50% 4.147500\n75% 6.013000\nmax 13.726000\nName: GRAIN_YIELD, dtype: float64\nT-Test Results\nTtest_indResult(statistic=-6.433357477796599, pvalue=1.2512141836050946e-10)\n"
]
],
[
[
"## Convert Dates to Datetime Format and Recover Missing Harvest Dates",
"_____no_output_____"
]
],
[
[
"# Convert all column names to upper case, and then drop _TEXT columns which lack a substantial amount of datas.\ndata.drop([\"EMERGENCE_DATE_TEXT\",\"HARVEST_FINISHING_DATE_TEXT\", \"HARVEST_STARTING_DATE_TEXT\",\"SOWING_DATE_TEXT\"], axis=1,inplace=True)",
"_____no_output_____"
],
[
"# Set target date-time columns (Sowing Date, Emergence Date, Harvest Starting Date, Harvest Finishing Date) to datetime format\ntarget_dates = [\"SOWING_DATE\", \"EMERGENCE_DATE\",\"HARVEST_STARTING_DATE\", \"HARVEST_FINISHING_DATE\",'FERTILIZER_1','FERTILIZER_2','FERTILIZER_3']\ndata[target_dates] = data[target_dates].apply(pd.to_datetime, errors='coerce')\ndata[target_dates].head()",
"_____no_output_____"
],
[
"data[target_dates].isna().sum().sort_values(ascending=False)[0:20]",
"_____no_output_____"
],
[
"data['HARVEST_FINISHING_DATE'][data['HARVEST_FINISHING_DATE'] > pd.to_datetime('2020-07-03')]",
"_____no_output_____"
],
[
"# Try to recover harvest finishing date for missing data from other dates\ndata['Harvest_Finish_Missing'] = data['HARVEST_FINISHING_DATE'].notnull() == False\n\nHarvest_Start_to_Finish = data[data['Harvest_Finish_Missing'] == False]['HARVEST_FINISHING_DATE'] - data[data['Harvest_Finish_Missing'] == False]['HARVEST_STARTING_DATE']\nSowing_to_Harvest = data[data['Harvest_Finish_Missing'] == False]['HARVEST_FINISHING_DATE'] - data[data['Harvest_Finish_Missing'] == False]['SOWING_DATE']\n\nAv_Harvest_Start_to_Finish = Harvest_Start_to_Finish.mean()\nAv_Sowing_to_Harvest = Sowing_to_Harvest.mean()\n\nprint('Average Harvest Start to Finish: {}'.format(Av_Harvest_Start_to_Finish))\nprint('Average Sowing to Harvest: {}'.format(Av_Sowing_to_Harvest))",
"Average Harvest Start to Finish: 2 days 00:05:30.536023\nAverage Sowing to Harvest: 162 days 07:00:15.683168\n"
],
[
"data.loc[data['Harvest_Finish_Missing'] == True,'HARVEST_FINISHING_DATE'] = \\\ndata.loc[data['Harvest_Finish_Missing'] == True,'HARVEST_FINISHING_DATE']. \\\nfillna(data['HARVEST_STARTING_DATE'] + Av_Harvest_Start_to_Finish). \\\nfillna(data['SOWING_DATE'] + Av_Sowing_to_Harvest)",
"_____no_output_____"
],
[
"data['HARVEST_FINISHING_DATE'].isna().sum()",
"_____no_output_____"
],
[
"# Drop all rows from data missing their harvest finishing date\ndata.dropna(subset=['HARVEST_FINISHING_DATE'],inplace=True)\ndata.shape",
"_____no_output_____"
]
],
[
[
"## Dropping Observations with Missing Data in Key Variables",
"_____no_output_____"
]
],
[
[
"X = 2000\nmissing_over_X = data.isna().sum() > (len(data) - X)\ncol_to_drop = missing_over_X[missing_over_X].index\ndata.drop(columns=col_to_drop,inplace=True)",
"_____no_output_____"
],
[
"# Drop observations missing data for the target variable, harvest date, or CID_SID\ndata.drop_duplicates(subset=['UNIQUE_ID'],inplace=True)\ndata.dropna(subset=['GRAIN_YIELD'],inplace=True)\ndata.dropna(subset=['GENO_ID'],inplace=True)",
"_____no_output_____"
],
[
"pr_shape = data.shape\npr_shape",
"_____no_output_____"
],
[
"rows_lost = in_shape[0] - pr_shape[0]\ncols_lost = in_shape[1] - pr_shape[1]\nsh_lost = (rows_lost, cols_lost)\nsh_lost",
"_____no_output_____"
]
],
[
[
"## Create trial type column",
"_____no_output_____"
]
],
[
[
"data['TRIAL_TYPE'] = data.index.str.findall('\\D+')\ndata['TRIAL_TYPE']= data[\"TRIAL_TYPE\"].str.get(0).str.strip(\"_\")\ndata['TRIAL_TYPE'].unique()",
"_____no_output_____"
],
[
"data[\"TRIAL_TYPE\"][data['TRIAL_TYPE'].str.contains('HTYWT')] = \"HTWYT\"\ndata['TRIAL_TYPE'].unique()",
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"data.to_pickle('mid_processed_data.pkl')",
"_____no_output_____"
]
],
[
[
"## Create lat_long object",
"_____no_output_____"
]
],
[
[
"import math\n# Create unique Location-Harvest_Month Column\ndata['HARVEST_FINISHING_YEAR'] = pd.DatetimeIndex(data['HARVEST_FINISHING_DATE'],yearfirst=True).year\ndata['HARVEST_FINISHING_MONTH'] = pd.DatetimeIndex(data['HARVEST_FINISHING_DATE'],yearfirst=True).month\ndata['HARVEST_FINISHING_DAY'] = pd.DatetimeIndex(data['HARVEST_FINISHING_DATE'],yearfirst=True).day\n\ndata['HARVEST_FINISHING_YEAR'] = pd.to_numeric(data['HARVEST_FINISHING_YEAR'],errors='ignore').apply(math.trunc)\ndata['HARVEST_FINISHING_MONTH'] = pd.to_numeric(data['HARVEST_FINISHING_MONTH'],errors='ignore').apply(math.trunc)\ndata['HARVEST_FINISHING_DAY'] = pd.to_numeric(data['HARVEST_FINISHING_DAY'],errors='ignore').apply(math.trunc)",
"_____no_output_____"
],
[
"data['LAT_LONG_DAY_MONTH_YEAR'] = data['LAT_COORD'].astype(str) + \"_\" + data['LONG_COORD'].astype(str) + \\\n \"_\" + data['HARVEST_FINISHING_DAY'].astype(str) + \"_\" + \\\n data['HARVEST_FINISHING_MONTH'].astype(str) + \"_\" + \\\n data['HARVEST_FINISHING_YEAR'].astype(str)\n ",
"_____no_output_____"
],
[
"# Finally, pull the coordinates and unique IDs for each observation across all trials into separate CSV\n# This is what we'll send to GEE\nGEE_vars = ['LAT_COORD', 'LONG_COORD', 'COUNTRY', 'HARVEST_FINISHING_DATE', 'HARVEST_FINISHING_DAY', 'HARVEST_FINISHING_MONTH', 'HARVEST_FINISHING_YEAR', 'LAT_LONG_DAY_MONTH_YEAR']\n\nlat_long = pd.DataFrame(index=data.index.values)\nlat_long[GEE_vars] = data[GEE_vars]\nlat_long.dropna(subset=['HARVEST_FINISHING_DATE'],inplace=True)\n\nrows = lat_long.shape[0]\nrows",
"_____no_output_____"
],
[
"lat_long.drop_duplicates(subset='LAT_LONG_DAY_MONTH_YEAR',inplace=True)\nunique_climates = lat_long.shape[0]",
"_____no_output_____"
],
[
"rows / unique_climates",
"_____no_output_____"
],
[
"lat_long.sort_values('HARVEST_FINISHING_YEAR')",
"_____no_output_____"
],
[
"lat_long.to_pickle('lat_long.pkl')\nlat_long.to_csv('lat_long.csv')",
"_____no_output_____"
]
],
[
[
"## Import COP Data and Join to CIMMYT Trials",
"_____no_output_____"
]
],
[
[
"lat_long = pd.read_pickle('lat_long.pkl')\nlat_long.shape",
"_____no_output_____"
],
[
"GID_CID_SID = pd.read_csv('GID_CID_SID_all.csv')",
"_____no_output_____"
],
[
"GID_CID_SID['GID'] = GID_CID_SID['gid'].str.split(\" \", expand = True)[1].astype(str)\nGID_CID_SID['CID'] = GID_CID_SID['cid_sid'].str.split(\" \", expand = True)[1].astype(str)\nGID_CID_SID['SID'] = GID_CID_SID['cid_sid'].str.split(\" \", expand = True)[3].astype(str)\nGID_CID_SID['GENO_ID'] = GID_CID_SID['CID'].astype(str) + '_' + GID_CID_SID['SID'].astype(str)\nGID_CID_SID['GENO_ID'].value_counts()",
"_____no_output_____"
],
[
"joined = data.merge(GID_CID_SID, how='left', left_on = 'GENO_ID', right_on = 'GENO_ID')\njoined.drop(['Unnamed: 0', 'Unnamed: 0.1', 'url_num', 'gid', 'cid_sid'],axis=1,inplace=True)\njoined = joined.set_index(data.index)",
"_____no_output_____"
],
[
"# Number of unique genotypes in combined dataset\njoined['GID'].value_counts()",
"_____no_output_____"
],
[
"# Number of rows with different CID's\njoined['diff_CID'] = joined['CID_X'].astype(str) != joined['CID'].astype(str)\njoined['diff_SID'] = joined['SID_X'].astype(str) != joined['SID'].astype(str)\njoined[joined['diff_CID']]['GEN_NAME']",
"_____no_output_____"
],
[
"# Number of rows with different CID's that aren't Local Checks\njoined[joined['diff_CID']][joined[joined['diff_CID']]['GEN_NAME'] != 'LOCAL CHECK']['GEN_NAME']\n",
"_____no_output_____"
],
[
"# Check whether the different values have NaN for GID, so they likely weren't in the database online\njoined[joined['diff_CID']][joined[joined['diff_CID']]['GEN_NAME'] != 'LOCAL CHECK']['GID'].dropna()",
"_____no_output_____"
],
[
"# Prepare unique set of GID values for export to find COP matrix\nGID_CID_SID = pd.DataFrame(index=joined.index.values)\nGID_CID_SID['GID'] = joined['GID']\nGID_CID_SID['CID'] = joined['GID']\nGID_CID_SID['SID'] = joined['GID']\nGID_CID_SID.dropna(inplace=True,subset=['GID'])\nGID_CID_SID.drop_duplicates(inplace=True,subset=['GID'])\nGID_CID_SID.shape",
"_____no_output_____"
],
[
"GID_CID_SID.to_pickle('GID_Numbers.pkl')",
"_____no_output_____"
],
[
"data_gid_vars = ['GENO_ID','CID_X',\t'SID_X','GEN_NAME','GID']\ndata_gids = joined[data_gid_vars]\ndata_gids.head()",
"_____no_output_____"
],
[
"data_gids.shape",
"_____no_output_____"
],
[
"# A : pedigree coefficient of parentage symmetric positive semi-definite matrix\n\n# Rdata files don't seem to open properly in the Rpy2 environment\n# Use the following commands in R to convert the Rdata to a csv\n\n# A_raw = load('A_3k.RData')\n# write.csv(A_raw, 'A_matrix.csv')\n\nA = pd.read_csv('A_matrix.csv', index_col=0)\nA.head()",
"_____no_output_____"
],
[
"# Get a list of unique GID values from the data\nUnique_GID = data_gids['GID'].unique()\nlen(Unique_GID)",
"_____no_output_____"
],
[
"# Identify the GIDs which are present in the data but not the A matrix\ndata_GID_not_in_A = list(set(Unique_GID.astype(str)) - set(A.index.values.astype(str)))\ndata_GID_not_in_A",
"_____no_output_____"
],
[
"# Identify the GIDs which are present in A but not in the data\ndata_A_not_in_GID = list(set(A.index.values.astype(str)) - set(Unique_GID.astype(str)))\ndata_A_not_in_GID",
"_____no_output_____"
],
[
"Unique_GID = [ele for ele in Unique_GID if ele not in data_GID_not_in_A and str(ele) != 'nan']\nlen(Unique_GID) ",
"_____no_output_____"
],
[
"# Subset the A matrix to the list of GIDs in the data sample\nA_uniq = A[Unique_GID]\nA_uniq.reset_index(inplace=True)\nA_uniq.head()",
"_____no_output_____"
],
[
"A_uniq = A_uniq[~A_uniq['index'].isin(data_A_not_in_GID)]\nA_uniq.index = A_uniq['index'].astype(str)\nA_uniq.drop(columns='index',inplace=True)\nA_uniq.head()",
"_____no_output_____"
],
[
"A_uniq['30374']['30374'] # Should be 1",
"_____no_output_____"
],
[
"data_GID = joined[-joined.GID.isin(data_GID_not_in_A)]\ndata_GID.dropna(axis=0,subset=['GID'],inplace=True)",
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \n"
],
[
"data_GID.head()",
"_____no_output_____"
],
[
"len(data_GID.GID.unique())",
"_____no_output_____"
],
[
"A_uniq.to_pickle('A_matrix_unique.pkl')",
"_____no_output_____"
],
[
"data_GID.to_pickle('proc_data.pkl')\ndata_GID.shape",
"_____no_output_____"
],
[
"data_GID.to_csv('proc_data.csv')",
"_____no_output_____"
]
],
[
[
"## Create Environmentals Covariate Object",
"_____no_output_____"
]
],
[
[
"low_na_columns = data.isna().sum().sort_values(ascending=True)[0:70]",
"_____no_output_____"
],
[
"# Extract Environmental Covariates\nenv_cov_list = ['ALTITUDE',\n'PPN_10TH_MO_BEFORE_HARVESTED',\n'PPN_11TH_MO_BEFORE_HARVESTED',\n'PPN_1ST_MO_BEFORE_HARVESTED',\n'PPN_2ND_MO_BEFORE_HARVESTED',\n'PPN_3RD_MO_BEFORE_HARVESTED',\n'PPN_4TH_MO_BEFORE_HARVESTED',\n'PPN_5TH_MO_BEFORE_HARVESTED',\n'PPN_6TH_MO_BEFORE_HARVESTED',\n'PPN_7TH_MO_BEFORE_HARVESTED',\n'PPN_8TH_MO_BEFORE_HARVESTED',\n'PPN_9TH_MO_BEFORE_HARVESTED',\n'PPN_MONTH_OF_HARVESTED',\n'PRECIPITATION_FROM_SOWING_TO_MATURITY',\n'TOTAL_PRECIPIT_IN_12_MONTHS',\n'IRRIGATED',\n'SOIL_ALUMINIUM_TOXICITY',\n'OTHER_MICRONUTRIENT_TOXICITY/DEFICIENCY_Y/N',\n'LENGTH_OF_ROWS_SOWN',\n'SPACE_BTN_ROWS_SOWN',\n'NO_OF_ROWS_SOWN',\n'LENGTH_OF_ROWS_HARVESTED',\n'NO_OF_ROWS_HARVESTED',\n'CROP_STAND_OR_DENSITY',\n'WEED_PROBLEM', \n'BIRD_DAMAGE', \n'INSECT_DAMAGE',\n'FERTILIZER_APPLIED', \n'FOLIAR_DISEASE_DEVELOPMENT',\n'LODGING',\n'ROOT_DISEASE_DEVELOPMENT', \n'HAIL_DAMAGE', \n'FROST_DAMAGE_SPIKE'\n]\n\nW = pd.DataFrame(data_GID[env_cov_list],index=data_GID.index)\n\nW.dtypes\n\n# dmy = dummyVars(\" ~ .\", data = W)\n# W = data.frame(predict(dmy, newdata = W))\n\n# str(W)",
"_____no_output_____"
],
[
"# Need a better way to identify factors and automatically convert dtypes appropriately",
"_____no_output_____"
],
[
"W_factors = ['IRRIGATED',\n 'SOIL_ALUMINIUM_TOXICITY',\n 'OTHER_MICRONUTRIENT_TOXICITY/DEFICIENCY_Y/N',\n 'BIRD_DAMAGE',\n 'CROP_STAND_OR_DENSITY',\n 'WEED_PROBLEM',\n 'INSECT_DAMAGE',\n 'FERTILIZER_APPLIED',\n 'FOLIAR_DISEASE_DEVELOPMENT',\n 'LODGING',\n 'ROOT_DISEASE_DEVELOPMENT', \n 'HAIL_DAMAGE', \n 'FROST_DAMAGE_SPIKE'\n ]",
"_____no_output_____"
],
[
"W[W_factors] = W[W_factors].astype('category')",
"_____no_output_____"
],
[
"W_numeric = [x for x in env_cov_list if x not in W_factors]",
"_____no_output_____"
],
[
"for variable in W_numeric :\n W[variable] = pd.to_numeric(W[variable], errors='coerce')\nW.dtypes",
"_____no_output_____"
],
[
"!pip install missingno",
"_____no_output_____"
],
[
"import missingno\n# https://www.geeksforgeeks.org/python-visualize-missing-values-nan-values-using-missingno-library/\n# missing data plot, where white is missing\nmissingno.matrix(W, sparkline=False)",
"_____no_output_____"
],
[
"missingno.bar(W, sort='ascending')",
"_____no_output_____"
],
[
"W.to_pickle('W_pre_impute.pkl')",
"_____no_output_____"
]
],
[
[
"## Imputation of Remaining Missing Data",
"_____no_output_____"
]
],
[
[
"# Random Forest MissForest Imputation with MissingPy\n# https://pypi.org/project/missingpy/\n\n!pip install missingpy",
"Collecting missingpy\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/b5/be/998d04d27054b58f0974b5f09f8457778a0a72d4355e0b7ae877b6cfb850/missingpy-0.2.0-py3-none-any.whl (49kB)\n\r\u001b[K |██████▊ | 10kB 19.8MB/s eta 0:00:01\r\u001b[K |█████████████▍ | 20kB 3.1MB/s eta 0:00:01\r\u001b[K |████████████████████ | 30kB 4.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████▊ | 40kB 3.0MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 51kB 2.6MB/s \n\u001b[?25hInstalling collected packages: missingpy\nSuccessfully installed missingpy-0.2.0\n"
],
[
"#W = pd.read_csv('W_pre_impute.csv', index_col=0)\nW[W_factors] = W[W_factors].astype('category')\nW.dtypes",
"_____no_output_____"
],
[
"for col in W_factors :\n W[col] = W[col].str.upper()",
"_____no_output_____"
],
[
"W_dummies = pd.get_dummies(W[W_factors]) # Create dummy variables for each categorical variable\nW = pd.concat([W[W_numeric],W_dummies],axis=1,) # Concatenate dummy variables onto dataframe of numeric variables\nW.head()",
"_____no_output_____"
],
[
"W_dummy_col_num = [W.columns.get_loc(c) for c in W_dummies.columns if c in W]",
"_____no_output_____"
],
[
"W.shape",
"_____no_output_____"
],
[
"# https://pypi.org/project/missingpy/\nfrom missingpy import MissForest\n\nimputer = MissForest()\nW_imp = imputer.fit_transform(W,cat_vars=W_dummy_col_num)",
"/usr/local/lib/python3.6/dist-packages/missingpy/missforest.py:407: RuntimeWarning: invalid value encountered in long_scalars\n (Ximp[:, self.cat_vars_] != Ximp_old[:, self.cat_vars_])) / n_catmissing\n"
],
[
"W_imp = pd.DataFrame(W_imp, columns=W.columns)\nW_imp.index = W.index\nW_imp.head()",
"_____no_output_____"
],
[
"W_imp.to_pickle('W_imp.pkl')",
"_____no_output_____"
]
],
[
[
"## Basic Visualizations",
"_____no_output_____"
]
],
[
[
"data['YLD_NM'] = pd.to_numeric(data['GRAIN_YIELD'], errors='coerce')\ndata['YLD_NM'].plot.kde()",
"_____no_output_____"
],
[
"# https://python-graph-gallery.com/74-density-plot-of-several-variables/\n# https://stackoverflow.com/questions/45201514/edit-seaborn-legend\n\n# plot of 2 variables\np1=sns.kdeplot(data[data['TRIAL_TYPE']=='ESWYT']['YLD_NM'], shade=False, color=\"b\")\np1=sns.kdeplot(data[data['TRIAL_TYPE']=='HTWYT']['YLD_NM'], shade=False, color=\"y\")\np1=sns.kdeplot(data[data['TRIAL_TYPE']=='HRWYT']['YLD_NM'], shade=False, color=\"g\")\np1=sns.kdeplot(data[data['TRIAL_TYPE']=='SAWYT']['YLD_NM'], shade=False, color=\"r\")\n\n\nplt.legend(title='Trial Type', loc='upper right', labels=['ESWYT','HTWYT', 'HRWYT','SAWYT'])\np1.set_xlabel('Grain Yield (Tons per Hectare)')\np1.set_ylabel('Density')\np1.set_title('Density Distribution of Grain Yield by Trial Type')\nplt.show(p1)",
"_____no_output_____"
],
[
"fig_dims = (3, 4)\nfig, ax = plt.subplots(figsize=fig_dims)\nsns.boxplot(y = data['YLD_NM'], ax = ax)",
"_____no_output_____"
],
[
"\nax = sns.boxplot(x = 'TRIAL_TYPE', y = 'YLD_NM', data = data)\nax.set_ylabel('Grain Yield')\nax.set_xlabel('Trial Type')\nax.set_title('Grain Yield by Trial Type')",
"_____no_output_____"
],
[
"# https://python-graph-gallery.com/134-how-to-avoid-overplotting-with-python/\n\n# Sample 5000 random lines\ndf_sample=data.sample(5000)\n \n# Make the plot with this subset\nplt.plot( 'ALTITUDE', 'YLD_NM', data=df_sample, linestyle='', marker='o', markersize=0.7)\n \n# titles\nplt.xlabel('Altitude (Meters)')\nplt.ylabel('Grain Yield (Tons per Hectare)')\nplt.title('Scatterplot of Grain Yield and Altitude', loc='center')\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec64d05369ac394e2b8bbeb7f0e0c498fba87ecc | 21,987 | ipynb | Jupyter Notebook | colormaps.ipynb | MRYingLEE/Deep-Learning-Project | 21c0a92274ed605776f960b385826bb88adc5a23 | [
"MIT"
]
| null | null | null | colormaps.ipynb | MRYingLEE/Deep-Learning-Project | 21c0a92274ed605776f960b385826bb88adc5a23 | [
"MIT"
]
| null | null | null | colormaps.ipynb | MRYingLEE/Deep-Learning-Project | 21c0a92274ed605776f960b385826bb88adc5a23 | [
"MIT"
]
| null | null | null | 115.115183 | 3,540 | 0.653386 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n***********************\nColormaps in Matplotlib\n***********************\n\nHow (and why) to choose a particular colormap.\n\nOverview\n========\n\nThe idea behind choosing a good colormap is to find a good representation in 3D\ncolorspace for your data set. The best colormap for any given data set depends\non many things including:\n\n- Whether representing form or metric data ([Ware]_)\n\n- Your knowledge of the data set (*e.g.*, is there a critical value\n from which the other values deviate?)\n\n- If there is an intuitive color scheme for the parameter you are plotting\n\n- If there is a standard in the field the audience may be expecting\n\nFor many applications, a perceptually uniform colormap is the best\nchoice --- one in which equal steps in data are perceived as equal\nsteps in the color space. Researchers have found that the human brain\nperceives changes in the lightness parameter as changes in the data\nmuch better than, for example, changes in hue. Therefore, colormaps\nwhich have monotonically increasing lightness through the colormap\nwill be better interpreted by the viewer. A wonderful example of\nperceptually uniform colormaps is [colorcet]_.\n\nColor can be represented in 3D space in various ways. One way to represent color\nis using CIELAB. In CIELAB, color space is represented by lightness,\n$L^*$; red-green, $a^*$; and yellow-blue, $b^*$. The lightness\nparameter $L^*$ can then be used to learn more about how the matplotlib\ncolormaps will be perceived by viewers.\n\nAn excellent starting resource for learning about human perception of colormaps\nis from [IBM]_.\n\n\nClasses of colormaps\n====================\n\nColormaps are often split into several categories based on their function (see,\n*e.g.*, [Moreland]_):\n\n1. Sequential: change in lightness and often saturation of color\n incrementally, often using a single hue; should be used for\n representing information that has ordering.\n\n2. 
Diverging: change in lightness and possibly saturation of two\n different colors that meet in the middle at an unsaturated color;\n should be used when the information being plotted has a critical\n middle value, such as topography or when the data deviates around\n zero.\n\n3. Cyclic: change in lightness of two different colors that meet in\n the middle and beginning/end at an unsaturated color; should be\n used for values that wrap around at the endpoints, such as phase\n angle, wind direction, or time of day.\n\n4. Qualitative: often are miscellaneous colors; should be used to\n represent information which does not have ordering or\n relationships.\n\n",
"_____no_output_____"
]
],
[
[
"# sphinx_gallery_thumbnail_number = 2\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom colorspacious import cspace_converter\nfrom collections import OrderedDict\n\ncmaps = OrderedDict()",
"_____no_output_____"
]
],
[
[
"Sequential\n----------\n\nFor the Sequential plots, the lightness value increases monotonically through\nthe colormaps. This is good. Some of the $L^*$ values in the colormaps\nspan from 0 to 100 (binary and the other grayscale), and others start around\n$L^*=20$. Those that have a smaller range of $L^*$ will accordingly\nhave a smaller perceptual range. Note also that the $L^*$ function varies\namongst the colormaps: some are approximately linear in $L^*$ and others\nare more curved.\n\n",
"_____no_output_____"
]
],
[
[
"cmaps['Perceptually Uniform Sequential'] = [\n 'viridis', 'plasma', 'inferno', 'magma', 'cividis']\n\ncmaps['Sequential'] = [\n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']",
"_____no_output_____"
]
],
[
[
"Sequential2\n-----------\n\nMany of the $L^*$ values from the Sequential2 plots are monotonically\nincreasing, but some (autumn, cool, spring, and winter) plateau or even go both\nup and down in $L^*$ space. Others (afmhot, copper, gist_heat, and hot)\nhave kinks in the $L^*$ functions. Data that is being represented in a\nregion of the colormap that is at a plateau or kink will lead to a perception of\nbanding of the data in those values in the colormap (see [mycarta-banding]_ for\nan excellent example of this).\n\n",
"_____no_output_____"
]
],
[
[
"cmaps['Sequential (2)'] = [\n 'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',\n 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',\n 'hot', 'afmhot', 'gist_heat', 'copper']",
"_____no_output_____"
]
],
[
[
"Diverging\n---------\n\nFor the Diverging maps, we want to have monotonically increasing $L^*$\nvalues up to a maximum, which should be close to $L^*=100$, followed by\nmonotonically decreasing $L^*$ values. We are looking for approximately\nequal minimum $L^*$ values at opposite ends of the colormap. By these\nmeasures, BrBG and RdBu are good options. coolwarm is a good option, but it\ndoesn't span a wide range of $L^*$ values (see grayscale section below).\n\n",
"_____no_output_____"
]
],
[
[
"cmaps['Diverging'] = [\n 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',\n 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']",
"_____no_output_____"
]
],
[
[
"Cyclic\n------\n\nFor Cyclic maps, we want to start and end on the same color, and meet a\nsymmetric center point in the middle. $L^*$ should change monotonically\nfrom start to middle, and inversely from middle to end. It should be symmetric\non the increasing and decreasing side, and only differ in hue. At the ends and\nmiddle, $L^*$ will reverse direction, which should be smoothed in\n$L^*$ space to reduce artifacts. See [kovesi-colormaps]_ for more\ninformation on the design of cyclic maps.\n\nThe often-used HSV colormap is included in this set of colormaps, although it\nis not symmetric to a center point. Additionally, the $L^*$ values vary\nwidely throughout the colormap, making it a poor choice for representing data\nfor viewers to see perceptually. See an extension on this idea at\n[mycarta-jet]_.\n\n",
"_____no_output_____"
]
],
[
[
"cmaps['Cyclic'] = ['twilight', 'twilight_shifted', 'hsv']",
"_____no_output_____"
]
],
[
[
"Qualitative\n-----------\n\nQualitative colormaps are not aimed at being perceptual maps, but looking at the\nlightness parameter can verify that for us. The $L^*$ values move all over\nthe place throughout the colormap, and are clearly not monotonically increasing.\nThese would not be good options for use as perceptual colormaps.\n\n",
"_____no_output_____"
]
],
[
[
"cmaps['Qualitative'] = ['Pastel1', 'Pastel2', 'Paired', 'Accent',\n 'Dark2', 'Set1', 'Set2', 'Set3',\n 'tab10', 'tab20', 'tab20b', 'tab20c']",
"_____no_output_____"
]
],
[
[
"Miscellaneous\n-------------\n\nSome of the miscellaneous colormaps have particular uses for which\nthey have been created. For example, gist_earth, ocean, and terrain\nall seem to be created for plotting topography (green/brown) and water\ndepths (blue) together. We would expect to see a divergence in these\ncolormaps, then, but multiple kinks may not be ideal, such as in\ngist_earth and terrain. CMRmap was created to convert well to\ngrayscale, though it does appear to have some small kinks in\n$L^*$. cubehelix was created to vary smoothly in both lightness\nand hue, but appears to have a small hump in the green hue area.\n\nThe often-used jet colormap is included in this set of colormaps. We can see\nthat the $L^*$ values vary widely throughout the colormap, making it a\npoor choice for representing data for viewers to see perceptually. See an\nextension on this idea at [mycarta-jet]_.\n\n",
"_____no_output_____"
]
],
[
[
"cmaps['Miscellaneous'] = [\n 'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',\n 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg',\n 'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar']",
"_____no_output_____"
]
],
[
[
"\nFirst, we'll show the range of each colormap. Note that some seem\nto change more \"quickly\" than others.\n\n",
"_____no_output_____"
]
],
[
[
"nrows = max(len(cmap_list) for cmap_category, cmap_list in cmaps.items())\ngradient = np.linspace(0, 1, 256)\ngradient = np.vstack((gradient, gradient))\n\n\ndef plot_color_gradients(cmap_category, cmap_list, nrows):\n fig, axes = plt.subplots(nrows=nrows)\n fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)\n axes[0].set_title(cmap_category + ' colormaps', fontsize=14)\n\n for ax, name in zip(axes, cmap_list):\n ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))\n pos = list(ax.get_position().bounds)\n x_text = pos[0] - 0.01\n y_text = pos[1] + pos[3]/2.\n fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)\n\n # Turn off *all* ticks & spines, not just the ones with colormaps.\n for ax in axes:\n ax.set_axis_off()\n\n\nfor cmap_category, cmap_list in cmaps.items():\n plot_color_gradients(cmap_category, cmap_list, nrows)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Lightness of matplotlib colormaps\n=================================\n\nHere we examine the lightness values of the matplotlib colormaps.\nNote that some documentation on the colormaps is available\n([list-colormaps]_).\n\n",
"_____no_output_____"
]
],
[
[
"mpl.rcParams.update({'font.size': 12})\n\n# Number of colormap per subplot for particular cmap categories\n_DSUBS = {'Perceptually Uniform Sequential': 5, 'Sequential': 6,\n 'Sequential (2)': 6, 'Diverging': 6, 'Cyclic': 3,\n 'Qualitative': 4, 'Miscellaneous': 6}\n\n# Spacing between the colormaps of a subplot\n_DC = {'Perceptually Uniform Sequential': 1.4, 'Sequential': 0.7,\n 'Sequential (2)': 1.4, 'Diverging': 1.4, 'Cyclic': 1.4,\n 'Qualitative': 1.4, 'Miscellaneous': 1.4}\n\n# Indices to step through colormap\nx = np.linspace(0.0, 1.0, 100)\n\n# Do plot\nfor cmap_category, cmap_list in cmaps.items():\n\n # Do subplots so that colormaps have enough space.\n # Default is 6 colormaps per subplot.\n dsub = _DSUBS.get(cmap_category, 6)\n nsubplots = int(np.ceil(len(cmap_list) / dsub))\n\n # squeeze=False to handle similarly the case of a single subplot\n fig, axes = plt.subplots(nrows=nsubplots, squeeze=False,\n figsize=(7, 2.6*nsubplots))\n\n for i, ax in enumerate(axes.flat):\n\n locs = [] # locations for text labels\n\n for j, cmap in enumerate(cmap_list[i*dsub:(i+1)*dsub]):\n\n # Get RGB values for colormap and convert the colormap in\n # CAM02-UCS colorspace. lab[0, :, 0] is the lightness.\n rgb = cm.get_cmap(cmap)(x)[np.newaxis, :, :3]\n lab = cspace_converter(\"sRGB1\", \"CAM02-UCS\")(rgb)\n\n # Plot colormap L values. Do separately for each category\n # so each plot can be pretty. 
To make scatter markers change\n # color along plot:\n # http://stackoverflow.com/questions/8202605/matplotlib-scatterplot-colour-as-a-function-of-a-third-variable\n\n if cmap_category == 'Sequential':\n # These colormaps all start at high lightness but we want them\n # reversed to look nice in the plot, so reverse the order.\n y_ = lab[0, ::-1, 0]\n c_ = x[::-1]\n else:\n y_ = lab[0, :, 0]\n c_ = x\n\n dc = _DC.get(cmap_category, 1.4) # cmaps horizontal spacing\n ax.scatter(x + j*dc, y_, c=c_, cmap=cmap, s=300, linewidths=0.0)\n\n # Store locations for colormap labels\n if cmap_category in ('Perceptually Uniform Sequential',\n 'Sequential'):\n locs.append(x[-1] + j*dc)\n elif cmap_category in ('Diverging', 'Qualitative', 'Cyclic',\n 'Miscellaneous', 'Sequential (2)'):\n locs.append(x[int(x.size/2.)] + j*dc)\n\n # Set up the axis limits:\n # * the 1st subplot is used as a reference for the x-axis limits\n # * lightness values goes from 0 to 100 (y-axis limits)\n ax.set_xlim(axes[0, 0].get_xlim())\n ax.set_ylim(0.0, 100.0)\n\n # Set up labels for colormaps\n ax.xaxis.set_ticks_position('top')\n ticker = mpl.ticker.FixedLocator(locs)\n ax.xaxis.set_major_locator(ticker)\n formatter = mpl.ticker.FixedFormatter(cmap_list[i*dsub:(i+1)*dsub])\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_tick_params(rotation=50)\n\n ax.set_xlabel(cmap_category + ' colormaps', fontsize=14)\n fig.text(0.0, 0.55, 'Lightness $L^*$', fontsize=12,\n transform=fig.transFigure, rotation=90)\n\n fig.tight_layout(h_pad=0.0, pad=1.5)\n plt.show()",
"_____no_output_____"
]
],
[
[
"Grayscale conversion\n====================\n\nIt is important to pay attention to conversion to grayscale for color\nplots, since they may be printed on black and white printers. If not\ncarefully considered, your readers may end up with indecipherable\nplots because the grayscale changes unpredictably through the\ncolormap.\n\nConversion to grayscale is done in many different ways [bw]_. Some of the better\nones use a linear combination of the rgb values of a pixel, but weighted\naccording to how we perceive color intensity. A nonlinear method of conversion\nto grayscale is to use the $L^*$ values of the pixels. In general, similar\nprinciples apply for this question as they do for presenting one's information\nperceptually; that is, if a colormap is chosen that is monotonically increasing\nin $L^*$ values, it will print in a reasonable manner to grayscale.\n\nWith this in mind, we see that the Sequential colormaps have reasonable\nrepresentations in grayscale. Some of the Sequential2 colormaps have decent\nenough grayscale representations, though some (autumn, spring, summer, winter)\nhave very little grayscale change. If a colormap like this was used in a plot\nand then the plot was printed to grayscale, a lot of the information may map to\nthe same gray values. The Diverging colormaps mostly vary from darker gray on\nthe outer edges to white in the middle. Some (PuOr and seismic) have noticeably\ndarker gray on one side than the other and therefore are not very symmetric.\ncoolwarm has little range of gray scale and would print to a more uniform plot,\nlosing a lot of detail. Note that overlaid, labeled contours could help\ndifferentiate between one side of the colormap vs. the other since color cannot\nbe used once a plot is printed to grayscale. Many of the Qualitative and\nMiscellaneous colormaps, such as Accent, hsv, and jet, change from darker to\nlighter and back to darker gray throughout the colormap. 
This would make it\nimpossible for a viewer to interpret the information in a plot once it is\nprinted in grayscale.\n\n",
"_____no_output_____"
]
],
[
[
"mpl.rcParams.update({'font.size': 14})\n\n# Indices to step through colormap.\nx = np.linspace(0.0, 1.0, 100)\n\ngradient = np.linspace(0, 1, 256)\ngradient = np.vstack((gradient, gradient))\n\n\ndef plot_color_gradients(cmap_category, cmap_list):\n fig, axes = plt.subplots(nrows=len(cmap_list), ncols=2)\n fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99,\n wspace=0.05)\n fig.suptitle(cmap_category + ' colormaps', fontsize=14, y=1.0, x=0.6)\n\n for ax, name in zip(axes, cmap_list):\n\n # Get RGB values for colormap.\n rgb = cm.get_cmap(plt.get_cmap(name))(x)[np.newaxis, :, :3]\n\n # Get colormap in CAM02-UCS colorspace. We want the lightness.\n lab = cspace_converter(\"sRGB1\", \"CAM02-UCS\")(rgb)\n L = lab[0, :, 0]\n L = np.float32(np.vstack((L, L, L)))\n\n ax[0].imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))\n ax[1].imshow(L, aspect='auto', cmap='binary_r', vmin=0., vmax=100.)\n pos = list(ax[0].get_position().bounds)\n x_text = pos[0] - 0.01\n y_text = pos[1] + pos[3]/2.\n fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)\n\n # Turn off *all* ticks & spines, not just the ones with colormaps.\n for ax in axes.flat:\n ax.set_axis_off()\n\n plt.show()\n\n\nfor cmap_category, cmap_list in cmaps.items():\n\n plot_color_gradients(cmap_category, cmap_list)",
"_____no_output_____"
]
],
[
[
"Color vision deficiencies\n=========================\n\nThere is a lot of information available about color blindness (*e.g.*,\n[colorblindness]_). Additionally, there are tools available to convert images to\nhow they look for different types of color vision deficiencies (*e.g.*,\n[vischeck]_).\n\nThe most common form of color vision deficiency involves differentiating between\nred and green. Thus, avoiding colormaps with both red and green will avoid many\nproblems in general.\n\n\nReferences\n==========\n\n.. [colorcet] https://github.com/bokeh/colorcet\n.. [Ware] http://ccom.unh.edu/sites/default/files/publications/Ware_1988_CGA_Color_sequences_univariate_maps.pdf\n.. [Moreland] http://www.kennethmoreland.com/color-maps/ColorMapsExpanded.pdf\n.. [list-colormaps] https://gist.github.com/endolith/2719900#id7\n.. [mycarta-banding] https://mycarta.wordpress.com/2012/10/14/the-rainbow-is-deadlong-live-the-rainbow-part-4-cie-lab-heated-body/\n.. [mycarta-jet] https://mycarta.wordpress.com/2012/10/06/the-rainbow-is-deadlong-live-the-rainbow-part-3/\n.. [kovesi-colormaps] https://arxiv.org/abs/1509.03700\n.. [bw] http://www.tannerhelland.com/3643/grayscale-image-algorithm-vb6/\n.. [colorblindness] http://www.color-blindness.com/\n.. [vischeck] http://www.vischeck.com/vischeck/\n.. [IBM] http://www.research.ibm.com/people/l/lloydt/color/color.HTM\n\n",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec64d8fc256e768acc1a35adf9577eb7c46f4953 | 415,084 | ipynb | Jupyter Notebook | doc/source/ipynb/lprec.ipynb | aps-7bm/tomopy | ccf2cfa2df1aba8987e7a8827ac360d0b5534c55 | [
"BSD-3-Clause"
]
| 229 | 2015-02-13T08:10:58.000Z | 2022-03-30T00:19:32.000Z | doc/source/ipynb/lprec.ipynb | aps-7bm/tomopy | ccf2cfa2df1aba8987e7a8827ac360d0b5534c55 | [
"BSD-3-Clause"
]
| 459 | 2015-01-02T16:33:23.000Z | 2022-03-29T05:30:11.000Z | doc/source/ipynb/lprec.ipynb | aps-7bm/tomopy | ccf2cfa2df1aba8987e7a8827ac360d0b5534c55 | [
"BSD-3-Clause"
]
| 272 | 2015-01-11T13:59:58.000Z | 2022-03-06T10:22:15.000Z | 983.611374 | 79,160 | 0.956546 | [
[
[
"\n# TomoPy with LPrec\n\nHere is an example on how to use the [log-polar based method](https://github.com/math-vrn/lprec) for reconstruction with TomoPy.\n\nTo reconstruct the image with the LPrec instead of TomoPy, change the ``algorithm`` keyword to ``tomopy.lprec``. Specify which LPrec algorithm to reconstruct with the ``lpmethod`` keyword.",
"_____no_output_____"
],
[
"These two cells are an abbreviated setup for [Reconstruction with TomoPy](tomopy.rst).",
"_____no_output_____"
]
],
[
[
"import dxchange\nimport matplotlib.pyplot as plt\nimport tomopy",
"_____no_output_____"
],
[
"proj, flat, dark, theta = dxchange.read_aps_32id(\n fname='../../../source/tomopy/data/tooth.h5',\n sino=(0, 2),\n)\nproj = tomopy.normalize(proj, flat, dark)\nrot_center = 296",
"_____no_output_____"
]
],
[
[
"Note that with LPrec, there can be no negative values after the transmission tomography linearization:",
"_____no_output_____"
]
],
[
[
"proj = tomopy.minus_log(proj)\nproj[proj < 0] = 0 # no values less than zero with lprec",
"_____no_output_____"
]
],
[
[
"Reconstruction using FBP method with the log-polar coordinates.\n$$ \\hat{f}=\\mathcal{W}\\mathcal{R}^* g $$",
"_____no_output_____"
]
],
[
[
"recon = tomopy.recon(proj,\n theta,\n center=rot_center,\n algorithm=tomopy.lprec,\n lpmethod='fbp',\n filter_name='parzen')\nrecon = tomopy.circ_mask(recon, axis=0, ratio=0.95)\nplt.imshow(recon[0, :, :])\nplt.show()",
"Reconstructing 48 slice groups with 2 master threads...\n"
]
],
[
[
"Reconstruction using the gradient descent method with the log-polar coordinates.\n$$ \\hat{f} = \\text{argmin}_f\\lVert\\mathcal{R}f-g \\rVert_2^2 $$",
"_____no_output_____"
]
],
[
[
"recon = tomopy.recon(proj,\n theta,\n center=rot_center,\n algorithm=tomopy.lprec,\n lpmethod='grad',\n ncore=1,\n num_iter=64,\n reg_par=-1)\nrecon = tomopy.circ_mask(recon, axis=0, ratio=0.95)\nplt.imshow(recon[0, :, :])\nplt.show()",
"Reconstructing 1 slice groups with 1 master threads...\n"
]
],
[
[
"Reconstruction using the conjugate gradient method with the log-polar coordinates.\n$$ \\hat{f} = \\text{argmin}_f\\lVert\\mathcal{R}f-g \\rVert_2^2 $$",
"_____no_output_____"
]
],
[
[
"recon = tomopy.recon(proj,\n theta,\n center=rot_center,\n algorithm=tomopy.lprec,\n lpmethod='cg',\n ncore=1,\n num_iter=16,\n reg_par=-1)\nrecon = tomopy.circ_mask(recon, axis=0, ratio=0.95)\nplt.imshow(recon[0, :, :])\nplt.show()",
"Reconstructing 1 slice groups with 1 master threads...\n"
]
],
[
[
"Reconstruction using the TV method with the log-polar coordinates. It gives piecewise constant reconstructions and can be used for denoising.\n$$ \\hat{f} = \\text{argmin}_f\\lVert\\mathcal{R}f-g \\rVert_2^2 + \\lambda \\lVert\\nabla f\\rVert_1 $$",
"_____no_output_____"
]
],
[
[
"recon = tomopy.recon(proj,\n theta,\n center=rot_center,\n algorithm=tomopy.lprec,\n lpmethod='tv',\n ncore=1,\n num_iter=512,\n reg_par=5e-4)\nrecon = tomopy.circ_mask(recon, axis=0, ratio=0.95)\nplt.imshow(recon[0, :, :])\nplt.show()",
"Reconstructing 1 slice groups with 1 master threads...\n"
]
],
[
[
"Reconstruction using the TV-entropy method with the log-polar coordinates. It can be used for suppressing Poisson noise.\n$$ \\hat{f} = \\text{argmin}_f \\lambda \\lVert\\nabla f\\rVert_1+\\int_\\Omega\\mathcal{R}f-g\\log(\\mathcal{R}f)df $$",
"_____no_output_____"
]
],
[
[
"recon = tomopy.recon(proj,\n theta,\n center=rot_center,\n algorithm=tomopy.lprec,\n lpmethod='tve',\n ncore=1,\n num_iter=512,\n reg_par=2e-4)\nrecon = tomopy.circ_mask(recon, axis=0, ratio=0.95)\nplt.imshow(recon[0, :, :])\nplt.show()",
"Reconstructing 1 slice groups with 1 master threads...\n"
]
],
[
[
"Reconstruction using the TV-l1 method with the log-polar coordinates. It can be used to remove\nstructures of an image of a certain scale, and the regularization parameter $\\lambda$ can be used for scale selection. \n$$ \\hat{f} = \\text{argmin}_f\\lVert\\mathcal{R}f-g \\rVert_1 + \\lambda \\lVert\\nabla f\\rVert_1 $$\n",
"_____no_output_____"
]
],
[
[
"recon = tomopy.recon(proj,\n theta,\n center=rot_center,\n algorithm=tomopy.lprec,\n lpmethod='tvl1',\n ncore=1,\n num_iter=512,\n reg_par=3e-2)\nrecon = tomopy.circ_mask(recon, axis=0, ratio=0.95)\nplt.imshow(recon[0, :, :])\nplt.show()",
"Reconstructing 1 slice groups with 1 master threads...\n"
]
],
[
[
"Reconstruction using the MLEM method with the log-polar coordinates.",
"_____no_output_____"
]
],
[
[
"recon = tomopy.recon(proj,\n theta,\n center=rot_center,\n algorithm=tomopy.lprec,\n lpmethod='em',\n ncore=1,\n num_iter=64,\n reg_par=0.05)\nrecon = tomopy.circ_mask(recon, axis=0, ratio=0.95)\nplt.imshow(recon[0, :, :])\nplt.show()",
"Reconstructing 1 slice groups with 1 master threads...\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec64e72ba54a1d6b2db8d0fd10215c72b7fbb559 | 84,743 | ipynb | Jupyter Notebook | ETL.ipynb | Melow49/Youtube-US-comment-reviews-Project | 85d2427b5a7939a9a5b3097c668c060cadd01d6a | [
"MIT"
]
| null | null | null | ETL.ipynb | Melow49/Youtube-US-comment-reviews-Project | 85d2427b5a7939a9a5b3097c668c060cadd01d6a | [
"MIT"
]
| null | null | null | ETL.ipynb | Melow49/Youtube-US-comment-reviews-Project | 85d2427b5a7939a9a5b3097c668c060cadd01d6a | [
"MIT"
]
| null | null | null | 43.52491 | 13,936 | 0.497268 | [
[
[
"import pandas as pd\nfrom sqlalchemy import create_engine\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nplt.style.use('ggplot')",
"_____no_output_____"
],
[
"#open videos csv file\n\nvideos = pd.read_csv('./Resources/USvideos.csv',error_bad_lines=False)\n\nvideos.head()",
"b'Skipping line 2401: expected 11 fields, saw 21\\nSkipping line 2800: expected 11 fields, saw 21\\nSkipping line 5297: expected 11 fields, saw 12\\nSkipping line 5299: expected 11 fields, saw 12\\nSkipping line 5300: expected 11 fields, saw 12\\nSkipping line 5301: expected 11 fields, saw 12\\n'\n"
],
[
"#open comments file \ncomments = pd.read_csv('./Resources/UScomments.csv', error_bad_lines=False)\ncomments.head()",
"b'Skipping line 41589: expected 4 fields, saw 11\\nSkipping line 51628: expected 4 fields, saw 7\\nSkipping line 114465: expected 4 fields, saw 5\\n'\nb'Skipping line 142496: expected 4 fields, saw 8\\nSkipping line 189732: expected 4 fields, saw 6\\nSkipping line 245218: expected 4 fields, saw 7\\n'\nb'Skipping line 388430: expected 4 fields, saw 5\\n'\n/Users/miladhazrati/anaconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py:2785: DtypeWarning: Columns (2,3) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
],
[
"videos['category_id'] = videos['category_id'].astype(int)\nvideos.head()",
"_____no_output_____"
],
[
"#total comments by us customers\ntotal_comment= videos[\"comment_total\"].max()\ntotal_comment",
"_____no_output_____"
],
[
"#merging both file into one \nyoutube_df = pd.merge(videos, comments, on=\"video_id\")\nyoutube_df.head()",
"_____no_output_____"
],
[
"#convert the date in to regular format\n\nyoutube_df['date'] = pd.to_datetime(youtube_df.date, format=\"%d.%m\")\nyoutube_df.head()",
"_____no_output_____"
],
[
"#rename all the column names\n\nyoutube_df = youtube_df.rename(columns={\"video_id\":\"video_id\",\n \"title\": \"title\",\n \"channel_title\": \"channel\",\n \"category_id\": \"category\",\n \"tags\": \"tags\",\n \"views\": \"views\",\n \"likes_x\":\"likes\",\n \"dislikes\": \"dislike\",\n \"comment_total\": \"total-comment\",\n \"thumbnail_link\": \"thumbnail-links\",\n \"date\": \"date\",\n \"comment_text\": \"text-comments\",\n \"likes_y\": \"likes-y\",\n \"replies\": \"replies\"\n })\nyoutube_df.head()\n",
"_____no_output_____"
],
[
"USVideo_Likes = youtube_df.sort_values('likes', ascending=False).drop_duplicates('video_id').sort_index()\nUSVideo_Likes.groupby('category').likes.mean().plot(kind=\"bar\")\nprint(USVideo_Likes.groupby('category').likes.mean())\nplt.title(\"Average Likes per Category\")\nplt.show()",
"category\n1 22582.793814\n2 13033.714286\n10 68134.164134\n15 16028.432432\n17 12280.625899\n19 12025.294118\n20 21085.333333\n22 33875.609756\n23 56468.828571\n24 29488.078603\n25 4429.817143\n26 29031.834615\n27 17404.887640\n28 18632.423611\n29 411.000000\n43 114.000000\nName: likes, dtype: float64\n"
],
[
"#save it to output folder\nyoutube_df.to_csv(\"./Output/youtube.csv\")",
"_____no_output_____"
],
[
"#creating csv file 1\n\ntable_1df= youtube_df[[\"video_id\",\"title\",\"channel\",\"likes\",\"dislike\",\"category\"]].copy()\ntable_1df.head()",
"_____no_output_____"
],
[
"#save it to output folder\n\ntable_1df.to_csv(\"./Output/table_1df.csv\")",
"_____no_output_____"
],
[
"#creating csv file 2 for comments, tags and number of views\ntable_2df = youtube_df[[\"category\", \"video_id\",\"total-comment\",\"thumbnail-links\", \"views\", \"tags\", \"text-comments\"]].copy()\ntable_2df.head()",
"_____no_output_____"
],
[
"\ntable_2df.to_csv(\"./Output/table_2df.csv\")",
"_____no_output_____"
],
[
"#separate data set for categories and number of likes\n\ntable_3df = youtube_df[[\"category\", \"text-comments\",\"likes-y\", \"replies\",\"date\", \"likes\"]].copy()\ntable_3df.head()",
"_____no_output_____"
],
[
"#save it to output folder\n\ntable_3df.to_csv(\"./Output/table_3df.csv\")",
"_____no_output_____"
],
[
"#connect to pdadmin \n\nrds_connection_string = \"postgres:[email protected]/etl-db\"\nengine = create_engine(f'postgres://{rds_connection_string}')",
"/Users/miladhazrati/anaconda3/lib/python3.7/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.\n \"\"\")\n"
],
[
"#added the table to pgadmin\ntable_1df.to_sql(name='youtube', con=engine, if_exists='append', index=False)",
"_____no_output_____"
],
[
"#added 2nd table to pgadmin\ntable_2df.to_sql(name='comments', con=engine, if_exists='append', index=False)",
"_____no_output_____"
],
[
"#added 3rd table to pdadmin\ntable_3df.to_sql(name='category', con=engine, if_exists='append', index=False)",
"_____no_output_____"
],
[
"#successful to read the data set directly from pgadmin\n\n\npd.read_sql_query('select * from youtube', con=engine).head()",
"_____no_output_____"
],
[
"#reading 2nd dataset from pgadmin\n\n\npd.read_sql_query('select * from comments', con=engine).head()",
"_____no_output_____"
],
[
"#Reading category dataset from pgadmin\n\n\npd.read_sql_query('select * from category', con=engine).head()\n\n",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec64ed10ebd4da6611b27b6a8f0eb2cb163646d8 | 771,246 | ipynb | Jupyter Notebook | Update_data_processing.ipynb | bolossi509/Mini-Project-2 | a2a067c9b8c2b91decd79c04c4a66d0d68fbb370 | [
"MIT"
]
| null | null | null | Update_data_processing.ipynb | bolossi509/Mini-Project-2 | a2a067c9b8c2b91decd79c04c4a66d0d68fbb370 | [
"MIT"
]
| null | null | null | Update_data_processing.ipynb | bolossi509/Mini-Project-2 | a2a067c9b8c2b91decd79c04c4a66d0d68fbb370 | [
"MIT"
]
| null | null | null | 645.394142 | 76,498 | 0.717469 | [
[
[
"# Ayiti Analytics Data Processing Bootcamp\nAyiti Analytics Data wants to expand its training centers throughout all the communes of the country. Your role as a data analyst is to help them realize this dream.\n\nIts objective is to know which three communes of the country will be the most likely to expand its training centers.\n\nKnowing that each cohort must have 30 students\n \n* How many applications must be made to select 25% women for each on average\n\n* What are the most effective communication channels (Alumni, Facebook, WhatsApp, Friend ...) that will allow a student to be susceptible to selection\n \n* What is the average number of university students who should participate in this program\n* What will be the average number of applications per week that we could have\n* How many weeks should we extend the application process to select 60 students per commune?\n* If we were to do all the bootcamp online, who would be the best communes and how many applications would we need to select 30 student and what percentage of students would have a laptop, an internet connection, both at the same time\n* What are the most effective communication channels (Alumni, Facebook, WhatsApp, Friend ...) that will allow a women to be susceptible to selection\n\n### NB \nUse the same framework of the BA project to complete this project\n",
"_____no_output_____"
],
[
"# We import the libraries we will need",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom datetime import date \n",
"_____no_output_____"
]
],
[
[
"# Read CSV and Excel files",
"_____no_output_____"
]
],
[
[
"enroll = pd.read_csv(\"enroll.csv\")\nindustry = pd.read_csv(\"industry.csv\")\norder = pd.read_csv(\"ord.csv\")\nquest = pd.read_csv(\"quest.csv\")\nstudy_domain = pd.read_csv(\"study_domain.csv\")\ntechnology = pd.read_csv(\"technology.csv\")\ntransaction = pd.read_csv(\"transaction.csv\")\nGeo = pd.read_excel('commune.xlsx', sheet_name= 0)",
"_____no_output_____"
]
],
[
[
"# Data Processing...\n ",
"_____no_output_____"
]
],
[
[
"# Before the merging, we clean the created_at columns to make better process of the data \n#This code will turn all the values in the columns in string\nquest['department'] = quest['department'].apply(lambda x : str(x))\n#This code will transform all the strings in uppercase. The goal is to match the value in the quest dataset with commune dataset \nquest['department']= quest['department'].apply(lambda x : x.upper())\nquest['commune']= quest['commune'].apply(lambda x : x.upper())\n",
"_____no_output_____"
],
[
"#Here we can process with the merging\ntable1 = pd.merge(quest,Geo, how = 'left',left_on =['department','commune'],right_on = ['ADM1_PCODE','Commune_Id'])\ntable1.head()",
"_____no_output_____"
],
[
"#We will get rid of the timestamp in the values of the column created_at dataset\ny = pd.DataFrame(table1.created_at.str.split('T',1).tolist(),columns = ['Date_created_at','To_drop'])\ntable1['created_at'] = y.iloc[:,0]\ntable1['created_at'].head()",
"_____no_output_____"
],
[
"#We start our data merging with the enroll and transaction dataset\ntable3=pd.merge(enroll,transaction, how = 'right',left_on =['user_id'],right_on = ['user_id'] )\n# We do the same process with the enroll and ord dataset\ntable4 =pd.merge(enroll,order, how = 'right',left_on =['user_id'],right_on = ['user_id'] )\n#We create a new column with categorical variable on each table to specify the name of the payment\ntable3['Payment Method'] = 'Moncash'\ntable4['Payment Method'] = 'Credit Card/Paypal'\n",
"_____no_output_____"
],
[
"# Here we did and slicing and data cleaning \nx = table4.loc[:,['Payment Method','quest_id_y']]\ny = table3.loc[:,['Payment Method','quest_id']]\nx=x.rename(columns = {'quest_id_y': 'quest_id'})\n#Let's concatenate the dataframe \nz = pd.concat([x,y],axis = 0)\ndisplay(z.head(20))",
"_____no_output_____"
],
[
"final = pd.merge(table1,z,how = 'left', left_on = 'quest_id', right_on= 'quest_id')\nfinal.reset_index(inplace = True ,level = 0)\nfinal['Payment Method'] = final['Payment Method'].fillna('No Payment')\nfinal =final.loc[:,['gender','dob','commune','created_at','department','education_level','university','current_employed','formal_sector_job','have_computer_home','internet_at_home','hear_AA_1','after_AA','quest_id','Commune_en','Commune_Id','Departement','Payment Method']]\nfinal = final.set_index('quest_id')\n\n",
"_____no_output_____"
],
[
"print('Here we are displaying the head of our final dataframe \\n')\ndisplay(final.head())\n",
"Here we are displaying the head of our final dataframe \n\n"
],
[
"\nprint('We are checking if our dataset has Na values \\n' )\ndisplay(final.isna().sum())\n",
"We are checking if our dataset has Na values \n\n"
],
[
"print('The column date of birth has 6 Na values \\n' )\ndisplay(final[final['dob'].isna()])\nprint('We will clean the missing values when wil be working on the dates columns')",
"The column date of birth has 6 Na values \n\n"
],
[
"#Working on the Date and Age of all applicants\n# We will work on the date of birth columns.\nfinal['dob'].replace({'3 aout 1977':'03/08/1977'},inplace = True)\nfinal['dob'] = pd.to_datetime(final['dob'])\nfinal['created_at'] = pd.to_datetime(final['created_at'])\nfinal['dob']",
"_____no_output_____"
]
],
[
[
"\n## ",
"_____no_output_____"
]
],
[
[
"#II - Setting up the Age columns and cleaning incorrect data.\n\n#N.B : We replace incorrect data about age with the mean of the age of the applicants\n\n\n\ndef Calculate_Age(born) :\n \"\"\"\n This function will allow us to calcute the age of all the applicants by the end of the application process\n \"\"\"\n today = date(2021,6,18)\n return today.year - born.year - ((today.month,today.day)< (born.month,born.day))\n\nfinal['Age'] = final['dob'].apply(Calculate_Age)\nfinal['Age'] = final['Age'].fillna(final['Age'].mean())\n#we find incorrect input in dataset. we replace with mean of the age of the applicants\nfinal['Age'].replace({(1,0,-1):final['Age'].mean()},inplace = True)\nfinal['Age'] = final['Age'].apply(lambda x : int(x))\nmove = final.pop('Age')\nfinal.insert(2,'Age',move)\nfinal=final.rename(columns = {' Date_of_registration': 'Date'})\nfinal.columns",
"_____no_output_____"
],
[
"display(final['Age'].groupby(by= final['gender']).describe())\nplt.figure(figsize=(10,6))\nplt.title('Average age of applicants by Gender')\nsns.boxplot(data=final,x=\"gender\",y=\"Age\")",
"_____no_output_____"
],
[
"display(final['Age'].groupby(by= final['education_level']).describe())\nplt.figure(figsize=(12,6))\nsns.boxplot(data=final,x=\"education_level\",y=\"Age\")\nplt.title('Boxplot of applicants by education level')\n",
"_____no_output_____"
],
[
"\ndef generate_barchart(data=final, title =\"\",abs_value =\"Total\",rel_value=\"Percent\",figsize =(15,8),ylabel=\"\") :\n '''\n This function we define here will generate bar chart with more information on the data \n '''\n plt.figure(figsize=figsize)\n axes = sns.barplot(data=data,x=data.index,y=abs_value)\n i=0\n for tot, perc in zip(data[abs_value],data[rel_value]):\n axes.text(i,\n tot/2,\n str(np.round(perc*100,2))+ \"%\",\n fontdict=dict(color='White',fontsize=12,horizontalalignment=\"center\")\n )\n axes.text(i,\n tot+ 3,\n str(tot),\n fontdict=dict(color='blue',fontsize=12,horizontalalignment=\"center\")\n )\n \n \n i+=1\n plt.title(title)\n plt.ylabel(ylabel)\n plt.show()\n",
"_____no_output_____"
],
[
"Com = final.Commune_en.value_counts().to_frame().iloc[:4,:]\nCom.rename(columns={\"Commune_en\":\"Total\"},inplace=True)\nCom1 = final.Commune_en.value_counts(normalize = True).to_frame().iloc[:4,:]\nCom1.rename(columns={\"Commune_en\":\"Percent\"},inplace=True)\nresul = pd.concat([Com,Com1],axis =1)\n\nresul.reset_index()\ndisplay(resul)\n\ngenerate_barchart(resul,title =\"4 Communes with the more applicants\", abs_value=\"Total\",rel_value='Percent', ylabel='' )",
"_____no_output_____"
],
[
"payment_table = pd.pivot_table(final,'Age',index='Commune_en',columns=['Payment Method'],aggfunc = ['count'],fill_value=0)\npayment_table = payment_table.sort_values(by= ('count','Moncash'),ascending = False).iloc[:5,:2]\npayment_table",
"_____no_output_____"
]
],
[
[
"\n## How many applications must be made to select 25% women for each on average ?",
"_____no_output_____"
]
],
[
[
"gender_table = pd.pivot_table(final,'Age',index='Commune_en',columns=['gender'],aggfunc = ['count'],fill_value=0)\ngender_table = gender_table.sort_values(by= ('count','female'),ascending = False)\ngender_table['Total'] =gender_table[('count','female')] +gender_table[('count','male')]\n\ngender_table['Percentage of female'] =gender_table[('count','female')]/(gender_table[('Total')])\nt =gender_table.iloc[:4,:] \nt\n",
"_____no_output_____"
],
[
"#gender_table.iloc[:4,:] \nt['mean']= np.round(t['Percentage of female'].mean(),4)\n#Determinons le pourcentage d'applications manquants en moyenne par commune pour atteindre les 25%\nt['perct_mean'] = 0.25 - t['mean']\nt['expectation'] = np.round((t['perct_mean']) * t['Total'],0)\nt['mean_excp'] = np.round((t['expectation'] + t[('count','female')]) / (t['Total'] + t['expectation']),4)\nt['mean2'] = np.round(t['mean_excp'].mean(),2)",
"<ipython-input-27-0b63ccf314ee>:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n t['mean']= np.round(t['Percentage of female'].mean(),4)\n<ipython-input-27-0b63ccf314ee>:4: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n t['perct_mean'] = 0.25 - t['mean']\n<ipython-input-27-0b63ccf314ee>:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n t['expectation'] = np.round((t['perct_mean']) * t['Total'],0)\n<ipython-input-27-0b63ccf314ee>:6: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n t['mean_excp'] = np.round((t['expectation'] + t[('count','female')]) / (t['Total'] + t['expectation']),4)\n<ipython-input-27-0b63ccf314ee>:7: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n t['mean2'] = np.round(t['mean_excp'].mean(),2)\n"
],
[
"print('The number applications of women to obtain 25 % on average should exceed :',np.round(t['expectation'].sum(),0), 'in the first 4 communes ')\ndisplay(t)",
"The number applications of women to obtain 25 % on average should exceed : 5.0 in the first 4 communes \n"
],
[
"education_table = pd.pivot_table(final,'Age',index='Commune_en',columns=['education_level'],aggfunc = ['count'],fill_value=0)\ndisplay(education_table.sort_values(by=('count','Bachelors (bacc +4)'),ascending = False).iloc[:4,[0,1,3]])\n",
"_____no_output_____"
]
],
[
[
"## What is the average number of university students who should participate in this program",
"_____no_output_____"
]
],
[
[
"#We created a pivot table with commune as index and education level as columns \neduca_level = pd.pivot_table(final,'Age',index='Commune_en',columns='education_level',aggfunc = ['count'],fill_value=0)\neduca_level = educa_level.sort_values(by=('count','Bachelors (bacc +4)'),ascending = False)\neduca_level=educa_level.iloc[:4,[0,1,3]]\n \neduca_level['sum'] = educa_level.sum(axis = 1)\neduca_level['total']= resul['Total']\neduca_level['mean'] = educa_level['sum']/educa_level['total']\ndisplay(educa_level)\n\n\nprint('The average number of university student who should participate in one cohort should be around',np.round(educa_level['mean'].mean(),1)*30)",
"_____no_output_____"
]
],
[
[
"To get this result, we calculate the average of the mean of all the university levels applicant in each commune.Then we use the proportion to find the number of applicants in one cohort",
"_____no_output_____"
]
],
[
[
"internet_access = pd.pivot_table(final,'Age',index='Commune_en',columns='internet_at_home',aggfunc = ['count'],fill_value=0)\ninternet_access = internet_access.sort_values(by=('count','Yes'),ascending = False)\ninternet_access = internet_access.iloc[:4,[1]]\ninternet_access['Percent'] = internet_access[('count','Yes')]/final.shape[0]\ninternet_access.rename(columns={\"count\":\"Total\"},inplace=True)\ndisplay(internet_access)\ngenerate_barchart(internet_access,title = \"4 communes where applicants have access to internet\", abs_value = ('Total','Yes') , rel_value = 'Percent',ylabel='Number of applicants that have both access to internet')\n",
"_____no_output_____"
]
],
[
[
"This graph here shows the 3 communes beside Port-au-Prince that have a higher access to internet.Delmas has the higher percentage of applicants that have access to internet followed by Petion-ville and Carrefour.",
"_____no_output_____"
]
],
[
[
"computer_access = pd.pivot_table(final,'Age',index='Commune_en',columns='have_computer_home',aggfunc = ['count'],fill_value=0)\ncomputer_access = computer_access.sort_values(by=('count','Yes'),ascending = False)\ncomputer_access = computer_access.iloc[:4,[1]]\ncomputer_access\ncomputer_access['Percent'] =computer_access[('count','Yes')]/final.shape[0]\ncomputer_access.rename(columns={\"count\":\"Total\"},inplace=True)\ndisplay(computer_access)\ngenerate_barchart(computer_access,title = \"4 communes where applicants have access to computer at home\", abs_value = ('Total','Yes') , rel_value = 'Percent',ylabel='Number of applicants that have both access to computer')",
"_____no_output_____"
]
],
[
[
"This graph here shows the 3 communes beside Port-au-Prince that have a higher access to computer at Home.Delmas has the higher percentage of applicants that have computer at home followed by Petion-ville and Carrefour",
"_____no_output_____"
],
[
"This graph here shows the 3 communes beside Port-au-Prince that have a higher access to computer at Home and internet at home.Delmas has the higher percentage of applicants that have computer at home followed by Petion-ville and Carrefour",
"_____no_output_____"
]
],
[
[
"reg = pd.pivot_table(final,'Age',index='Commune_en',columns=['internet_at_home','have_computer_home'],aggfunc = ['count'],fill_value=0)\nreg = reg.sort_values(by=('count','Yes','Yes'),ascending = False)\nreg = reg.iloc[:4,[2,3]]\nreg['Percent'] = reg[('count','Yes','Yes')]/final.shape[0]\nreg.rename(columns={\"count\":\"Total\"},inplace=True)\nprint(reg)\ngenerate_barchart(reg,title = \"4 communes where applicants have access to internet and computer\", abs_value = ('Total','Yes','Yes') , rel_value = 'Percent',ylabel='Number of applicants that have both access to internet and computer')\n\n",
" Total Percent\ninternet_at_home Yes \nhave_computer_home No Yes \nCommune_en \nDelmas 3 58 0.232\nPort-au-Prince 0 51 0.204\nPetion-Ville 4 21 0.084\nCarrefour 1 17 0.068\n"
],
[
"#Let's start by creating a pivot table on the communication channels and gender columns \nresult2 = pd.pivot_table(final,'Age',index =['hear_AA_1'] ,columns=['gender'],aggfunc=['count'],fill_value = 0)\n#we are sorting the values from biggest to smallest and filter the 3 first\nresult2= result2.sort_values(by = ('count','male'),ascending = False)\nresult2 = result2.iloc[:4,:]\nresult2",
"_____no_output_____"
]
],
[
[
"## What are the most effective communication channels (Alumni, Facebook, WhatsApp, Friend ...) that will allow a student to be susceptible to selection ?\n\n",
"_____no_output_____"
]
],
[
[
"#first we did slicing by gender then by payment method\nresul4 = final[final['gender'] == 'female']\nresul4 = final[(final['Payment Method'] == 'Moncash') | (final['Payment Method'] == 'Credit Card/Paypal')]\n#we did a pivot table on the column Hear AA and gender as gender \nresult4 = pd.pivot_table(resul4,'Age',index = ['gender'],columns=['hear_AA_1'],aggfunc=['count'],fill_value = 0)\nvalues = result4.T\ndisplay(values)\nvalues[\"Total\"] = values.sum(axis=1)\nvalues = values.sort_values(by =\"Total\",ascending=True)\nax =values.sort_values(by =\"Total\",ascending=True)[[\"male\",\"female\"]].plot(kind=\"barh\",figsize=(15,10),stacked= True,alpha =0.7)\nylab = ax.set_ylabel('Number of Paid Applicants')\nxlab = ax.set_xlabel('Communications Channels')\ntitle = ax.set_title('Which communications channels did the applicants that enroll for the course hear of AA ?')\n\n\nindex =0\nc =0\nfor male, female ,total in zip(values['male'],values[\"female\"],values[\"Total\"]):\n if male != total and female != total :\n ax.text(male/2 ,\n index,\n str(np.round((male/total)*100,1)) + \"%\",\n fontdict=dict(color='white',fontsize=10,horizontalalignment=\"center\",verticalalignment=\"center\")\n )\n ax.text(male + female/2 ,\n index,\n str(np.round((female/total)*100,1)) + \"%\",\n fontdict=dict(color='blue',fontsize=10,horizontalalignment=\"center\",verticalalignment=\"center\")\n )\n elif female == total:\n ax.text(female/2 ,\n index,\n str(np.round((female/total)*100,1)) + \"%\",\n fontdict=dict(color='blue',fontsize=10,horizontalalignment=\"center\",verticalalignment=\"center\")\n )\n else:\n ax.text(male/2 ,\n index,\n str(np.round((male/total)*100,1)) + \"%\",\n fontdict=dict(color='white',fontsize=10,horizontalalignment=\"center\",verticalalignment=\"center\")\n )\n ax.text(total+0.3 ,\n index,\n str(total),\n fontdict=dict(color='blue',fontsize=10,horizontalalignment=\"center\",verticalalignment=\"center\")\n )\n index+=1",
"_____no_output_____"
]
],
[
[
"## What are the most effective communication channels (Alumni, Facebook, WhatsApp, Friend ...) that will allow a women to be susceptible to selection ?",
"_____no_output_____"
]
],
[
[
"\nresul4 = final[(final['Payment Method'] == 'Moncash') | (final['Payment Method'] == 'Credit Card/Paypal')]\n\nresult5 = pd.pivot_table(resul4,'Age',index = ['gender'],columns=['hear_AA_1'],aggfunc=['count'],fill_value = 0)\nvalues = result5.T\nvalues = values.iloc[:,[0]]\ndisplay(values)\nvalues[\"Total\"] = values.sum(axis=1)\nvalues = values.sort_values(by =\"Total\",ascending=True)\nax =values.sort_values(by =\"Total\",ascending=True)[[\"female\"]].plot(kind=\"barh\",figsize=(15,10),stacked= True,alpha =0.7)\nylab = ax.set_ylabel('Number of Applicants')\nxlab = ax.set_xlabel('Communications Channels')\ntitle = ax.set_title('Which communications channels did the women that enroll hear of AA ?')\ndisplay(values)\nindex =0\nc =0\nfor female ,total in zip(values[\"female\"],values[\"Total\"]):\n if female != total :\n ax.text(female/2 ,\n index,\n str(np.round((female)*100,1)) + \"%\",\n fontdict=dict(color='white',fontsize=10,horizontalalignment=\"center\",verticalalignment=\"center\")\n )\n \n elif total==0:\n ax.text(female/2 + 1.0,\n index,\n 0,\n fontdict=dict(color='blue',fontsize=10,horizontalalignment=\"center\",verticalalignment=\"center\")\n )\n else:\n ax.text(total+0.3 ,\n index,\n str(total),\n fontdict=dict(color='blue',fontsize=10,horizontalalignment=\"center\",verticalalignment=\"center\")\n )\n index+=1\n",
"_____no_output_____"
]
],
[
[
"## - What will be the average number of applications per week that we could have ?",
"_____no_output_____"
]
],
[
[
"final['date'] = \"\"\ndg = final.groupby(pd.Grouper(key='created_at', freq='7D'))['date'].value_counts().to_frame()\ndg['Number of weeks'] = ['Week 1','Week 2','Week 3','Week 4','Week 5']\ndg = dg.set_index('Number of weeks')\n\n",
"_____no_output_____"
],
[
"#Setting up the mean column in the table \ndg['mean'] = dg.date.mean()\nprint('The average number of applications per week should be ',dg['mean'][0])\ndisplay(dg)\n\n#plotting the table\nplt.grid=True\nplt.plot(dg.index,dg.date,label='Registration')\nplt.plot(dg.index,dg['mean'],label='mean',linestyle = '--')\nplt.title('Applications per week')\nplt.show()\n",
"The average number of applications per week should be 50.0\n"
]
],
[
[
"\n### Conclusion\n\nAfter getting all of the insights through the dataset, The communes we have identified where Ayiti analytics could extend their training centers should be Delmas, Carrefour and Petion-ville. These communes have the higher access to internet and computer after the diagnostic analysis, but we will need to test our hypothesis.\n\n\nThey are also the top of communes with the more courses paid, applicants from these regions shows a lot of interest and if Ayiti analytics were to made an only online boot camp, these communes should be on top but there as many factors as internet penetrations by communes, electricity access that could undermine this project we will need more data to draw more insights about this question.\n\nAyiti analytics want to see more women engage in data science, we saw that the effective communications channels are Whatsapp, friends. Participating in a bootcamp can be challenging, Friends can be a great source of motivation as we saw mostly those who paid for the next steps hear from AA via a Friend or Social media.But still our analysis is based on descriptive analysis, and we will need to provide a more in-depth analysis",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
]
|
ec64f3abddf799f81e16c528b9c143507dee4b07 | 44,837 | ipynb | Jupyter Notebook | scientific_details_of_algorithms/linear_learner_class_weights_loss_functions/linear_learner_class_weights_loss_functions.ipynb | qidewenwhen/amazon-sagemaker-examples | 77f7ad7970381a3c9ab74fc8604ab8903ec55c9b | [
"Apache-2.0"
]
| null | null | null | scientific_details_of_algorithms/linear_learner_class_weights_loss_functions/linear_learner_class_weights_loss_functions.ipynb | qidewenwhen/amazon-sagemaker-examples | 77f7ad7970381a3c9ab74fc8604ab8903ec55c9b | [
"Apache-2.0"
]
| 1 | 2022-03-15T20:04:30.000Z | 2022-03-15T20:04:30.000Z | scientific_details_of_algorithms/linear_learner_class_weights_loss_functions/linear_learner_class_weights_loss_functions.ipynb | vivekmadan2/amazon-sagemaker-examples | 4ccb050067c5305a50db750df3444dbc85600d5f | [
"Apache-2.0"
]
| null | null | null | 51.418578 | 1,527 | 0.66521 | [
[
[
"# Train faster, more flexible models with Amazon SageMaker Linear Learner",
"_____no_output_____"
],
[
"Today Amazon SageMaker is launching several additional features to the built-in linear learner algorithm. Amazon SageMaker algorithms are designed to scale effortlessly to massive datasets and take advantage of the latest hardware optimizations for unparalleled speed. The Amazon SageMaker linear learner algorithm encompasses both linear regression and binary classification algorithms. These algorithms are used extensively in banking, fraud/risk management, insurance, and healthcare. The new features of linear learner are designed to speed up training and help you customize models for different use cases. Examples include classification with unbalanced classes, where one of your outcomes happens far less frequently than another. Or specialized loss functions for regression, where it’s more important to penalize certain model errors more than others.\n\nIn this blog post we'll cover three things:\n1. Early stopping and saving the best model\n1. New ways to customize linear learner models, including:\n * Hinge loss (support vector machines)\n * Quantile loss\n * Huber loss\n * Epsilon-insensitive loss\n * Class weights options\n1. Then we'll walk you through a hands-on example of using class weights to boost performance in binary classification",
"_____no_output_____"
],
[
"## Early Stopping\n\nLinear learner trains models using Stochastic Gradient Descent (SGD) or variants of SGD like Adam. Training requires multiple passes over the data, called *epochs*, in which the data are loaded into memory in chunks called *batches*, sometimes called *minibatches*. How do we know how many epochs to run? Ideally, we'd like to continue training until convergence - that is, until we no longer see any additional benefits. Running additional epochs after the model has converged is a waste of time and money, but guessing the right number of epochs is difficult to do before submitting a training job. If we train for too few epochs, our model will be less accurate than it should be, but if we train for too many epochs, we'll waste resources and potentially harm model accuracy by overfitting. To remove the guesswork and optimize model training, linear learner has added two new features: automatic early stopping and saving the best model. \n\nEarly stopping works in two basic regimes: with or without a validation set. Often we split our data into training, validation, and testing data sets. Training is for optimizing the loss, validation is for tuning hyperparameters, and testing is for producing an honest estimate of how the model will perform on unseen data in the future. If you provide linear learner with a validation data set, training will stop early when validation loss stops improving. If no validation set is available, training will stop early when training loss stops improving.\n\n#### Early Stopping with a validation data set\nOne big benefit of having a validation data set is that we can tell if and when we start overfitting to the training data. Overfitting is when the model gives predictions that are too closely tailored to the training data, so that generalization performance (performance on future unseen data) will be poor. The following plot on the right shows a typical progression during training with a validation data set. 
Until epoch 5, the model has been learning from the training set and doing better and better on the validation set. But in epochs 7-10, we see that the model has begun to overfit on the training set, which shows up as worse performance on the validation set. Regardless of whether the model continues to improve (overfit) on the training data, we want to stop training after the model starts to overfit. And we want to restore the best model from just before the overfitting started. These two features are now turned on by default in linear learner. \n\nThe default parameter values for early stopping are shown in the following code. To tweak the behavior of early stopping, try changing the values. To turn off early stopping entirely, choose a patience value larger than the number of epochs you want to run.",
"_____no_output_____"
],
[
" early_stopping_patience=3,\n early_stopping_tolerance=0.001,",
"_____no_output_____"
],
[
"The parameter early_stoping_patience defines how many epochs to wait before ending training if no improvement is made. It's useful to have a little patience when deciding to stop early, since the training curve can be bumpy. Performance may get worse for one or two epochs before continuing to improve. By default, linear learner will stop early if performance has degraded for three epochs in a row.\n\nThe parameter early_stopping_tolerance defines the size of an improvement that's considered significant. If the ratio of the improvement in loss divided by the previous best loss is smaller than this value, early stopping will consider the improvement to be zero.",
"_____no_output_____"
],
[
"#### Early stopping without a validation data set\n\nWhen training with a training set only, we have no way to detect overfitting. But we still want to stop training once the model has converged and improvement has levelled off. In the left panel of the following figure, that happens around epoch 25.",
"_____no_output_____"
],
[
"<img src=\"images/early_stop.png\">",
"_____no_output_____"
],
[
"#### Early stopping and calibration\nYou may already be familiar with the linear learner automated threshold tuning for binary classification models. Threshold tuning and early stopping work together seamlessly by default in linear learner. \n\nWhen a binary classification model outputs a probability (e.g., logistic regression) or a raw score (SVM), we convert that to a binary prediction by applying a threshold, for example:",
"_____no_output_____"
],
[
" predicted_label = 1 if raw_prediction > 0.5 else 0",
"_____no_output_____"
],
[
"We might want to tune the threshold (0.5 in the example) based on the metric we care about most, such as accuracy or recall. Linear learner does this tuning automatically using the 'binary_classifier_model_selection_criteria' parameter. When threshold tuning and early stopping are both turned on (the default), then training stops early based on the metric you request. For example, if you provide a validation data set and request a logistic regression model with threshold tuning based on accuracy, then training will stop when the model with auto-thresholding reaches optimal performance on the validation data. If there is no validation set and auto-thresholding is turned off, then training will stop when the best value of the loss function on the training data is reached.",
"_____no_output_____"
],
[
"## New loss functions\n\nThe loss function is our definition of the cost of making an error in prediction. When we train a model, we push the model weights in the direction that minimizes loss, given the known labels in the training set. The most common and well-known loss function is squared loss, which is minimized when we train a standard linear regression model. Another common loss function is the one used in logistic regression, variously known as logistic loss, cross-entropy loss, or binomial likelihood. Ideally, the loss function we train on should be a close match to the business problem we're trying to solve. Having the flexibility to choose different loss functions at training time allows us to customize models to different use cases. In this section, we'll discuss when to use which loss function, and introduce several new loss functions that have been added to linear learner.",
"_____no_output_____"
],
[
"<img src=\"images/loss_functions.png\">",
"_____no_output_____"
],
[
"### Squared loss",
"_____no_output_____"
],
[
" predictor_type='regressor',\n loss='squared_loss',",
"_____no_output_____"
],
[
"$$\\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^{N} (w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w} - y_i)^2$$\n\nWe'll use the following notation in all of the loss functions we discuss:\n\n$w_0$ is the bias that the model learns\n\n$\\mathbf{w}$ is the vector of feature weights that the model learns\n\n$y_i$ and $\\mathbf{x_i}$ are the label and feature vector, respectively, from example $i$ of the training data\n\n$N$ is the total number of training examples\n\nSquared loss is a first choice for most regression problems. It has the nice property of producing an estimate of the mean of the label given the features. As seen in the plot above, squared loss implies that we pay a very high cost for very wrong predictions. This can cause problems if our training data include some extreme outliers. A model trained on squared loss will be very sensitive to outliers. Squared loss is sometimes known as mean squared error (MSE), ordinary least squares (OLS), or $\\text{L}_2$ loss. Read more about [squared loss](https://en.wikipedia.org/wiki/Least_squares) on wikipedia.",
"_____no_output_____"
],
[
"### Absolute loss",
"_____no_output_____"
],
[
" predictor_type='regressor',\n loss='absolute_loss',",
"_____no_output_____"
],
[
"$$\\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^{N} |w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w} - y_i|$$\n\nAbsolute loss is less common than squared loss, but can be very useful. The main difference between the two is that training a model on absolute loss will produces estimates of the median of the label given the features. Squared loss estimates the mean, and absolute loss estimates the median. Whether you want to estimate the mean or median will depend on your use case. Let's look at a few examples:\n* If an error of -2 costs you \\$2 and an error of +50 costs you \\$50, then absolute loss models your costs better than squared loss. \n* If an error of -2 costs you \\$2, while an error of +50 is simply unacceptably large, then it's important that your errors are generally small, and so squared loss is probably the right fit. \n* If it's important that your predictions are too high as often as they're too low, then you want to estimate the median with absolute loss. \n* If outliers in your training data are having too much influence on the model, try switching from squared to absolute loss. Large errors get a large amount of attention from absolute loss, but with squared loss, large errors get squared and become huge errors attracting a huge amount of attention. If the error is due to an outlier, it might not deserve a huge amount of attention.\n\nAbsolute loss is sometimes also known as $\\text{L}_1$ loss or least absolute error. Read more about [absolute loss](https://en.wikipedia.org/wiki/Least_absolute_deviations) on wikipedia.",
"_____no_output_____"
],
[
"### Quantile loss",
"_____no_output_____"
],
[
" predictor_type='regressor',\n loss='quantile_loss',\n quantile=0.9,",
"_____no_output_____"
],
[
"$$ \\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^N q(y_i - w_o - \\mathbf{x_i}^\\intercal \\mathbf{w})^\\text{+} + (1-q)(w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w} - y_i)^\\text{+} $$\n\n$$ \\text{where the parameter } q \\text{ is the quantile you want to predict}$$\n\nQuantile loss lets us predict an upper or lower bound for the label, given the features. To make predictions that are larger than the true label 90% of the time, train quantile loss with the 0.9 quantile. An example would be predicting electricity demand where we want to build near peak demand since building to the average would result in brown-outs and upset customers. Read more about [quantile loss](https://en.wikipedia.org/wiki/Quantile_regression) on wikipedia.",
"_____no_output_____"
],
[
"### Huber loss",
"_____no_output_____"
],
[
" predictor_type='regressor',\n loss='huber_loss',\n huber_delta=0.5,",
"_____no_output_____"
],
[
"$$ \\text{Let the error be } e_i = w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w} - y_i \\text{. Then Huber loss solves:}$$\n\n$$ \\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^N I(|e_i| < \\delta) \\frac{e_i^2}{2} + I(|e_i| >= \\delta) |e_i|\\delta - \\frac{\\delta^2}{2} $$\n\n$$ \\text{where } I(a) = 1 \\text{ if } a \\text{ is true, else } 0 $$\n\nHuber loss is an interesting hybrid of $\\text{L}_1$ and $\\text{L}_2$ losses. Huber loss counts small errors on a squared scale and large errors on an absolute scale. In the plot above, we see that Huber loss looks like squared loss when the error is near 0 and absolute loss beyond that. Huber loss is useful when we want to train with squared loss, but want to avoid squared loss's sensitivity to outliers. Huber loss gives less importance to outliers by not squaring the larger errors. Read more about [Huber loss](https://en.wikipedia.org/wiki/Huber_loss) on wikipedia.",
"_____no_output_____"
],
[
"### Epsilon-insensitive loss\n",
"_____no_output_____"
],
[
" predictor_type='regressor',\n loss='eps_insensitive_squared_loss',\n loss_insensitivity=0.25,",
"_____no_output_____"
],
[
"\nFor epsilon-insensitive squared loss, we minimize\n$$ \\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^N max(0, (w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w} - y_i)^2 - \\epsilon^2) $$\n\nAnd for epsilon-insensitive absolute loss, we minimize\n\n$$ \\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^N max(0, |w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w} - y_i| - \\epsilon) $$\n\nEpsilon-insensitive loss is useful when errors don't matter to you as long as they're below some threshold. Set the threshold that makes sense for your use case as epsilon. Epsilon-insensitive loss will allow the model to pay no cost for making errors smaller than epsilon.",
"_____no_output_____"
],
[
"### Logistic regression",
"_____no_output_____"
],
[
" predictor_type='binary_classifier',\n loss='logistic',\n binary_classifier_model_selection_criteria='recall_at_target_precision',\n target_precision=0.9,",
"_____no_output_____"
],
[
"Each of the losses we've discussed is for regression problems, where the labels are floating point numbers. The last two losses we'll cover, logistic regression and support vector machines, are for binary classification problems where the labels are one of two classes. Linear learner expects the class labels to be 0 or 1. This may require some preprocessing, for example if your labels are coded as -1 and +1, or as blue and yellow. Logistic regression produces a predicted probability for each data point:\n\n$$ p_i = \\sigma(w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w}) $$\n\nThe loss function minimized in training a logistic regression model is the log likelihood of a binomial distribution. It assigns the highest cost to predictions that are confident and wrong, for example a prediction of 0.99 when the true label was 0, or a prediction of 0.002 when the true label was positive. The loss function is:\n\n$$ \\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^N y_i \\text{log}(p) - (1 - y_i) \\text{log}(1 - p) $$ \n\n$$ \\text{where } \\sigma(x) = \\frac{\\text{exp}(x)}{1 + \\text{exp}(x)} $$\n\nRead more about [logistic regression](https://en.wikipedia.org/wiki/Logistic_regression) on wikipedia.",
"_____no_output_____"
],
[
"### Hinge loss (support vector machine)",
"_____no_output_____"
],
[
" predictor_type='binary_classifier',\n loss='hinge_loss',\n margin=1.0,\n binary_classifier_model_selection_criteria='recall_at_target_precision',\n target_precision=0.9,",
"_____no_output_____"
],
[
"Another popular option for binary classification problems is the hinge loss, also known as a Support Vector Machine (SVM) or Support Vector Classifier (SVC) with a linear kernel. It places a high cost on any points that are misclassified or nearly misclassified. To tune the meaning of \"nearly\", adjust the margin parameter:",
"_____no_output_____"
],
[
"It's difficult to say in advance whether logistic regression or SVM will be the right model for a binary classification problem, though logistic regression is generally a more popular choice then SVM. If it's important to provide probabilities of the predicted class labels, then logistic regression will be the right choice. If all that matters is better accuracy, precision, or recall, then either model may be appropriate. One advantage of logistic regression is that it produces the probability of an example having a positive label. That can be useful, for example in an ad serving system where the predicted click probability is used as an input to a bidding mechanism. Hinge loss does not produce class probabilities.\n\nWhichever model you choose, you're likely to benefit from linear learner's options for tuning the threshold that separates positive from negative predictions\n\n$$\\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^{N} y_i(\\frac{m+1}{2} - w_0 - \\mathbf{x_i}^\\text{T}\\mathbf{w})^\\text{+} + (1-y_i)\\frac{m-1}{2} + w_o + \\mathbf{x_i}^\\text{T}\\mathbf{w})^\\text{+}$$\n\n$$\\text{where } a^\\text{+} = \\text{max}(0, a)$$\n\n\nNote that the hinge loss we use is a reparameterization of the usual hinge loss: typically hinge loss expects the binary label to be in {-1, 1}, whereas ours expects the binary labels to be in {0, 1}. This reparameterization allows LinearLearner to accept the same data format for binary classification regardless of the training loss. Read more about [hinge loss](https://en.wikipedia.org/wiki/Hinge_loss) on wikipedia.",
"_____no_output_____"
],
[
"## Class weights\nIn some binary classification problems, we may find that our training data is highly unbalanced. For example, in credit card fraud detection, we're likely to have many more examples of non-fraudulent transactions than fraudulent. In these cases, balancing the class weights may improve model performance.\n \nSuppose we have 98% negative and 2% positive examples. To balance the total weight of each class, we can set the positive class weight to be 49. Now the average weight from the positive class is 0.98 $\\cdot$ 1 = 0.98, and the average weight from the negative class is 0.02 $\\cdot$ 49 = 0.98. The negative class weight multiplier is always 1.\n \nTo incorporate the positive class weight in training, we multiply the loss by the positive weight whenever we see a positive class label. For logistic regression, the weighted loss is:\n\nWeighted logistic regression:\n\n$$ \\text{argmin}_{w_0, \\mathbf{w}} \\sum_{i=1}^N p y_i \\text{log}(\\sigma(w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w})) - (1 - y_i) \\text{log}(1 - \\sigma(w_0 + \\mathbf{x_i}^\\intercal \\mathbf{w})) $$ \n\n$$ \\text{where } p \\text{ is the weight for the positive class.} $$\n \nThe only difference between the weighted and unweighted logistic regression loss functions is the presense of the class weight, $p$ on the left-hand term in the loss. Class weights in the hinge loss (SVM) classifier are applied in the same way.\n\nTo apply class weights when training a model with linear learner, supply the weight for the positive class as a training parameter:",
"_____no_output_____"
],
[
" positive_example_weight_mult=200,",
"_____no_output_____"
],
[
"Or to ask linear learner to calculate the positive class weight for you:",
"_____no_output_____"
],
[
" positive_example_weight_mult='balanced',",
"_____no_output_____"
],
[
"## Hands-on example: Detecting credit card fraud\n\nIn this section, we'll look at a credit card fraud detection dataset. The data set (Dal Pozzolo et al. 2015) was downloaded from [Kaggle](https://www.kaggle.com/mlg-ulb/creditcardfraud/data). We have features and labels for over a quarter million credit card transactions, each of which is labeled as fraudulent or not fraudulent. We'd like to train a model based on the features of these transactions so that we can predict risky or fraudulent transactions in the future. This is a binary classification problem. \n\nWe'll walk through training linear learner with various settings and deploying an inference endpoint. We'll evaluate the quality of our models by hitting that endpoint with observations from the test set. We can take the real-time predictions returned by the endpoint and evaluate them against the ground-truth labels in our test set.\n\nNext, we'll apply the linear learner threshold tuning functionality to get better precision without sacrificing recall. Then, we'll push the precision even higher using the linear learner new class weights feature. Because fraud can be extremely costly, we would prefer to have high recall, even if this means more false positives. This is especially true if we are building a first line of defense, flagging potentially fraudulent transactions for further review before taking actions that affect customers.",
"_____no_output_____"
],
[
"First we'll do some preprocessing on this data set: we'll shuffle the examples and split them into train and test sets. To run this under notebook under your own AWS account, you'll need to change the Amazon S3 locations. First download the raw data from [Kaggle](https://www.kaggle.com/mlg-ulb/creditcardfraud/data) and upload to your SageMaker notebook instance (or wherever you're running this notebook). Only 0.17% of the data have positive labels, making this a challenging classification problem.",
"_____no_output_____"
]
],
[
[
"import boto3\nimport io\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\n\nimport sagemaker\nimport sagemaker.amazon.common as smac\nfrom sagemaker import get_execution_role\nfrom sagemaker.predictor import csv_serializer, json_deserializer",
"_____no_output_____"
],
[
"! aws s3 cp s3://sagemaker-sample-files/datasets/tabular/mlg-ulb-credit-card/creditcard_csv.csv .",
"_____no_output_____"
],
[
"# Set data locations\nbucket = sagemaker.Session().default_bucket()\nprefix = \"sagemaker/DEMO-linear-learner-loss-weights\" # replace this with your own prefix\ns3_train_key = \"{}/train/recordio-pb-data\".format(prefix)\ns3_train_path = os.path.join(\"s3://\", bucket, s3_train_key)\nlocal_raw_data = \"creditcard_csv.csv\"\nrole = get_execution_role()",
"_____no_output_____"
],
[
"# Read the data, shuffle, and split into train and test sets, separating the labels (last column) from the features\nraw_data = pd.read_csv(local_raw_data).values\nnp.random.seed(0)\nnp.random.shuffle(raw_data)\ntrain_size = int(raw_data.shape[0] * 0.7)\ntrain_features = raw_data[:train_size, :-1]\ntrain_labels = np.array([x.strip(\"'\") for x in raw_data[:train_size, -1]]).astype(np.int)\ntest_features = raw_data[train_size:, :-1]\ntest_labels = np.array([x.strip(\"'\") for x in raw_data[train_size:, -1]]).astype(np.int)\n\n\n# Convert the processed training data to protobuf and write to S3 for linear learner\nvectors = np.array([t.tolist() for t in train_features]).astype(\"float32\")\nlabels = np.array([t.tolist() for t in train_labels]).astype(\"float32\")\nbuf = io.BytesIO()\nsmac.write_numpy_to_dense_tensor(buf, vectors, labels)\nbuf.seek(0)\nboto3.resource(\"s3\").Bucket(bucket).Object(s3_train_key).upload_fileobj(buf)",
"_____no_output_____"
]
],
[
[
"We'll wrap the model training setup in a convenience function that takes in the S3 location of the training data, the model hyperparameters that define our training job, and the S3 output path for model artifacts. Inside the function, we'll hardcode the algorithm container, the number and type of EC2 instances to train on, and the input and output data formats.",
"_____no_output_____"
]
],
[
[
"from sagemaker.amazon.amazon_estimator import get_image_uri\n\n\ndef predictor_from_hyperparams(s3_train_data, hyperparams, output_path):\n \"\"\"\n Create an Estimator from the given hyperparams, fit to training data, and return a deployed predictor\n \"\"\"\n # specify algorithm containers and instantiate an Estimator with given hyperparams\n container = get_image_uri(boto3.Session().region_name, \"linear-learner\")\n\n linear = sagemaker.estimator.Estimator(\n container,\n role,\n train_instance_count=1,\n train_instance_type=\"ml.m4.xlarge\",\n output_path=output_path,\n sagemaker_session=sagemaker.Session(),\n )\n linear.set_hyperparameters(**hyperparams)\n # train model\n linear.fit({\"train\": s3_train_data})\n # deploy a predictor\n linear_predictor = linear.deploy(initial_instance_count=1, instance_type=\"ml.m4.xlarge\")\n linear_predictor.serializer = csv_serializer\n linear_predictor.deserializer = json_deserializer\n return linear_predictor",
"_____no_output_____"
]
],
[
[
"And add another convenience function for setting up a hosting endpoint, making predictions, and evaluating the model. To make predictions, we need to set up a model hosting endpoint. Then we feed test features to the endpoint and receive predicted test labels. To evaluate the models we create in this exercise, we'll capture predicted test labels and compare them to actuals using some common binary classification metrics.",
"_____no_output_____"
]
],
[
[
"def evaluate(linear_predictor, test_features, test_labels, model_name, verbose=True):\n \"\"\"\n Evaluate a model on a test set given the prediction endpoint. Return binary classification metrics.\n \"\"\"\n # split the test data set into 100 batches and evaluate using prediction endpoint\n prediction_batches = [\n linear_predictor.predict(batch)[\"predictions\"]\n for batch in np.array_split(test_features, 100)\n ]\n # parse raw predictions json to exctract predicted label\n test_preds = np.concatenate(\n [np.array([x[\"predicted_label\"] for x in batch]) for batch in prediction_batches]\n )\n\n # calculate true positives, false positives, true negatives, false negatives\n tp = np.logical_and(test_labels, test_preds).sum()\n fp = np.logical_and(1 - test_labels, test_preds).sum()\n tn = np.logical_and(1 - test_labels, 1 - test_preds).sum()\n fn = np.logical_and(test_labels, 1 - test_preds).sum()\n\n # calculate binary classification metrics\n recall = tp / (tp + fn)\n precision = tp / (tp + fp)\n accuracy = (tp + tn) / (tp + fp + tn + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n if verbose:\n print(pd.crosstab(test_labels, test_preds, rownames=[\"actuals\"], colnames=[\"predictions\"]))\n print(\"\\n{:<11} {:.3f}\".format(\"Recall:\", recall))\n print(\"{:<11} {:.3f}\".format(\"Precision:\", precision))\n print(\"{:<11} {:.3f}\".format(\"Accuracy:\", accuracy))\n print(\"{:<11} {:.3f}\".format(\"F1:\", f1))\n\n return {\n \"TP\": tp,\n \"FP\": fp,\n \"FN\": fn,\n \"TN\": tn,\n \"Precision\": precision,\n \"Recall\": recall,\n \"Accuracy\": accuracy,\n \"F1\": f1,\n \"Model\": model_name,\n }",
"_____no_output_____"
]
],
[
[
"And finally we'll add a convenience function to delete prediction endpoints after we're done with them:",
"_____no_output_____"
]
],
[
[
"def delete_endpoint(predictor):\n try:\n predictor.delete_model()\n predictor.delete_endpoint()\n print(\"Deleted {}\".format(predictor.endpoint))\n except:\n print(\"Already deleted: {}\".format(predictor.endpoint))",
"_____no_output_____"
]
],
[
[
"Let's begin by training a binary classifier model with the linear learner default settings. Note that we're setting the number of epochs to 40, which is much higher than the default of 10 epochs. With early stopping, we don't have to worry about setting the number of epochs too high. Linear learner will stop training automatically after the model has converged.",
"_____no_output_____"
]
],
[
[
"# Training a binary classifier with default settings: logistic regression\ndefaults_hyperparams = {\"feature_dim\": 30, \"predictor_type\": \"binary_classifier\", \"epochs\": 40}\ndefaults_output_path = \"s3://{}/{}/defaults/output\".format(bucket, prefix)\ndefaults_predictor = predictor_from_hyperparams(\n s3_train_path, defaults_hyperparams, defaults_output_path\n)",
"_____no_output_____"
]
],
[
[
"And now we'll produce a model with a threshold tuned for the best possible precision with recall fixed at 90%:",
"_____no_output_____"
]
],
[
[
"# Training a binary classifier with automated threshold tuning\nautothresh_hyperparams = {\n \"feature_dim\": 30,\n \"predictor_type\": \"binary_classifier\",\n \"binary_classifier_model_selection_criteria\": \"precision_at_target_recall\",\n \"target_recall\": 0.9,\n \"epochs\": 40,\n}\nautothresh_output_path = \"s3://{}/{}/autothresh/output\".format(bucket, prefix)\nautothresh_predictor = predictor_from_hyperparams(\n s3_train_path, autothresh_hyperparams, autothresh_output_path\n)",
"_____no_output_____"
]
],
[
[
"### Improving recall with class weights\n\nNow we'll improve on these results using a new feature added to linear learner: class weights for binary classification. We introduced this feature in the *Class Weights* section, and now we'll look into its application to the credit card fraud dataset by training a new model with balanced class weights:",
"_____no_output_____"
]
],
[
[
"# Training a binary classifier with class weights and automated threshold tuning\nclass_weights_hyperparams = {\n \"feature_dim\": 30,\n \"predictor_type\": \"binary_classifier\",\n \"binary_classifier_model_selection_criteria\": \"precision_at_target_recall\",\n \"target_recall\": 0.9,\n \"positive_example_weight_mult\": \"balanced\",\n \"epochs\": 40,\n}\nclass_weights_output_path = \"s3://{}/{}/class_weights/output\".format(bucket, prefix)\nclass_weights_predictor = predictor_from_hyperparams(\n s3_train_path, class_weights_hyperparams, class_weights_output_path\n)",
"_____no_output_____"
]
],
[
[
"The first training examples used the default loss function for binary classification, logistic loss. Now let's train a model with hinge loss. This is also called a support vector machine (SVM) classifier with a linear kernel. Threshold tuning is supported for all binary classifier models in linear learner.",
"_____no_output_____"
]
],
[
[
"# Training a binary classifier with hinge loss and automated threshold tuning\nsvm_hyperparams = {\n \"feature_dim\": 30,\n \"predictor_type\": \"binary_classifier\",\n \"loss\": \"hinge_loss\",\n \"binary_classifier_model_selection_criteria\": \"precision_at_target_recall\",\n \"target_recall\": 0.9,\n \"epochs\": 40,\n}\nsvm_output_path = \"s3://{}/{}/svm/output\".format(bucket, prefix)\nsvm_predictor = predictor_from_hyperparams(s3_train_path, svm_hyperparams, svm_output_path)",
"_____no_output_____"
]
],
[
[
"And finally, let's see what happens with balancing the class weights for the SVM model:",
"_____no_output_____"
]
],
[
[
"# Training a binary classifier with hinge loss, balanced class weights, and automated threshold tuning\nsvm_balanced_hyperparams = {\n \"feature_dim\": 30,\n \"predictor_type\": \"binary_classifier\",\n \"loss\": \"hinge_loss\",\n \"binary_classifier_model_selection_criteria\": \"precision_at_target_recall\",\n \"target_recall\": 0.9,\n \"positive_example_weight_mult\": \"balanced\",\n \"epochs\": 40,\n}\nsvm_balanced_output_path = \"s3://{}/{}/svm_balanced/output\".format(bucket, prefix)\nsvm_balanced_predictor = predictor_from_hyperparams(\n s3_train_path, svm_balanced_hyperparams, svm_balanced_output_path\n)",
"_____no_output_____"
]
],
[
[
"Now we'll make use of the prediction endpoint we've set up for each model by sending them features from the test set and evaluating their predictions with standard binary classification metrics.",
"_____no_output_____"
]
],
[
[
"# Evaluate the trained models\npredictors = {\n \"Logistic\": defaults_predictor,\n \"Logistic with auto threshold\": autothresh_predictor,\n \"Logistic with class weights\": class_weights_predictor,\n \"Hinge with auto threshold\": svm_predictor,\n \"Hinge with class weights\": svm_balanced_predictor,\n}\nmetrics = {\n key: evaluate(predictor, test_features, test_labels, key, False)\n for key, predictor in predictors.items()\n}\npd.set_option(\"display.float_format\", lambda x: \"%.3f\" % x)\ndisplay(\n pd.DataFrame(list(metrics.values())).loc[:, [\"Model\", \"Recall\", \"Precision\", \"Accuracy\", \"F1\"]]\n)",
"_____no_output_____"
]
],
[
[
"The results are in! With threshold tuning, we can accurately predict 85-90% of the fraudulent transactions in the test set (due to randomness in training, recall will vary between 0.85-0.9 across multiple runs). But in addition to those true positives, we'll have a high number of false positives: 90-95% of the transactions we predict to be fraudulent are in fact not fraudulent (precision varies between 0.05-0.1). This model would work well as a first line of defense, flagging potentially fraudulent transactions for further review. If we instead want a model that gives very few false alarms, at the cost of catching far fewer of the fraudulent transactions, then we should optimize for higher precision:",
"_____no_output_____"
],
[
" binary_classifier_model_selection_criteria='recall_at_target_precision', \n target_precision=0.9,",
"_____no_output_____"
],
[
"And what about the results of using our new feature, class weights for binary classification? Training with class weights has made a huge improvement to this model's performance! The precision is roughly doubled, while recall is still held constant at 85-90%. \n\nBalancing class weights improved the performance of our SVM predictor, but it still does not match the corresponding logistic regression model for this dataset. Comparing all of the models we've fit so far, logistic regression with class weights and tuned thresholds did the best.",
"_____no_output_____"
],
[
"#### Note on target vs. observed recall\n\nIt's worth taking some time to look more closely at these results. If we asked linear learner for a model calibrated to a target recall of 0.9, then why didn't we get exactly 90% recall on the test set? The reason is the difference between training, validation, and testing. Linear learner calibrates thresholds for binary classification on the validation data set when one is provided, or else on the training set. Since we did not provide a validation data set, the threshold were calculated on the training data. Since the training, validation, and test data sets don't match exactly, the target recall we request is only an approximation. In this case, the threshold that produced 90% recall on the training data happened to produce only 85-90% recall on the test data (due to some randomness in training, the results will vary from one run to the next). The variation of recall in the test set versus the training set is dependent on the number of positive points. In this example, although we have over 280,000 examples in the entire dataset, we only have 337 positive examples, hence the large difference. The accuracy of this approximation can be improved by providing a large validation data set to get a more accurate threshold, and then evaluating on a large test set to get a more accurate benchmark of the model and its threshold. For even more fine-grained control, we can set the number of calibration samples to a higher number. It's default value is already quite high at 10 million samples:\n\n num_calibration_samples=10000000,",
"_____no_output_____"
],
[
"#### Clean Up\n\nFinally we'll clean up by deleting the prediction endpoints we set up:",
"_____no_output_____"
]
],
[
[
"for predictor in [\n defaults_predictor,\n autothresh_predictor,\n class_weights_predictor,\n svm_predictor,\n svm_balanced_predictor,\n]:\n delete_endpoint(predictor)",
"_____no_output_____"
]
],
[
[
"We've just shown how to use the linear learner new early stopping feature, new loss functions, and new class weights feature to improve credit card fraud prediction. Class weights can help you optimize recall or precision for all types of fraud detection, as well as other classification problems with rare events, like ad click prediction or mechanical failure prediction. Try using class weights in your binary classification problem, or try one of the new loss functions for your regression problems: use quantile prediction to put confidence intervals around your predictions by learning 5% and 95% quantiles. For more information about new loss functions and class weights, see the linear learner [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html).",
"_____no_output_____"
],
[
"##### References\nAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
ec64f7837715e7b7876b7f3dd0412728d941d652 | 8,129 | ipynb | Jupyter Notebook | Taller 3 - Algebra Lineal/2_1_Lineal_Algebra_Practice_1.ipynb | deiry/DataScienceFEM | f579e54aad579e35f80656569781e39c2696e8ea | [
"MIT"
]
| null | null | null | Taller 3 - Algebra Lineal/2_1_Lineal_Algebra_Practice_1.ipynb | deiry/DataScienceFEM | f579e54aad579e35f80656569781e39c2696e8ea | [
"MIT"
]
| null | null | null | Taller 3 - Algebra Lineal/2_1_Lineal_Algebra_Practice_1.ipynb | deiry/DataScienceFEM | f579e54aad579e35f80656569781e39c2696e8ea | [
"MIT"
]
| null | null | null | 19.308789 | 255 | 0.417395 | [
[
[
"<a href=\"https://colab.research.google.com/github/salvarezmeneses/DataScience2020/blob/master/2_1_Lineal_Algebra_Practice_1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Question 1.\n\n* Print the sum of matrixes A and B\n\n$$ A =\n\\left(\\begin{array}{cc} \n1 & 2\\\\\n3 & 4\\\\\n5 & 6\n\\end{array}\\right) + B =\n\\left(\\begin{array}{cc} \n6 & 5\\\\\n4 & 3\\\\\n2 & 1 \n\\end{array}\\right)\n$$ ",
"_____no_output_____"
]
],
[
[
"# Answer 1.\n'Your code here'\n\n\n",
"_____no_output_____"
]
],
[
[
"# Question 2\n\n* Print the sum of matrix + scalar.\n\n$$ M =\n\\left(\\begin{array}{cc} \n6 & 3\\\\\n9 & 5\\\\\n4 & 8\n\\end{array}\\right) + N =\n\\left(\\begin{array}{cc} \n2.5 \\\\ \n\\end{array}\\right)\n$$ ",
"_____no_output_____"
]
],
[
[
"# Answer 2.\n'Your code here'",
"_____no_output_____"
]
],
[
[
"# Question 3.\n\n* Print the transposed matrix of $J$ --> $J^t$. \n\n$$ J =\n\\left(\\begin{array}{cc} \n9 & 2 & 3\\\\\n3 & 7 & 4 \\\\\n5 & 6 & 9\n\\end{array}\\right) \n$$ ",
"_____no_output_____"
]
],
[
[
"# Answer 3.\n'Your code here'",
"_____no_output_____"
]
],
[
[
"# Question 4.\n\n* For the $ J ^ t $ matrix, calculate your transpose to obtain the original matrix J. \n\n$$ J =\n\\left(\\begin{array}{cc} \n9 & 2 & 3\\\\\n3 & 7 & 4 \\\\\n5 & 6 & 9\n\\end{array}\\right) \n$$ ",
"_____no_output_____"
]
],
[
[
"# Answer 4.\n'Your code here'\n\n\n",
"_____no_output_____"
]
],
[
[
"# Question 5.\n\n* \nWhat are the missing values? \n\n$$\n\\left(\\begin{array}{cc} \n? & 2\\\\\n0 & 2\n\\end{array}\\right)\n\\left(\\begin{array}{cc} \n1 & 0 \\\\ \n1 & ? \n\\end{array}\\right)\n$$ \n\n * so that the answer is like this:\n$\n\\left(\n \\begin {array}{ll} \n1 & 0 \\\\\n0 & 0\n\\end{array}\n\\right)\n$\n\n- [ A ] = 1 y 1\n- [ B ] = 0 y 1\n- [ C ] = 1 y 0\n- [ D ] = 0 y 0\n\n* Print the result and the answer.",
"_____no_output_____"
]
],
[
[
"# Answer 5.\n'your code here'",
"_____no_output_____"
]
],
[
[
"# Question 6.\n* ¿ What is the dot product matrixes $L$ y $K$ = : ?\n\n$$ L =\n\\left(\\begin{array}{cc} \n3 & 5 \\\\\n7 & 1 \\\\\n\\end{array}\\right) K =\n\\left(\\begin{array}{cc} \n3 & 12 \\\\\n6 & 4 \\\\\n\\end{array}\\right)\n$$ ",
"_____no_output_____"
]
],
[
[
"# Answer 6.\n'your code here'",
"_____no_output_____"
]
],
[
[
"# Question 7.\n\n* ¿ What is the determinant of the following matrix ? (Print the answer)\n\n$\n\\left(\n\\begin {array}{ll} \n1 & 2 & 0 \\\\\n1 & 2 & 0 \\\\\n0 & 1 & 1 \\\\\n\\end{array}\n\\right)\n$\n\n- [ A ] = 1 \n- [ B ] = 0 \n- [ C ] = 5\n- [ D ] = -5\n\n",
"_____no_output_____"
]
],
[
[
"# Answer 7.\n'your code here'",
"_____no_output_____"
]
],
[
[
"# Question 8.\n\n* ¿ What is the determinant of the following matrix ? (Print the answer)\n\n$\n\\left(\n\\begin {array}{ll} \n0 & 1 & 1 \\\\\n1 & 0 & 0 \\\\\n0 & 0 & 1 \\\\\n\\end{array}\n\\right)\n$\n\n- [ A ] = 0 \n- [ B ] = -1 \n- [ C ] = 3\n- [ D ] = 1\n",
"_____no_output_____"
]
],
[
[
"# Answer 8.\n'your code here'",
"_____no_output_____"
]
],
[
[
"# Question 9\n\n* ¿What is the missing value?\n$$ \n\\left(\\begin{array}{cc} \n1 & 2 & 3\\\\\n4 & 5 & 6\\\\\n7 & 8 & 9\n\\end{array}\\right) \n\\left(\\begin{array}{cc} \n0 \\\\ \n? \\\\\n16 \\\\\n\\end{array}\\right) =\n\\left(\\begin{array}{cc} \n4 \\\\ \n10 \\\\\n16 \\\\\n\\end{array}\\right)\n$$ ",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec6508d70cba3b9f50b8f56cb9a753e0078ca36e | 537,421 | ipynb | Jupyter Notebook | Module 2/Ex2.2A Epsilon Greedy-completed.ipynb | Aboubacar2012/DAT257x | c8a24219161bdecb4c210919fd48cbd64d33c029 | [
"Unlicense"
]
| null | null | null | Module 2/Ex2.2A Epsilon Greedy-completed.ipynb | Aboubacar2012/DAT257x | c8a24219161bdecb4c210919fd48cbd64d33c029 | [
"Unlicense"
]
| null | null | null | Module 2/Ex2.2A Epsilon Greedy-completed.ipynb | Aboubacar2012/DAT257x | c8a24219161bdecb4c210919fd48cbd64d33c029 | [
"Unlicense"
]
| null | null | null | 584.788901 | 24,176 | 0.951435 | [
[
[
"# DAT257x: Reinforcement Learning Explained\n\n## Lab 2: Bandits\n\n### Exercise 2.2A: Epsilon Greedy",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport sys\n\nif \"../\" not in sys.path:\n sys.path.append(\"../\") \n\nfrom lib.envs.bandit import BanditEnv\nfrom lib.simulation import Experiment",
"_____no_output_____"
],
[
"#Policy interface\nclass Policy:\n #num_actions: (int) Number of arms [indexed by 0 ... num_actions-1]\n def __init__(self, num_actions):\n self.num_actions = num_actions\n \n def act(self):\n pass\n \n def feedback(self, action, reward):\n pass",
"_____no_output_____"
],
[
"#Greedy policy\nclass Greedy(Policy):\n def __init__(self, num_actions):\n Policy.__init__(self, num_actions)\n self.name = \"Greedy\"\n self.total_rewards = np.zeros(num_actions, dtype = np.longdouble)\n self.total_counts = np.zeros(num_actions, dtype = np.longdouble)\n \n def act(self):\n current_averages = np.divide(self.total_rewards, self.total_counts, where = self.total_counts > 0)\n current_averages[self.total_counts <= 0] = 0.5 #Correctly handles Bernoulli rewards; over-estimates otherwise\n current_action = np.argmax(current_averages)\n return current_action\n \n def feedback(self, action, reward):\n self.total_rewards[action] += reward\n self.total_counts[action] += 1",
"_____no_output_____"
]
],
[
[
"Now let's implement an epsilon greedy policy based on the policy interface. The epsilon greedy policy will make sure we explore (i.e taking random actions) as set by the epsilon value, and take the most rewarding action (i.e greedy) the rest of the times. This is implemented in the act() function. ",
"_____no_output_____"
]
],
[
[
"#Epsilon Greedy policy\nclass EpsilonGreedy(Greedy):\n def __init__(self, num_actions, epsilon):\n Greedy.__init__(self, num_actions)\n if (epsilon is None or epsilon < 0 or epsilon > 1):\n print(\"EpsilonGreedy: Invalid value of epsilon\", flush = True)\n sys.exit(0)\n \n self.epsilon = epsilon\n self.name = \"Epsilon Greedy\"\n \n def act(self):\n choice = None\n if self.epsilon == 0:\n choice = 0\n elif self.epsilon == 1:\n choice = 1\n else:\n choice = np.random.binomial(1, self.epsilon)\n \n if choice == 1:\n return np.random.choice(self.num_actions)\n else:\n current_averages = np.divide(self.total_rewards, self.total_counts, where = self.total_counts > 0)\n current_averages[self.total_counts <= 0] = 0.5 #Correctly handles Bernoulli rewards; over-estimates otherwise\n current_action = np.argmax(current_averages)\n return current_action\n ",
"_____no_output_____"
]
],
[
[
"Now let's prepare the simulation. We'll use a different seed and have 10 arms/actions instead of 5.",
"_____no_output_____"
]
],
[
[
"evaluation_seed = 5016\nnum_actions = 10\ntrials = 10000\ndistribution = \"bernoulli\"",
"_____no_output_____"
]
],
[
[
"First, let's use epsilon = 0. Run the simulation and observe the results.",
"_____no_output_____"
]
],
[
[
"epsilon = 0\nenv = BanditEnv(num_actions, distribution, evaluation_seed)\nagent = EpsilonGreedy(num_actions, epsilon)\nexperiment = Experiment(env, agent)\nexperiment.run_bandit(trials)",
"Distribution: bernoulli [0.93160258 0.10144645 0.65359868 0.39412646 0.67036346 0.46602165\n 0.95371529 0.87460326 0.22253422 0.11576875]\nOptimal arm: 6\n"
]
],
[
[
"What about if epsilon = 1? Run the simulation again and observe the results.",
"_____no_output_____"
]
],
[
[
"epsilon = 1\nenv = BanditEnv(num_actions, distribution, evaluation_seed)\nagent = EpsilonGreedy(num_actions, epsilon)\nexperiment = Experiment(env, agent)\nexperiment.run_bandit(trials)",
"Distribution: bernoulli [0.93160258 0.10144645 0.65359868 0.39412646 0.67036346 0.46602165\n 0.95371529 0.87460326 0.22253422 0.11576875]\nOptimal arm: 6\n"
]
],
[
[
"Now, try several different number of epsilons (0.05, 0.1, 0.15). Run the simulations and observe the results.",
"_____no_output_____"
]
],
[
[
"epsilon = 0.05\nenv = BanditEnv(num_actions, distribution, evaluation_seed)\nagent = EpsilonGreedy(num_actions, epsilon)\nexperiment = Experiment(env, agent)\nexperiment.run_bandit(trials)",
"Distribution: bernoulli [0.93160258 0.10144645 0.65359868 0.39412646 0.67036346 0.46602165\n 0.95371529 0.87460326 0.22253422 0.11576875]\nOptimal arm: 6\n"
],
[
"epsilon = 0.1\nenv = BanditEnv(num_actions, distribution, evaluation_seed)\nagent = EpsilonGreedy(num_actions, epsilon)\nexperiment = Experiment(env, agent)\nexperiment.run_bandit(trials)",
"Distribution: bernoulli [0.93160258 0.10144645 0.65359868 0.39412646 0.67036346 0.46602165\n 0.95371529 0.87460326 0.22253422 0.11576875]\nOptimal arm: 6\n"
],
[
"epsilon = 0.15\nenv = BanditEnv(num_actions, distribution, evaluation_seed)\nagent = EpsilonGreedy(num_actions, epsilon)\nexperiment = Experiment(env, agent)\nexperiment.run_bandit(trials)",
"Distribution: bernoulli [0.93160258 0.10144645 0.65359868 0.39412646 0.67036346 0.46602165\n 0.95371529 0.87460326 0.22253422 0.11576875]\nOptimal arm: 6\n"
]
],
[
[
"Which epsilon performs best with this problem?",
"_____no_output_____"
],
[
"Now let's prepare another simulation by setting a different seed. ",
"_____no_output_____"
]
],
[
[
"evaluation_seed = 1239\nnum_actions = 10\ntrials = 10000\ndistribution = \"bernoulli\"",
"_____no_output_____"
]
],
[
[
"Try the range of epsilons again (0, 0.05, 0.1, 0.15, 1), run the simulations and observe the results.",
"_____no_output_____"
]
],
[
[
"for epsilon in (0, 0.05, 0.1, 0.15, 1):\n print(\"epsilon =\", epsilon)\n env = BanditEnv(num_actions, distribution, evaluation_seed)\n agent = EpsilonGreedy(num_actions, epsilon)\n experiment = Experiment(env, agent)\n experiment.run_bandit(trials)",
"epsilon = 0\nDistribution: bernoulli [0.5061565 0.74836123 0.53065236 0.37446716 0.88168477 0.83849367\n 0.3951277 0.13217982 0.44509856 0.03459039]\nOptimal arm: 4\n"
]
],
[
[
"Which epsilon performs best with this problem?",
"_____no_output_____"
],
[
"What do you learn about setting the epsilon value?",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
ec650ad802740efdab1e43fd4be7ecdb04c57a34 | 48,773 | ipynb | Jupyter Notebook | Tutorials/Boston Housing - XGBoost (Deploy) - High Level.ipynb | valmsmith39a/u-g-sentiment-analysis | e5ce8775d8415141a24816061306273472e33d1f | [
"MIT"
]
| null | null | null | Tutorials/Boston Housing - XGBoost (Deploy) - High Level.ipynb | valmsmith39a/u-g-sentiment-analysis | e5ce8775d8415141a24816061306273472e33d1f | [
"MIT"
]
| null | null | null | Tutorials/Boston Housing - XGBoost (Deploy) - High Level.ipynb | valmsmith39a/u-g-sentiment-analysis | e5ce8775d8415141a24816061306273472e33d1f | [
"MIT"
]
| null | null | null | 79.564437 | 15,484 | 0.756751 | [
[
[
"# Predicting Boston Housing Prices\n\n## Using XGBoost in SageMaker (Deploy)\n\n_Deep Learning Nanodegree Program | Deployment_\n\n---\n\nAs an introduction to using SageMaker's High Level Python API we will look at a relatively simple problem. Namely, we will use the [Boston Housing Dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) to predict the median value of a home in the area of Boston Mass.\n\nThe documentation for the high level API can be found on the [ReadTheDocs page](http://sagemaker.readthedocs.io/en/latest/)\n\n## General Outline\n\nTypically, when using a notebook instance with SageMaker, you will proceed through the following steps. Of course, not every step will need to be done with each project. Also, there is quite a lot of room for variation in many of the steps, as you will see throughout these lessons.\n\n1. Download or otherwise retrieve the data.\n2. Process / Prepare the data.\n3. Upload the processed data to S3.\n4. Train a chosen model.\n5. Test the trained model (typically using a batch transform job).\n6. Deploy the trained model.\n7. Use the deployed model.\n\nIn this notebook we will be skipping step 5, testing the model. We will still test the model but we will do so by first deploying the model and then sending the test data to the deployed model.",
"_____no_output_____"
],
[
"## Step 0: Setting up the notebook\n\nWe begin by setting up all of the necessary bits required to run our notebook. To start that means loading all of the Python modules we will need.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_boston\nimport sklearn.model_selection",
"_____no_output_____"
]
],
[
[
"In addition to the modules above, we need to import the various bits of SageMaker that we will be using. ",
"_____no_output_____"
]
],
[
[
"import sagemaker\nfrom sagemaker import get_execution_role\nfrom sagemaker.amazon.amazon_estimator import get_image_uri\nfrom sagemaker.predictor import csv_serializer\n\n# This is an object that represents the SageMaker session that we are currently operating in. This\n# object contains some useful information that we will need to access later such as our region.\nsession = sagemaker.Session()\n\n# This is an object that represents the IAM role that we are currently assigned. When we construct\n# and launch the training job later we will need to tell it what IAM role it should have. Since our\n# use case is relatively simple we will simply assign the training job the role we currently have.\nrole = get_execution_role()",
"_____no_output_____"
]
],
[
[
"## Step 1: Downloading the data\n\nFortunately, this dataset can be retrieved using sklearn and so this step is relatively straightforward.",
"_____no_output_____"
]
],
[
[
"boston = load_boston()",
"_____no_output_____"
]
],
[
[
"## Step 2: Preparing and splitting the data\n\nGiven that this is clean tabular data, we don't need to do any processing. However, we do need to split the rows in the dataset up into train, test and validation sets.",
"_____no_output_____"
]
],
[
[
"# First we package up the input data and the target variable (the median value) as pandas dataframes. This\n# will make saving the data to a file a little easier later on.\n\nX_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names)\nY_bos_pd = pd.DataFrame(boston.target)\n\n# We split the dataset into 2/3 training and 1/3 testing sets.\nX_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33)\n\n# Then we split the training set further into 2/3 training and 1/3 validation sets.\nX_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33)",
"_____no_output_____"
]
],
[
[
"## Step 3: Uploading the training and validation files to S3\n\nWhen a training job is constructed using SageMaker, a container is executed which performs the training operation. This container is given access to data that is stored in S3. This means that we need to upload the data we want to use for training to S3. We can use the SageMaker API to do this and hide some of the details.\n\n### Save the data locally\n\nFirst we need to create the train and validation csv files which we will then upload to S3.",
"_____no_output_____"
]
],
[
[
"# This is our local data directory. We need to make sure that it exists.\ndata_dir = '../data/boston'\nif not os.path.exists(data_dir):\n os.makedirs(data_dir)",
"_____no_output_____"
],
[
"# We use pandas to save our train and validation data to csv files. Note that we make sure not to include header\n# information or an index as this is required by the built in algorithms provided by Amazon. Also, it is assumed\n# that the first entry in each row is the target variable.\n\npd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)\npd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)",
"_____no_output_____"
]
],
[
[
"### Upload to S3\n\nSince we are currently running inside of a SageMaker session, we can use the object which represents this session to upload our data to the 'default' S3 bucket. Note that it is good practice to provide a custom prefix (essentially an S3 folder) to make sure that you don't accidentally interfere with data uploaded from some other notebook or project.",
"_____no_output_____"
]
],
[
[
"prefix = 'boston-xgboost-deploy-hl'\n\nval_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)\ntrain_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)",
"_____no_output_____"
]
],
[
[
"## Step 4: Train the XGBoost model\n\nNow that we have the training and validation data uploaded to S3, we can construct our XGBoost model and train it. We will be making use of the high level SageMaker API to do this which will make the resulting code a little easier to read at the cost of some flexibility.\n\nTo construct an estimator, the object which we wish to train, we need to provide the location of a container which contains the training code. Since we are using a built in algorithm this container is provided by Amazon. However, the full name of the container is a bit lengthy and depends on the region that we are operating in. Fortunately, SageMaker provides a useful utility method called `get_image_uri` that constructs the image name for us.\n\nTo use the `get_image_uri` method we need to provide it with our current region, which can be obtained from the session object, and the name of the algorithm we wish to use. In this notebook we will be using XGBoost however you could try another algorithm if you wish. The list of built in algorithms can be found in the list of [Common Parameters](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html).",
"_____no_output_____"
]
],
[
[
"# As stated above, we use this utility method to construct the image name for the training container.\ncontainer = get_image_uri(session.boto_region_name, 'xgboost')\n\n# Now that we know which container to use, we can construct the estimator object.\nxgb = sagemaker.estimator.Estimator(container, # The name of the training container\n role, # The IAM role to use (our current role in this case)\n train_instance_count=1, # The number of instances to use for training\n train_instance_type='ml.m4.xlarge', # The type of instance ot use for training\n output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),\n # Where to save the output (the model artifacts)\n sagemaker_session=session) # The current SageMaker session",
"WARNING:root:There is a more up to date SageMaker XGBoost image.To use the newer image, please set 'repo_version'='0.90-1. For example:\n\tget_image_uri(region, 'xgboost', '0.90-1').\n"
]
],
[
[
"Before asking SageMaker to begin the training job, we should probably set any model specific hyperparameters. There are quite a few that can be set when using the XGBoost algorithm, below are just a few of them. If you would like to change the hyperparameters below or modify additional ones you can find additional information on the [XGBoost hyperparameter page](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html)",
"_____no_output_____"
]
],
[
[
"xgb.set_hyperparameters(max_depth=5,\n eta=0.2,\n gamma=4,\n min_child_weight=6,\n subsample=0.8,\n objective='reg:linear',\n early_stopping_rounds=10,\n num_round=200)",
"_____no_output_____"
]
],
[
[
"Now that we have our estimator object completely set up, it is time to train it. To do this we make sure that SageMaker knows our input data is in csv format and then execute the `fit` method.",
"_____no_output_____"
]
],
[
[
"# This is a wrapper around the location of our train and validation data, to make sure that SageMaker\n# knows our data is in csv format.\ns3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')\ns3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')\n\nxgb.fit({'train': s3_input_train, 'validation': s3_input_validation})",
"2019-09-27 00:48:45 Starting - Starting the training job...\n2019-09-27 00:49:01 Starting - Launching requested ML instances......\n2019-09-27 00:50:01 Starting - Preparing the instances for training......\n2019-09-27 00:51:04 Downloading - Downloading input data\n2019-09-27 00:51:04 Training - Downloading the training image..\u001b[31mArguments: train\u001b[0m\n\u001b[31m[2019-09-27:00:51:22:INFO] Running standalone xgboost training.\u001b[0m\n\u001b[31m[2019-09-27:00:51:22:INFO] File size need to be processed in the node: 0.02mb. Available memory size in the node: 8592.59mb\u001b[0m\n\u001b[31m[2019-09-27:00:51:22:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[31m[00:51:22] S3DistributionType set as FullyReplicated\u001b[0m\n\u001b[31m[00:51:22] 227x13 matrix with 2951 entries loaded from /opt/ml/input/data/train?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[31m[2019-09-27:00:51:22:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[31m[00:51:22] S3DistributionType set as FullyReplicated\u001b[0m\n\u001b[31m[00:51:22] 112x13 matrix with 1456 entries loaded from /opt/ml/input/data/validation?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 8 extra nodes, 0 pruned nodes, max_depth=3\u001b[0m\n\u001b[31m[0]#011train-rmse:19.4283#011validation-rmse:18.5198\u001b[0m\n\u001b[31mMultiple eval metrics have been passed: 'validation-rmse' will be used for early stopping.\n\u001b[0m\n\u001b[31mWill train until validation-rmse hasn't improved in 10 rounds.\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 8 extra nodes, 0 pruned nodes, max_depth=3\u001b[0m\n\u001b[31m[1]#011train-rmse:15.8937#011validation-rmse:15.1023\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 0 pruned nodes, 
max_depth=3\u001b[0m\n\u001b[31m[2]#011train-rmse:13.0538#011validation-rmse:12.4312\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 0 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[3]#011train-rmse:10.7531#011validation-rmse:10.2276\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[4]#011train-rmse:9.00224#011validation-rmse:8.64769\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[5]#011train-rmse:7.52595#011validation-rmse:7.31931\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[6]#011train-rmse:6.32334#011validation-rmse:6.22524\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[7]#011train-rmse:5.39518#011validation-rmse:5.43374\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 20 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[8]#011train-rmse:4.60424#011validation-rmse:4.83092\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 28 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[9]#011train-rmse:3.99439#011validation-rmse:4.45347\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 24 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[10]#011train-rmse:3.5019#011validation-rmse:4.11782\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[11]#011train-rmse:3.13619#011validation-rmse:3.85609\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree 
pruning end, 1 roots, 18 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[12]#011train-rmse:2.83954#011validation-rmse:3.72274\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[13]#011train-rmse:2.59099#011validation-rmse:3.60599\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 22 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[14]#011train-rmse:2.41157#011validation-rmse:3.51922\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 20 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[15]#011train-rmse:2.25737#011validation-rmse:3.48951\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[16]#011train-rmse:2.16459#011validation-rmse:3.43156\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[17]#011train-rmse:2.0598#011validation-rmse:3.41847\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[18]#011train-rmse:1.98121#011validation-rmse:3.4047\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 24 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[19]#011train-rmse:1.88595#011validation-rmse:3.39742\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[20]#011train-rmse:1.82412#011validation-rmse:3.36511\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 2 pruned nodes, 
max_depth=5\u001b[0m\n\u001b[31m[21]#011train-rmse:1.76178#011validation-rmse:3.35221\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[22]#011train-rmse:1.72449#011validation-rmse:3.33196\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 24 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[23]#011train-rmse:1.64612#011validation-rmse:3.29645\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[24]#011train-rmse:1.59188#011validation-rmse:3.25772\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[25]#011train-rmse:1.55999#011validation-rmse:3.25213\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[26]#011train-rmse:1.52317#011validation-rmse:3.24148\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[27]#011train-rmse:1.48706#011validation-rmse:3.21867\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[28]#011train-rmse:1.4464#011validation-rmse:3.20588\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 26 extra nodes, 14 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[29]#011train-rmse:1.37875#011validation-rmse:3.20759\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[30]#011train-rmse:1.35292#011validation-rmse:3.20525\u001b[0m\n\u001b[31m[00:51:22] 
src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[31]#011train-rmse:1.31939#011validation-rmse:3.20037\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 22 extra nodes, 6 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[32]#011train-rmse:1.28053#011validation-rmse:3.2028\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 10 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[33]#011train-rmse:1.24921#011validation-rmse:3.18475\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[34]#011train-rmse:1.23073#011validation-rmse:3.17747\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 22 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[35]#011train-rmse:1.18329#011validation-rmse:3.15926\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 8 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[36]#011train-rmse:1.16861#011validation-rmse:3.16329\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 18 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[37]#011train-rmse:1.1317#011validation-rmse:3.14873\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[38]#011train-rmse:1.11413#011validation-rmse:3.15158\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[39]#011train-rmse:1.09575#011validation-rmse:3.13999\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 2 pruned nodes, 
max_depth=4\u001b[0m\n\u001b[31m[40]#011train-rmse:1.08421#011validation-rmse:3.14906\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[41]#011train-rmse:1.07083#011validation-rmse:3.13662\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 2 extra nodes, 16 pruned nodes, max_depth=1\u001b[0m\n\u001b[31m[42]#011train-rmse:1.06984#011validation-rmse:3.12598\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[43]#011train-rmse:1.04553#011validation-rmse:3.11646\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[44]#011train-rmse:1.02426#011validation-rmse:3.11679\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 24 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[45]#011train-rmse:1.00221#011validation-rmse:3.1121\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 8 extra nodes, 8 pruned nodes, max_depth=3\u001b[0m\n\u001b[31m[46]#011train-rmse:0.992057#011validation-rmse:3.117\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 18 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[47]#011train-rmse:0.980223#011validation-rmse:3.11095\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 8 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[48]#011train-rmse:0.964379#011validation-rmse:3.10015\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 6 extra nodes, 18 pruned nodes, max_depth=3\u001b[0m\n\u001b[31m[49]#011train-rmse:0.958785#011validation-rmse:3.10225\u001b[0m\n\u001b[31m[00:51:22] 
src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 6 extra nodes, 14 pruned nodes, max_depth=3\u001b[0m\n\u001b[31m[50]#011train-rmse:0.952435#011validation-rmse:3.11283\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 8 extra nodes, 18 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[51]#011train-rmse:0.946235#011validation-rmse:3.11084\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 8 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[52]#011train-rmse:0.928587#011validation-rmse:3.11306\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[53]#011train-rmse:0.914079#011validation-rmse:3.11819\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 8 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[54]#011train-rmse:0.904525#011validation-rmse:3.11659\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 4 extra nodes, 28 pruned nodes, max_depth=2\u001b[0m\n\u001b[31m[55]#011train-rmse:0.899027#011validation-rmse:3.11117\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 8 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[56]#011train-rmse:0.890184#011validation-rmse:3.10942\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 4 extra nodes, 10 pruned nodes, max_depth=2\u001b[0m\n\u001b[31m[57]#011train-rmse:0.885669#011validation-rmse:3.1071\u001b[0m\n\u001b[31m[00:51:22] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 0 extra nodes, 28 pruned nodes, max_depth=0\u001b[0m\n\u001b[31m[58]#011train-rmse:0.885731#011validation-rmse:3.10731\u001b[0m\n\u001b[31mStopping. Best iteration:\u001b[0m\n\u001b[31m[48]#011train-rmse:0.964379#011validation-rmse:3.10015\n\u001b[0m\n"
]
],
[
[
"## Step 5: Test the trained model\n\nWe will be skipping this step for now. We will still test our trained model but we are going to do it by using the deployed model, rather than setting up a batch transform job.\n\n\n## Step 6: Deploy the trained model\n\nNow that we have fit our model to the training data, using the validation data to avoid overfitting, we can deploy our model and test it. Deploying is very simple when we use the high level API, we need only call the `deploy` method of our trained estimator.\n\n**NOTE:** When deploying a model you are asking SageMaker to launch an compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.\n\nIn other words **If you are no longer using a deployed endpoint, shut it down!**",
"_____no_output_____"
]
],
[
[
"xgb_predictor = xgb.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')",
"WARNING:sagemaker:Using already existing model: xgboost-2019-09-27-00-48-45-585\n"
]
],
[
[
"## Step 7: Use the model\n\nNow that our model is trained and deployed we can send the test data to it and evaluate the results. Here, because our test data is so small, we can send it all using a single call to our endpoint. If our test dataset was larger we would need to split it up and send the data in chunks, making sure to accumulate the results.",
"_____no_output_____"
]
],
[
[
"# We need to tell the endpoint what format the data we are sending is in\nxgb_predictor.content_type = 'text/csv'\nxgb_predictor.serializer = csv_serializer\n\nY_pred = xgb_predictor.predict(X_test.values).decode('utf-8')\n# predictions is currently a comma delimited string and so we would like to break it up\n# as a numpy array.\nY_pred = np.fromstring(Y_pred, sep=',')",
"_____no_output_____"
]
],
[
[
"To see how well our model works we can create a simple scatter plot between the predicted and actual values. If the model was completely accurate the resulting scatter plot would look like the line $x=y$. As we can see, our model seems to have done okay but there is room for improvement.",
"_____no_output_____"
]
],
[
[
"plt.scatter(Y_test, Y_pred)\nplt.xlabel(\"Median Price\")\nplt.ylabel(\"Predicted Price\")\nplt.title(\"Median Price vs Predicted Price\")",
"_____no_output_____"
]
],
[
[
"## Delete the endpoint\n\nSince we are no longer using the deployed model we need to make sure to shut it down. Remember that you have to pay for the length of time that your endpoint is deployed so the longer it is left running, the more it costs.",
"_____no_output_____"
]
],
[
[
"xgb_predictor.delete_endpoint()",
"_____no_output_____"
]
],
[
[
"## Optional: Clean up\n\nThe default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook.",
"_____no_output_____"
]
],
[
[
"# First we will remove all of the files contained in the data_dir directory\n!rm $data_dir/*\n\n# And then we delete the directory itself\n!rmdir $data_dir",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec650b165462075fb4ffc9899d2dc270c29360b5 | 15,080 | ipynb | Jupyter Notebook | ADVI.ipynb | DanDoge/MCS-project | a08dd44952aab64abb93a09a7380dee0f5d7d1a8 | [
"MIT"
]
| null | null | null | ADVI.ipynb | DanDoge/MCS-project | a08dd44952aab64abb93a09a7380dee0f5d7d1a8 | [
"MIT"
]
| null | null | null | ADVI.ipynb | DanDoge/MCS-project | a08dd44952aab64abb93a09a7380dee0f5d7d1a8 | [
"MIT"
]
| 1 | 2019-11-12T13:11:40.000Z | 2019-11-12T13:11:40.000Z | 61.300813 | 7,464 | 0.696021 | [
[
[
"在不同数据集上实现ADVI算法。ADVI的模型构建和训练测试在“ADVI_bnn.py”实现",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pickle\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\nfrom dataset.UCIdataset import UCIDataset\nfrom ADVI_bnn import train, test, construct_nn",
"_____no_output_____"
]
],
[
[
"这里先定义两个显示测试结果的函数,分布得到模型在测试集上的log-likelihood和ELBO随迭代次数的变化",
"_____no_output_____"
]
],
[
[
"def log_prob(ppc, Y_test):\n pred = ppc['out']\n sd = np.std(ppc['out'], axis=0)\n logp = [np.mean(stats.norm.logpdf(pred[:, i] - Y_test[i],\n loc=0, scale=sd[i])) for i in range(len(sd))]\n print ('log-likelihood mean: %.2f'%np.mean(logp))\n print('log-likelihood std: %.3f' % np.std(logp))\n return np.mean(logp), np.std(logp)\n\n\ndef show_inference(inference, b=0, e=100):\n plt.plot(-inference.hist[b:e], alpha=.3)\n plt.ylabel('ELBO')\n plt.xlabel('iteration')\n plt.show()\n return None",
"_____no_output_____"
]
],
[
[
"打包训练和测试的函数",
"_____no_output_____"
]
],
[
[
"def run(dataset_name, root_path, hypers, shape='train_test'):\n np.random.seed(123)\n\n for time in range(hypers['times']):\n outpath = os.path.join(root_path, str(time))\n if not os.path.exists(outpath):\n os.makedirs(outpath)\n if dataset_name == 'face':\n data = FaceDataset(\"./dataset\", 0.9)\n else:\n data = UCIDataset(dataset_name, 0.9)\n\n X = np.append(data.Xtrain, data.Xtest, axis=0)\n Y = np.append(data.Ytrain, data.Ytest)\n data.Ytrain = data.Ytrain.reshape(len(data.Ytrain), )\n data.Ytest = data.Ytest.reshape(len(data.Ytest), )\n neural_network = construct_nn(\n X, Y, data.Xtrain, data.Ytrain, hypers)\n\n approx_file = os.path.join(outpath, \"approx.pkl\")\n inference_file = os.path.join(outpath, \"inference.pkl\")\n ppc_file = os.path.join(outpath, \"ppc.pkl\")\n if shape == 'train_test':\n inference, approx = train(\n neural_network, inference_file, approx_file, hypers)\n ppc = test(\n neural_network,\n approx,\n data.Xtest,\n data.Ytest,\n ppc_file,\n trace_samples=hypers['pred_samples'],\n pred_samples=hypers['pred_samples'])\n if shape == 'test':\n with open(inference_file, 'rb') as f:\n inference = pickle.load(f)\n with open(approx_file, 'rb') as f:\n approx = pickle.load(f)\n\n ppc = test(\n neural_network,\n approx,\n data.Xtest,\n data.Ytest,\n ppc_file,\n trace_samples=hypers['pred_samples'],\n pred_samples=hypers['pred_samples'])\n if shape == 'show':\n with open(inference_file, 'rb') as f:\n inference = pickle.load(f)\n with open(approx_file, 'rb') as f:\n approx = pickle.load(f)\n with open(ppc_file, 'rb') as f:\n ppc = pickle.load(f)\n\n show_inference(inference, b=0, e=hypers['n_sample'])\n log_prob(ppc, data.Ytest)\n\n return None",
"_____no_output_____"
]
],
[
[
"开始训练模型并给出测试集上的相关结果,这里以“conc”数据集为例。",
"_____no_output_____"
]
],
[
[
"dataset_name = \"conc\" #数据集名称\nroot_path = os.path.join('/home/yunnd/project/ADVI_result_test', dataset_name) #存储结果的路径\nhypers = {'conc': {'sd': 1,\n 'lr': 0.01,\n 'n_hidden': 50,\n 'n_sample': 100000,\n 'pred_samples': 5000,\n 'times': 1\n },\n 'powe': {'sd': 1,\n 'lr': 0.01,\n 'n_hidden': 50,\n 'n_sample': 100000,\n 'pred_samples': 5000,\n 'times': 1\n },\n 'yach': {'sd': 1,\n 'lr': 0.01,\n 'n_hidden': 50,\n 'n_sample': 100000,\n 'pred_samples': 5000,\n 'times': 1\n },\n 'prot': {'sd': 1,\n 'lr': 0.01,\n 'n_hidden': 50,\n 'n_sample': 100000,\n 'pred_samples': 5000,\n 'times': 1\n },\n 'face':\n {'in': {\"scale\": [[1.0]], \"shift\": [[0.0]]},\n 'out': {\"scale\": [[1.0, 0.1]], \"shift\": [[0.0, -1.0]]},\n 'hidden_dims': [50],\n 'learning_rate': 0.3,\n 'n_epochs': 800,\n 'epochs': [225, 400]\n }\n }\nrun(dataset_name, root_path, hypers[dataset_name], 'train_test')",
"Average Loss = 3,725.6: 100%|██████████| 100000/100000 [02:19<00:00, 718.67it/s] \nFinished [100%]: Average Loss = 3,725.5\n100%|██████████| 5000/5000 [00:46<00:00, 106.91it/s]\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6518b3962a4a0a64e1979d009a379c74ee1c1b | 1,104 | ipynb | Jupyter Notebook | nbs/18_interp.ipynb | vtecftwy/unpackai | 5c6ff1ff141b15430ccf02cab34e9690ec168fda | [
"MIT"
]
| 18 | 2021-08-30T00:15:06.000Z | 2022-01-28T01:41:16.000Z | nbs/18_interp.ipynb | vtecftwy/unpackai | 5c6ff1ff141b15430ccf02cab34e9690ec168fda | [
"MIT"
]
| 75 | 2021-09-01T08:13:25.000Z | 2022-02-07T13:18:55.000Z | nbs/18_interp.ipynb | vtecftwy/unpackai | 5c6ff1ff141b15430ccf02cab34e9690ec168fda | [
"MIT"
]
| 4 | 2021-08-30T03:22:28.000Z | 2021-11-13T12:48:06.000Z | 19.714286 | 74 | 0.550725 | [
[
[
"# Interpretaion\n> handles all the interpretation for learning",
"_____no_output_____"
]
],
[
[
"# default_exp interp.__init__",
"_____no_output_____"
],
[
"# export\nfrom unpackai.utils import hush\nfrom unpackai.bug import ishell, BUGBOOK\nfrom unpackai.interp.latent import CosineSearch, InterpEmbeddings\nfrom unpackai.interp.vision import (\n image_upload,\n upload_and_visualize_cnn,\n IMAGENET_CLASSES,\n IMAGENET_MEAN,\n IMAGENET_STD)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
]
]
|
ec651eba0b2056ac417b419eba70449abdadfdd6 | 34,614 | ipynb | Jupyter Notebook | Labs/Basis Functions.ipynb | wwyqianqian/COMP6509 | 3af48e462771e801253c45893dbb4fabff404e25 | [
"MIT"
]
| 2 | 2022-02-16T11:24:28.000Z | 2022-03-29T07:08:35.000Z | Labs/Basis Functions.ipynb | wwyqianqian/COMP6509 | 3af48e462771e801253c45893dbb4fabff404e25 | [
"MIT"
]
| null | null | null | Labs/Basis Functions.ipynb | wwyqianqian/COMP6509 | 3af48e462771e801253c45893dbb4fabff404e25 | [
"MIT"
]
| null | null | null | 186.096774 | 15,132 | 0.903189 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nx = np.linspace(-2, 2, 100)\nx",
"_____no_output_____"
],
[
"phi1 = np.exp(-2*(x+1)**2)\nphi2 = np.exp(-2*(x)**2)\nphi3 = np.exp(-2*(x-1)**2)",
"_____no_output_____"
],
[
"w = np.random.randn(3)\nw",
"_____no_output_____"
],
[
"plt.plot(x, w[0]*phi1 + w[1]*phi2 + w[2]*phi3, \"-\")",
"_____no_output_____"
],
[
"anotherW = np.random.randn(3)\nprint(anotherW)\nplt.plot(x, anotherW[0]*phi1 + anotherW[1]*phi2 + anotherW[2]*phi3, \"-\")",
"[0.33165082 0.5827635 0.56403497]\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec65253e02e6239ca6edcaef81021ba74cdcddca | 51,973 | ipynb | Jupyter Notebook | nbs/RndMdl0814_2_seed105.ipynb | yang-zhang/aptos2019-blindness-detection | 9f06be13ea468e5977e0552c0de1a1f8dbe836dc | [
"Apache-2.0"
]
| null | null | null | nbs/RndMdl0814_2_seed105.ipynb | yang-zhang/aptos2019-blindness-detection | 9f06be13ea468e5977e0552c0de1a1f8dbe836dc | [
"Apache-2.0"
]
| null | null | null | nbs/RndMdl0814_2_seed105.ipynb | yang-zhang/aptos2019-blindness-detection | 9f06be13ea468e5977e0552c0de1a1f8dbe836dc | [
"Apache-2.0"
]
| null | null | null | 40.010008 | 19,328 | 0.645258 | [
[
[
"# start",
"_____no_output_____"
]
],
[
[
"SEED = 105",
"_____no_output_____"
],
[
"PRFX = f'RndMdl0814_2_seed{SEED}'",
"_____no_output_____"
],
[
"p_o = f'../output/{PRFX}'\n\n# p_o = f'.'\n\nfrom pathlib import Path\nPath(p_o).mkdir(exist_ok=True, parents=True)\n",
"_____no_output_____"
],
[
"dbg = False\nif dbg: dbgsz=500\n\nfrom fastai.vision import * ",
"_____no_output_____"
]
],
[
[
"!pip install ../input/efficientnetpytorch/efficientnet_pytorch-0.3.0-py3-none-any.whl",
"_____no_output_____"
]
],
[
[
"!nvidia-smi",
"Thu Aug 15 04:27:00 2019 \r\n+-----------------------------------------------------------------------------+\r\n| NVIDIA-SMI 418.56 Driver Version: 418.56 CUDA Version: 10.1 |\r\n|-------------------------------+----------------------+----------------------+\r\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\r\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\r\n|===============================+======================+======================|\r\n| 0 Tesla V100-SXM2... Off | 00000000:00:1E.0 Off | 0 |\r\n| N/A 44C P0 41W / 300W | 10MiB / 16130MiB | 0% Default |\r\n+-------------------------------+----------------------+----------------------+\r\n \r\n+-----------------------------------------------------------------------------+\r\n| Processes: GPU Memory |\r\n| GPU PID Type Process name Usage |\r\n|=============================================================================|\r\n| No running processes found |\r\n+-----------------------------------------------------------------------------+\r\n"
],
[
"# Downloading: \"http://storage.googleapis.com/public-models/efficientnet-b3-c8376fa2.pth\" to /tmp/.cache/torch/checkpoints/efficientnet-b3-c8376fa2.pth\nimport os\nif not os.path.exists('/tmp/.cache/torch/checkpoints/'):\n os.makedirs('/tmp/.cache/torch/checkpoints/')\n\n!cp ../input/efficientnetpytorch/*.pth /tmp/.cache/torch/checkpoints/",
"_____no_output_____"
]
],
[
[
"# params",
"_____no_output_____"
]
],
[
[
"BS = 16\nFP16 = True\nPERC_VAL = 0.1\nWD = 0.01\n\n\nMODEL_NAME = 'efficientnet-b5'\nfrom efficientnet_pytorch import EfficientNet\nSZ = EfficientNet.get_image_size(MODEL_NAME)\nfor i in range(6):\n print(f'efficientnet-b{i} size', EfficientNet.get_image_size(f'efficientnet-b{i}'))\nprint('SZ:', SZ)",
"efficientnet-b0 size 224\nefficientnet-b1 size 240\nefficientnet-b2 size 260\nefficientnet-b3 size 300\nefficientnet-b4 size 380\nefficientnet-b5 size 456\nSZ: 456\n"
]
],
[
[
"## img proc",
"_____no_output_____"
]
],
[
[
"use_open_yz = True\n\nfrom fastai.vision import *\nimport cv2\ndef load_ben_color(fn)->Image:\n image = cv2.imread(fn)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n# image = crop_image_from_gray(image)\n image, _ = crop_margin(image)\n image = center_crop(image)\n image = cv2.resize(image, (640, 480))#most common in test\n# image = cv2.resize(image, (SZ, SZ))\n image = cv2.addWeighted ( image,4, cv2.GaussianBlur( image , (0,0) , sigmaX=10) , -4 ,128)\n return image",
"_____no_output_____"
]
],
[
[
"> get_transforms(do_flip:bool=True, flip_vert:bool=False, max_rotate:float=10.0, max_zoom:float=1.1, max_lighting:float=0.2, max_warp:float=0.2, p_affine:float=0.75, p_lighting:float=0.75, xtra_tfms:Optional[Collection[Transform]]=None) → Collection[Transform]",
"_____no_output_____"
]
],
[
[
"params_tfms = dict(\n do_flip=True,\n flip_vert=True,\n max_rotate=360,\n)",
"_____no_output_____"
]
],
[
[
"> By default, the library resizes the image while keeping its original ratio so that the smaller size corresponds to the given size, then takes a crop (ResizeMethod.CROP). You can choose to resize the image while keeping its original ratio so that the bigger size corresponds to the given size, then take a pad (ResizeMethod.PAD). Another way is to just squish the image to the given size (ResizeMethod.SQUISH).",
"_____no_output_____"
]
],
[
[
"kwargs_tfms = dict(\n resize_method=ResizeMethod.SQUISH,\n padding_mode='zeros'\n)",
"_____no_output_____"
]
],
[
[
"# setup",
"_____no_output_____"
]
],
[
[
"from fastai import *\nfrom fastai.vision import *\nfrom fastai.callbacks import *",
"_____no_output_____"
]
],
[
[
"## set seed",
"_____no_output_____"
]
],
[
[
"def set_torch_seed(seed=SEED):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n \n if torch.cuda.is_available(): \n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) \n torch.backends.cudnn.deterministic = True\n# torch.backends.cudnn.benchmark = False\nset_torch_seed()",
"_____no_output_____"
]
],
[
[
"## image processing",
"_____no_output_____"
]
],
[
[
"import cv2\n\ndef crop_margin(image, keep_less=0.83):\n \n output = image.copy()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n ret,gray = cv2.threshold(gray,10,255,cv2.THRESH_BINARY)\n contours,hierarchy = cv2.findContours(gray,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n if not contours:\n #print('no contours!')\n flag = 0\n return image, flag\n cnt = max(contours, key=cv2.contourArea)\n ((x, y), r) = cv2.minEnclosingCircle(cnt)\n r = r*keep_less\n x = int(x); y = int(y); r = int(r)\n flag = 1\n #print(x,y,r)\n if r > 100:\n return output[0 + (y-r)*int(r<y):-1 + (y+r+1)*int(r<y),0 + (x-r)*int(r<x):-1 + (x+r+1)*int(r<x)], flag\n else:\n #print('none!')\n flag = 0\n return image,flag\n\n \ndef crop_image1(img,tol=7):\n # img is image data\n # tol is tolerance\n \n mask = img>tol\n return img[np.ix_(mask.any(1),mask.any(0))]\n\ndef crop_image_from_gray(img,tol=7):\n if img.ndim ==2:\n mask = img>tol\n return img[np.ix_(mask.any(1),mask.any(0))]\n elif img.ndim==3:\n gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n mask = gray_img>tol\n \n check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]\n if (check_shape == 0): # image is too dark so that we crop out everything,\n return img # return original image\n else:\n img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))]\n img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))]\n img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))]\n # print(img1.shape,img2.shape,img3.shape)\n img = np.stack([img1,img2,img3],axis=-1)\n # print(img.shape)\n return img\n \n# https://stackoverflow.com/questions/16646183/crop-an-image-in-the-centre-using-pil\ndef center_crop(img): \n \n h0, w0 = 480, 640 #most common in test\n ratio = h0/w0 #most common in test\n height, width, _= img.shape\n new_width, new_height = width, math.ceil(width*ratio)\n\n width = img.shape[1]\n height = img.shape[0]\n\n if new_width is None:\n new_width = min(width, height)\n\n if new_height is None:\n new_height = min(width, height)\n\n left = 
int(np.ceil((width - new_width) / 2))\n right = width - int(np.floor((width - new_width) / 2))\n\n top = int(np.ceil((height - new_height) / 2))\n bottom = height - int(np.floor((height - new_height) / 2))\n\n if len(img.shape) == 2:\n center_cropped_img = img[top:bottom, left:right]\n else:\n center_cropped_img = img[top:bottom, left:right, ...]\n\n return center_cropped_img\n\ndef open_yz(fn, convert_mode, after_open)->Image:\n image = load_ben_color(fn)\n return Image(pil2tensor(image, np.float32).div_(255))\n \nif use_open_yz:\n vision.data.open_image = open_yz",
"_____no_output_____"
]
],
[
[
"## QWK",
"_____no_output_____"
]
],
[
[
"import scipy as sp\nfrom sklearn.metrics import cohen_kappa_score\n\ndef quadratic_weighted_kappa(y1, y2):\n return cohen_kappa_score(y1, y2, weights='quadratic')\n\ndef qwk(y_pred, y):\n return torch.tensor(\n# quadratic_weighted_kappa(torch.round(y_pred), y),\n quadratic_weighted_kappa(np.argmax(y_pred,1), y),\n device='cuda:0')",
"_____no_output_____"
]
],
[
[
"## TTTA",
"_____no_output_____"
]
],
[
[
"from fastai.core import *\nfrom fastai.basic_data import *\nfrom fastai.basic_train import *\nfrom fastai.torch_core import *\ndef _tta_only(learn:Learner, ds_type:DatasetType=DatasetType.Valid, num_pred:int=5) -> Iterator[List[Tensor]]:\n \"Computes the outputs for several augmented inputs for TTA\"\n dl = learn.dl(ds_type)\n ds = dl.dataset\n old = ds.tfms\n aug_tfms = [o for o in learn.data.train_ds.tfms if o.tfm !=zoom]\n try:\n pbar = master_bar(range(num_pred))\n for i in pbar:\n ds.tfms = aug_tfms\n yield get_preds(learn.model, dl, pbar=pbar)[0]\n finally: ds.tfms = old\n\nLearner.tta_only = _tta_only\n\ndef _TTA(learn:Learner, beta:float=0, ds_type:DatasetType=DatasetType.Valid, num_pred:int=5, with_loss:bool=False) -> Tensors:\n \"Applies TTA to predict on `ds_type` dataset.\"\n preds,y = learn.get_preds(ds_type)\n all_preds = list(learn.tta_only(ds_type=ds_type, num_pred=num_pred))\n avg_preds = torch.stack(all_preds).mean(0)\n if beta is None: return preds,avg_preds,y\n else: \n final_preds = preds*beta + avg_preds*(1-beta)\n if with_loss: \n with NoneReduceOnCPU(learn.loss_func) as lf: loss = lf(final_preds, y)\n return final_preds, y, loss\n return final_preds, y\n\nLearner.TTA = _TTA",
"_____no_output_____"
]
],
[
[
"# preprocess",
"_____no_output_____"
],
[
"## prep",
"_____no_output_____"
]
],
[
[
"img2grd = []\n\np = '../input/aptos2019-blindness-detection'\npp = Path(p)\ntrain = pd.read_csv(pp/'train.csv')\ntest = pd.read_csv(pp/'test.csv')\nlen_blnd = len(train)\nlen_blnd_test = len(test)\n\nimg2grd_blnd = [(f'{p}/train_images/{o[0]}.png',o[1],'blnd') for o in train.values]\n\nlen_blnd, len_blnd_test",
"_____no_output_____"
],
[
"img2grd += img2grd_blnd\ndisplay(len(img2grd))\ncnt = Counter(o[1] for o in img2grd)\nt2c_trn_has = dict(cnt)\ndisplay(cnt.most_common())\nsm = sum(cnt.values())\ndisplay([(o[0], o[1]/sm) for o in cnt.most_common()])",
"_____no_output_____"
],
[
"p = '../input/diabetic-retinopathy-resized'\npp = Path(p)\ntrain = pd.read_csv(pp/'trainLabels.csv')\nimg2grd_diab = [(f'{p}/resized_train/{o[0]}.jpeg',o[1],'diab') for o in train.values]\n# img2grd_diab = [(f'{p}/resized_train/resized_train/{o[0]}.jpeg',o[1],'diab') for o in train.values]\nimg2grd += img2grd_diab\n",
"_____no_output_____"
],
[
"df = pd.DataFrame(img2grd)\ndf.columns = ['fnm', 'target', 'src']\ndf = df.reset_index()\ndf.shape",
"_____no_output_____"
],
[
"df.sample(5).values",
"_____no_output_____"
],
[
"if not np.all([Path(o[0]).exists() for o in img2grd]): print('Some files are missing!!!')",
"_____no_output_____"
]
],
[
[
"## df2use",
"_____no_output_____"
]
],
[
[
"df.target.value_counts()",
"_____no_output_____"
],
[
"df2use = df[df.src=='blnd'].copy()\n\ndf2use.target.value_counts()",
"_____no_output_____"
],
[
"def get_randint(low=300, high=900):\n res = np.random.randn()*300+600\n return int(min(max(low, res), high))\n\n# set_torch_seed()\nn_t_extra = {2:get_randint(),3:get_randint(),4:get_randint(),1:get_randint()}\nn_t_extra",
"_____no_output_____"
],
[
"set_torch_seed()\nfor t,n in n_t_extra.items():\n df_t_diab = df[(df.target==t) & (df.src=='diab')]\n df2use = pd.concat([df2use, df_t_diab.sample(min(n, len(df_t_diab)))])",
"_____no_output_____"
],
[
"df2use.shape",
"_____no_output_____"
],
[
"df2use.target.value_counts()",
"_____no_output_____"
],
[
"if dbg: \n df2use = df2use.head(dbgsz)",
"_____no_output_____"
]
],
[
[
"## dataset",
"_____no_output_____"
]
],
[
[
"%%time\ntfms = get_transforms(**params_tfms)\n\ndef get_data(sz=SZ, bs=BS):\n src = (ImageList.from_df(df=df2use,path='./',cols='fnm') \n# .split_by_rand_pct(0.2) \n .split_none()\n .label_from_df(cols='target', \n #label_cls=FloatList\n )\n )\n\n data= (src.transform(tfms, size=sz,\n **kwargs_tfms\n ) #Data augmentation\n .databunch(bs=bs) #DataBunch\n .normalize(imagenet_stats) #Normalize \n )\n return data\n\n\nset_torch_seed()\ndata = get_data()",
"CPU times: user 4.95 s, sys: 157 ms, total: 5.11 s\nWall time: 2.7 s\n"
]
],
[
[
"%%time\ndata.show_batch(rows=3, figsize=(10, 10))",
"_____no_output_____"
]
],
[
[
"## add test dataset",
"_____no_output_____"
]
],
[
[
"p = '../input/aptos2019-blindness-detection'\npp = Path(p)\ntest = pd.read_csv(pp/'test.csv')",
"_____no_output_____"
],
[
"if dbg: test = test.head(dbgsz)",
"_____no_output_____"
],
[
"data.add_test(ImageList.from_df(test,\n '../input/aptos2019-blindness-detection',\n folder='test_images',\n suffix='.png'))",
"_____no_output_____"
]
],
[
[
"%%time\ndata.show_batch(rows=3, figsize=(10, 10), ds_type=DatasetType.Test)",
"_____no_output_____"
]
],
[
[
"## train",
"_____no_output_____"
]
],
[
[
"model = EfficientNet.from_pretrained(MODEL_NAME, num_classes=5) \nlearn = Learner(data, model, path=p_o, \n# wd=WD, \n# metrics=[accuracy, qwk],\n )\nif FP16: learn = learn.to_fp16()",
"Loaded pretrained weights for efficientnet-b5\n"
]
],
[
[
"%%time\nlearn.lr_find()",
"_____no_output_____"
],
[
"!nvidia-smi",
"_____no_output_____"
],
[
"learn.recorder.plot(suggestion=True, skip_end=15)",
"_____no_output_____"
]
],
[
[
"set_torch_seed()\nlearn.fit_one_cycle(10, max_lr=1e-3, \n# callbacks=[SaveModelCallback(learn, \n# every='epoch', \n# name=f'{PRFX}_model')]\n )\n\n",
"_____no_output_____"
],
[
"learn.save(f'rndmdl_seed_{SEED}')",
"_____no_output_____"
],
[
"learn.recorder.plot_losses()",
"_____no_output_____"
]
],
[
[
"# testing",
"_____no_output_____"
]
],
[
[
"learn = learn.to_fp32()",
"_____no_output_____"
],
[
"%%time\nset_torch_seed()\npreds_tst, _ = learn.get_preds(ds_type=DatasetType.Test)\npreds_tst = preds_tst.numpy().squeeze()\npreds_tst = np.argmax(preds_tst, 1)",
"CPU times: user 12.7 s, sys: 4.4 s, total: 17.1 s\nWall time: 50.8 s\n"
]
],
[
[
"%%time\nset_torch_seed()\npreds_tst_tta, _ = learn.TTA(ds_type=DatasetType.Test)\npreds_tst_tta = preds_tst_tta.numpy().squeeze()\npreds_tst_tta = np.argmax(preds_tst_tta, 1)",
"_____no_output_____"
]
],
[
[
"pd.Series(preds_tst.astype(int)).value_counts()",
"_____no_output_____"
]
],
[
[
"pd.Series(preds_tst_tta.astype(int)).value_counts()",
"_____no_output_____"
]
],
[
[
"## submit",
"_____no_output_____"
]
],
[
[
"subm = pd.read_csv(\"../input/aptos2019-blindness-detection/test.csv\")\nsubm['diagnosis'] = preds_tst\nsubm.head()",
"_____no_output_____"
],
[
"subm.diagnosis.value_counts()",
"_____no_output_____"
],
[
"subm.to_csv(f\"{p_o}/submission.csv\", index=False)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"markdown",
"code",
"raw",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"raw",
"code",
"raw",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"raw"
],
[
"markdown"
],
[
"code"
],
[
"raw",
"raw",
"raw"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec6528f2c4c5aa4bd4af6cd6ffb6e75ebd81f58c | 354,064 | ipynb | Jupyter Notebook | LigninPolymerizationNotebook.ipynb | runxuanjiang/lignin-kmc | 458428eb4e730ce43417ddf5a6eda9ba9f21072e | [
"MIT"
]
| 5 | 2019-06-05T00:53:05.000Z | 2021-09-20T07:55:44.000Z | LigninPolymerizationNotebook.ipynb | runxuanjiang/lignin-kmc | 458428eb4e730ce43417ddf5a6eda9ba9f21072e | [
"MIT"
]
| 11 | 2019-08-05T18:45:15.000Z | 2021-09-20T18:41:49.000Z | LigninPolymerizationNotebook.ipynb | runxuanjiang/lignin-kmc | 458428eb4e730ce43417ddf5a6eda9ba9f21072e | [
"MIT"
]
| 6 | 2019-07-02T20:08:13.000Z | 2022-03-01T03:13:07.000Z | 169.489708 | 27,272 | 0.860779 | [
[
[
"# Kinetic Monte Carlo Simulation of Lignin Polymerization\nWritten by: Michael Orella <br>\n2019 January 24 <br>\nUpdated by: Heather Mayes <br>\n2019 November 27 <br>\n\nThe code in this notebook performs calculations analogous to those performed in [\"Lignin-KMC: A Toolkit for Simulating Lignin Biosynthesis\"](https://pubs.acs.org/doi/abs/10.1021/acssuschemeng.9b03534), which depend on the results that were obtained from [DFT calculations of monolignol coupling kinetics](https://pubs.acs.org/doi/abs/10.1021/acssuschemeng.9b02506).\n\n**Note:** this notebook directly calls various functions in the package. Users can also run Lignin-KMC from the command line, without the need to directly interact with the python code. See the [README on github](https://github.com/michaelorella/lignin-kmc).",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"# functions, global variables, and default values (described below) from the lignin-kmc Package \nfrom ligninkmc.create_lignin import (calc_rates, create_initial_monomers, create_initial_events, \n create_initial_state, DEF_ADD_RATE)\nfrom ligninkmc.kmc_functions import (run_kmc, generate_mol, analyze_adj_matrix)\nfrom ligninkmc.kmc_common import (DEF_E_BARRIER_KCAL_MOL, GROW, OX, BO4, B5, BB, B1, C5O4, AO4, C5C5, G, S, C,\n MONOMER, MONO_LIST, ADJ_MATRIX, BONDS, CHAIN_LEN, RCF_YIELDS, Event, Monomer)\n\n# General Math and LinAlg\nimport numpy as np\nfrom scipy import optimize\n\n# Chemical Drawing\nfrom rdkit.Chem import MolFromMolBlock\nfrom rdkit.Chem.AllChem import Compute2DCoords\nfrom rdkit.Chem.Draw import MolToImage\nfrom IPython.display import display\n\n#Plotting\nimport matplotlib.pyplot as plt\n\n# For performance\nimport time\n\n#Parallelization\nimport joblib as par",
"_____no_output_____"
]
],
[
[
"## Input Data\nThe kinetic monte carlo code relies on rates of chemical reactions. The reactions that we are interested in here are the coupling of radicals on either individual monomers or oligomers respectively. The energetics necessary to compute the reaction rates were calculated by Terry Gani using [DFT methods](https://pubs.acs.org/doi/abs/10.1021/acssuschemeng.9b02506) for SG lignin and similar methods for C lignin. Once the reaction energies were calculated, they could converted to reaction rates using the Eyring equation:\n$$ r_i = \\dfrac{k_BT}{h}\\exp\\left(-\\dfrac{\\Delta G_i}{k_BT}\\right) $$\n\nNote: the temperature used must match the temperature at which the energy barriers were calculated.\nThe these Gibbs free energy barriers (at 298.15 K and 1 atm), as reported in the [Lignin-KMC paper](https://pubs.acs.org/doi/abs/10.1021/acssuschemeng.9b03534), [Tables S1 and S2](https://pubs.acs.org/doi/suppl/10.1021/acssuschemeng.9b03534/suppl_file/sc9b03534_si_001.pdf), are stored in the package's global variable DEF_E_BARRIER_KCAL_MOL (imported above). \n\nThe user may use other values. The assignment of energy barriers (in kcal/mol) to this global variable is shown below (using global variables for reaction types to help prevent typos from directly using strings) to provide a template for how to assign alternate values. Of course, the user only needs to (and must) supply energy barriers (or directly supply reaction rates) only for the monomer types to be modeled and the reaction types to be modeled at the temperature of interest. Here, the bond names stands for creation of that bond.\n\nThroughout the code, monomers are kept track of individually through the state, so there are no reaction degeneracies occurring, and bond events can be tracked faithfully.\n\nThe monomer types are: 0: guaiacol, 1: syringyl, 2: caffeoyl",
"_____no_output_____"
]
],
[
[
"# The definition for the default Gibbs free energy barriers in kcal/mol (at 298.15 K and 1 atm), shown here as a \n# template for user-input values. The gloval variables MONOMER, OLIGOMER, BO4, etc., can be imported from kmc.kmc_common. \n# OLIGOMER stands for dimer or longer (\"2+\" in the SI)\nDEF_E_BARRIER_KCAL_MOL = {C5O4: {((G, G): {(MONOMER, MONOMER): 11.2, (MONOMER, OLIGOMER): 14.6, \n (OLIGOMER, MONOMER): 14.6, (OLIGOMER, OLIGOMER): 4.4},\n ((S, G)): {(MONOMER, MONOMER): 10.9, (MONOMER, OLIGOMER): 14.6, \n (OLIGOMER, MONOMER): 14.6, (OLIGOMER, OLIGOMER): 4.4},\n ((C, C)): {(MONOMER, MONOMER): 11.9, (MONOMER, OLIGOMER): 11.9,\n (OLIGOMER, MONOMER): 11.9, (OLIGOMER, OLIGOMER): 11.9}},\n C5C5: {((G, G): {(MONOMER, MONOMER): 12.5, (MONOMER, OLIGOMER): 15.6, \n (OLIGOMER, MONOMER): 15.6, (OLIGOMER, OLIGOMER): 3.8},\n ((C, C)): {(MONOMER, MONOMER): 10.6, (MONOMER, OLIGOMER): 10.6,\n (OLIGOMER, MONOMER): 10.6, (OLIGOMER, OLIGOMER): 10.6}},\n B5: {((G, G): {(MONOMER, MONOMER): 5.5, (MONOMER, OLIGOMER): 5.8, \n (OLIGOMER, MONOMER): 5.8, (OLIGOMER, OLIGOMER): 5.8},\n ((G, S)): {(MONOMER, MONOMER): 5.5, (MONOMER, OLIGOMER): 5.8, \n (OLIGOMER, MONOMER): 5.8, (OLIGOMER, OLIGOMER): 5.8},\n ((C, C)): {(MONOMER, MONOMER): 1.9, (MONOMER, OLIGOMER): 5.8,\n (OLIGOMER, MONOMER): 5.8, (OLIGOMER, OLIGOMER): 5.8}},\n BB: {((G, G): {(MONOMER, MONOMER): 5.2, (MONOMER, OLIGOMER): 5.2,\n (OLIGOMER, MONOMER): 5.2, (OLIGOMER, OLIGOMER): 5.2},\n ((S, G)): {(MONOMER, MONOMER): 6.5, (MONOMER, OLIGOMER): 6.5, \n (OLIGOMER, MONOMER): 6.5, (OLIGOMER, OLIGOMER): 6.5},\n ((G, S)): {(MONOMER, MONOMER): 6.5, (MONOMER, OLIGOMER): 6.5, \n (OLIGOMER, MONOMER): 6.5, (OLIGOMER, OLIGOMER): 6.5},\n ((S, S)): {(MONOMER, MONOMER): 5.2, (MONOMER, OLIGOMER): 5.2, \n (OLIGOMER, MONOMER): 5.2, (OLIGOMER, OLIGOMER): 5.2},\n ((C, C)): {(MONOMER, MONOMER): 7.2, (MONOMER, OLIGOMER): 7.2,\n (OLIGOMER, MONOMER): 7.2, (OLIGOMER, OLIGOMER): 7.2}},\n BO4: {((G, G): {(MONOMER, MONOMER): 6.3, (MONOMER, OLIGOMER): 6.2, 
\n (OLIGOMER, MONOMER): 6.2, (OLIGOMER, OLIGOMER): 6.2},\n ((S, G)): {(MONOMER, MONOMER): 9.1, (MONOMER, OLIGOMER): 6.2,\n (OLIGOMER, MONOMER): 6.2, (OLIGOMER, OLIGOMER): 6.2},\n ((G, S)): {(MONOMER, MONOMER): 8.9, (MONOMER, OLIGOMER): 6.2,\n (OLIGOMER, MONOMER): 6.2, (OLIGOMER, OLIGOMER): 6.2},\n ((S, S)): {(MONOMER, MONOMER): 9.8, (MONOMER, OLIGOMER): 10.4,\n (OLIGOMER, MONOMER): 10.4}, (OLIGOMER, OLIGOMER): 10.4},\n ((C, C)): {(MONOMER, MONOMER): 4.9, (MONOMER, OLIGOMER): 1.3,\n (OLIGOMER, MONOMER): 1.3, (OLIGOMER, OLIGOMER): 1.3},\n AO4: {((G, G): {(MONOMER, MONOMER): 20.7, (MONOMER, OLIGOMER): 20.7,\n (OLIGOMER, MONOMER): 20.7, (OLIGOMER, OLIGOMER): 20.7},\n ((S, G)): {(MONOMER, MONOMER): 20.7, (MONOMER, OLIGOMER): 20.7,\n (OLIGOMER, MONOMER): 20.7, (OLIGOMER, OLIGOMER): 20.7},\n ((G, S)): {(MONOMER, MONOMER): 20.7, (MONOMER, OLIGOMER): 20.7,\n (OLIGOMER, MONOMER): 20.7, (OLIGOMER, OLIGOMER): 20.7},\n ((S, S)): {(MONOMER, MONOMER): 20.7, (MONOMER, OLIGOMER): 20.7,\n (OLIGOMER, MONOMER): 20.7, (OLIGOMER, OLIGOMER): 20.7},\n ((C, C)): {(MONOMER, MONOMER): 20.7, (MONOMER, OLIGOMER): 20.7,\n (OLIGOMER, MONOMER): 20.7, (OLIGOMER, OLIGOMER): 20.7}},\n B1: {((G, G): {(MONOMER, OLIGOMER): 9.6, (OLIGOMER, MONOMER): 9.6, (OLIGOMER, OLIGOMER): 9.6},\n ((S, G)): {(MONOMER, OLIGOMER): 11.7, (OLIGOMER, MONOMER): 11.7, (OLIGOMER, OLIGOMER): 11.7},\n ((G, S)): {(MONOMER, OLIGOMER): 10.7, (OLIGOMER, MONOMER): 10.7, (OLIGOMER, OLIGOMER): 10.7},\n ((S, S)): {(MONOMER, OLIGOMER): 11.9, (OLIGOMER, MONOMER): 11.9, (OLIGOMER, OLIGOMER): 11.9},\n ((C, C)): {(MONOMER, OLIGOMER): 9.6, (OLIGOMER, MONOMER): 9.6, (OLIGOMER, OLIGOMER): 9.6}},\n OX: {G: {MONOMER: 0.9, OLIGOMER: 6.3}, S: {MONOMER: 0.6, OLIGOMER: 2.2}, \n C: {MONOMER: 0.9, OLIGOMER: 0.9}},\n Q: {G: {MONOMER: 11.1, OLIGOMER: 11.1}, S: {MONOMER: 11.7, OLIGOMER: 11.7},\n C: {MONOMER: 11.1, OLIGOMER: 11.1}}}",
"_____no_output_____"
]
],
[
[
"# Calculate the rates of reaction in 1/s at the specified temp\ntemp = 298.15 # K\nrxn_rates = calc_rates(temp, ea_kcal_mol_dict=DEF_E_BARRIER_KCAL_MOL)\n\n# if the user has instead input Gibbs free energy barriers in Joules per particle (e.g., as `my_ea_j_part_dict`), \n# instead invoke:\n# rxn_rates = calc_rates(temp, ea_j_part_dict=my_ea_j_part_dict)",
"_____no_output_____"
]
],
[
[
"## Code Performance\nOne of the first things that we were interested in reporting was the performance and scaling of this code package. This was done by replicating runs of the algorithm with different numbers of monomers included in the simulation under batch conditions.",
"_____no_output_____"
]
],
[
[
"# Here, we are testing with equal amount of S and G (no C)\ntimes = []\nsg_ratio = 1\npct_s = sg_ratio / (1 + sg_ratio)\n\ntest_vals = np.linspace(50, 150, num=3, dtype ='int32')\nnum_repeats = 5\nfor num_monos in test_vals:\n times.append([])\n for _ in range(num_repeats):\n # Generate the initial monomers and events (oxidation)\n monomer_draw = np.random.rand(num_monos)\n initial_monomers = create_initial_monomers(pct_s, monomer_draw)\n initial_events = create_initial_events(initial_monomers, rxn_rates)\n \n # Set the state and add the option to join initial monomers\n initial_state = create_initial_state(initial_events, initial_monomers)\n initial_events.append(Event(GROW, [], rate=DEF_ADD_RATE))\n \n #Start timing the actual KMC part\n start = time.time()\n run_kmc(rxn_rates, initial_state, initial_events, sg_ratio=sg_ratio)\n end = time.time()\n times[-1].append(end-start)\n print(f'Time to complete simulation with {num_monos:5n} monomers: {np.sum(times[-1])/num_repeats:7.2f} seconds')",
"Time to complete simulation with 50 monomers: 0.27 seconds\nbo4 reaction between oligomers with 6 and 89\nTime to complete simulation with 100 monomers: 1.11 seconds\nTime to complete simulation with 150 monomers: 2.53 seconds\n"
]
],
[
[
"Now we want to fit the times that we just calculated to a generic power law expression $t = aN^b$ to find the scaling of our algorithm.",
"_____no_output_____"
]
],
[
[
"meas_t = [np.mean(time) for time in times]\nmeas_n = test_vals\n\nsim_t = lambda p, n: p[0] * np.power (n, p[1])\nloss = lambda p: np.linalg.norm(sim_t(p, meas_n) - meas_t)\n\nresults = optimize.minimize(loss, np.asarray([1e-5, 2.5]), bounds=[[0,1], [0,10]], options={'disp': True})\nprint(results)\n\nopt_p = results.x",
" fun: 0.0027496531269533327\n hess_inv: <2x2 LbfgsInvHessProduct with dtype=float64>\n jac: array([1.01339909e+03, 1.92533730e-01])\n message: b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH'\n nfev: 234\n nit: 43\n status: 0\n success: True\n x: array([9.39401741e-05, 2.03562879e+00])\n"
]
],
[
[
"Now we should plot both the measured values and the fit all together",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(test_vals, [np.mean(time) for time in times],\n yerr=[np.sqrt(np.var(time)) for time in times],\n capsize=3, ecolor='black', linestyle='None', marker='.', markerSize=15, color='black', zorder=1)\nplt.plot(test_vals, sim_t(opt_p,meas_n), linestyle='--', color='r', linewidth=1.5, zorder=2)\nplt.tick_params(axis='both', which ='major', labelsize=10, direction='in',\n pad=8, top = True, right=True, width=1.5, length=5)\nplt.tick_params(axis='both', which='minor', direction='in',\n pad=8, top=True, right=True, width=1, length=3)\nax = plt.gca()\n[ax.spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nax.fontsize = 10\nplt.xlabel('Number of Monomers', fontsize=10)\nplt.ylabel('Execution Time (s)', fontsize=10)\nplt.yscale('log')\nplt.xscale('log')\nplt.ylim([0.2, 200])\nplt.xlim([40, 200])\nplt.text(75, 0.4, f'$t = {opt_p[0]:3.1e}N^{{ {opt_p[1]:4.2f} }}$', fontsize=10, color='red')\nplt.text(200, 200, r'Measured', fontsize=10, color='black')\n# plt.savefig('performance.svg', format='svg', transparent=True, bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"## Example KMC Runs\nSmall examples of running the algorithm for KMC, using both SG lignin and C lignin.",
"_____no_output_____"
],
[
"### SG Lignin\nRun an example starting with 2 monomers and an S to G ratio of 1",
"_____no_output_____"
]
],
[
[
"num_monos = 2\nsg_ratio = 1\npct_s = sg_ratio / (1 + sg_ratio)\n\nmonomer_draw = np.random.rand(num_monos)\ninitial_monomers = create_initial_monomers(pct_s, monomer_draw)\ninitial_events = create_initial_events(initial_monomers, rxn_rates)\ninitial_state = create_initial_state(initial_events, initial_monomers)\ninitial_events.append(Event(GROW, [], rate=DEF_ADD_RATE))\n\nresult = run_kmc(rxn_rates, initial_state, initial_events, n_max=10, t_max=1, sg_ratio=sg_ratio)\nnodes = result[MONO_LIST]\nadj = result[ADJ_MATRIX]\nblock = generate_mol(adj, nodes)\nmol = MolFromMolBlock(block)\nCompute2DCoords(mol)\ndisplay(MolToImage(mol, size=(950, 250)))",
"_____no_output_____"
]
],
[
[
"### C lignin\nRun an example with 2 monomers but using the C lignin energies",
"_____no_output_____"
]
],
[
[
"ini_num_monos = 2\n\ninitial_monomers = [Monomer(C, i) for i in range(ini_num_monos)]\ninitial_events = [Event(OX, [i], rxn_rates[OX][C][MONOMER]) for i in range(len(initial_monomers))]\ninitial_state = create_initial_state(initial_events, initial_monomers)\n# `events` may be a set or a list\nevents = {initial_events[i] for i in range(len(initial_monomers))}\nevents.add(Event(GROW, [], rate=DEF_ADD_RATE))\n\nresult = run_kmc(rxn_rates, initial_state, sorted(events), n_max=10, t_max=1)\n\nnodes = result[MONO_LIST]\nadj = result[ADJ_MATRIX]\nblock = generate_mol(adj, nodes)\nmol = MolFromMolBlock(block)\nCompute2DCoords(mol)\ndisplay(MolToImage(mol, size=(950, 250)))",
"_____no_output_____"
]
],
[
[
"## Sensitivity Analyses Examples\nThe meat of the results and discussion for our paper lay in the predictions of how lignin composition should change with different sets of parameters used for lignification. These calculations were performed on desktop hardware over about a week's period, but for the sake of explanation, shorter examples are used here. We investigated the impact of S to G ratio and addition rate primarily.",
"_____no_output_____"
],
[
"### SG Batch Sensitivity\nThe first analysis performed is the dependence of monomer yields and bond contents on SG ratio, where we selected multiple SG ratios between 0.1 and 10 and ran the simulations for these scenarios.",
"_____no_output_____"
]
],
[
[
"sg_opts = [0.1, 0.2, 0.25, 0.33, 0.5, 1, 2, 3, 4, 5, 10]\nnum_repeats = 5\nnum_monos = 200\n\nfun = par.delayed(run_kmc)\nsg_result_list = []\n\nfor sg_ratio in sg_opts:\n # Set the percentage of S\n pct_s = sg_ratio / (1 + sg_ratio)\n \n # Make choices about what kinds of monomers there are and create them\n monomer_draw = np.random.rand(num_monos)\n initial_monomers = create_initial_monomers(pct_s, monomer_draw)\n \n # Initialize the monomers, events, and state\n initial_events = create_initial_events(initial_monomers, rxn_rates)\n initial_state = create_initial_state(initial_events, initial_monomers)\n\n results = par.Parallel(n_jobs=4)([fun(rxn_rates, initial_state, initial_events, n_max=num_monos, t_max=1)\n for _ in range(num_repeats)])\n \n sg_result_list.append(results)\n t = time.localtime()\n print('Completed sensitivity iteration for S to G ratio {:5.2f} on '\n '{}-{:02d}-{:02d} at {:02d}:{:02d}:{:02d}'.format(sg_ratio, t.tm_year, t.tm_mon, t.tm_mday,\n t.tm_hour, t.tm_min, t.tm_sec))",
"Completed sensitivity iteration for S to G ratio 0.10 on 2019-11-27 at 11:39:38\nCompleted sensitivity iteration for S to G ratio 0.20 on 2019-11-27 at 11:39:54\nCompleted sensitivity iteration for S to G ratio 0.25 on 2019-11-27 at 11:40:10\nCompleted sensitivity iteration for S to G ratio 0.33 on 2019-11-27 at 11:40:25\nCompleted sensitivity iteration for S to G ratio 0.50 on 2019-11-27 at 11:40:42\nCompleted sensitivity iteration for S to G ratio 1.00 on 2019-11-27 at 11:40:57\nCompleted sensitivity iteration for S to G ratio 2.00 on 2019-11-27 at 11:41:08\nCompleted sensitivity iteration for S to G ratio 3.00 on 2019-11-27 at 11:41:19\nCompleted sensitivity iteration for S to G ratio 4.00 on 2019-11-27 at 11:41:29\nCompleted sensitivity iteration for S to G ratio 5.00 on 2019-11-27 at 11:41:39\nCompleted sensitivity iteration for S to G ratio 10.00 on 2019-11-27 at 11:41:49\n"
]
],
[
[
"We now aggregate the results that we obtained in the form of adjacency matrices and monomer lists and times to meaningful values of monomer yields and bond contents.",
"_____no_output_____"
]
],
[
[
"num_sg_opts = len(sg_opts)\nanalysis = []\n\nfor i in range(num_sg_opts):\n sg_results = sg_result_list[i]\n cur_adjs = [sg_results[j][ADJ_MATRIX] for j in range(num_repeats)]\n analysis.append(par.Parallel(n_jobs=4)(par.delayed(analyze_adj_matrix)(cur_adjs[j])\n for j in range(num_repeats))) \n t = time.localtime()\n print('Finished analysis for S:G of {:5.2f} on {}-{:02d}-{:02d} at '\n '{:02d}:{:02d}:{:02d}'.format(sg_opts[i], t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Finished analysis for S:G of 0.10 on 2019-11-27 at 11:41:51\nFinished analysis for S:G of 0.20 on 2019-11-27 at 11:41:52\nFinished analysis for S:G of 0.25 on 2019-11-27 at 11:41:54\nFinished analysis for S:G of 0.33 on 2019-11-27 at 11:41:56\nFinished analysis for S:G of 0.50 on 2019-11-27 at 11:41:58\nFinished analysis for S:G of 1.00 on 2019-11-27 at 11:42:00\nFinished analysis for S:G of 2.00 on 2019-11-27 at 11:42:02\nFinished analysis for S:G of 3.00 on 2019-11-27 at 11:42:04\nFinished analysis for S:G of 4.00 on 2019-11-27 at 11:42:06\nFinished analysis for S:G of 5.00 on 2019-11-27 at 11:42:07\nFinished analysis for S:G of 10.00 on 2019-11-27 at 11:42:09\n"
]
],
[
[
"Now as an example, we will look at the distribution of monomer yields from RCF (assuming C-O bonds (beta-O4, alpha-O4, and 5-O4 bonds), and only C-O bonds are broken in this process), and then summarize that distribution with the sample mean and standard deviation, which will be used for all of the subsequent analyses. To visualize these results, we first need to extract the information from the analysis data structure, and normalize the data to the total number of monomers or total number of bonds.",
"_____no_output_____"
]
],
[
[
"num_monos = [[sum([analysis[j][i][CHAIN_LEN][k] * k for k in analysis[j][i][CHAIN_LEN]]) \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nmonomer_yields = [[analysis[j][i][RCF_YIELDS][1]/num_monos[j][i] \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nbetaO4_content = [[analysis[j][i][BONDS][BO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta5_content = [[analysis[j][i][BONDS][B5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbetabeta_content = [[analysis[j][i][BONDS][BB]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta1_content = [[analysis[j][i][BONDS][B1]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nc5o4_content = [[analysis[j][i][BONDS][C5O4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nalphaO4_content = [[analysis[j][i][BONDS][AO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nfivefive_content = [[analysis[j][i][BONDS][C5C5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nav_mon_yield = [np.mean(percent) for percent in monomer_yields]\nstd_mon_yield = [np.sqrt(np.var(percent)) for percent in monomer_yields]\n\nav_bo4_content = [np.mean(percent) for percent in betaO4_content]\nstd_bo4_content = [np.sqrt(np.var(percent)) for percent in betaO4_content]\n\nav_b5_content = [np.mean(percent) for percent in beta5_content]\nstd_b5_content = [np.sqrt(np.var(percent)) for percent in beta5_content]\n\nav_bb_content = [np.mean(percent) for percent in betabeta_content]\nstd_bb_content = [np.sqrt(np.var(percent)) for percent in betabeta_content]\n\nav_b1_content = [np.mean(percent) for percent in beta1_content]\nstd_b1_content = [np.sqrt(np.var(percent)) for percent in 
beta1_content]\n\nav_5o4_content = [np.mean(percent) for percent in c5o4_content]\nstd_5o4_content = [np.sqrt(np.var(percent)) for percent in c5o4_content]\n\nav_ao4_content = [np.mean(percent) for percent in alphaO4_content]\nstd_ao4_content = [np.sqrt(np.var(percent)) for percent in alphaO4_content]\n\nav_55_content = [np.mean(percent) for percent in fivefive_content]\nstd_55_content = [np.sqrt(np.var(percent)) for percent in fivefive_content]",
"_____no_output_____"
]
],
[
[
"With this aggregation complete, we now plot the histograms of beta04 bond yields for 3 cases of SG ratios - 0.1, 1, and 10. With so few repeats, these histograms look very poor, but with greater repeats there is a much clearer trend and approximation of the normal distribution - which justifies the use of sample norm and standard deviation as summary statistics.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(3.5, 3.5))\n\nplt.hist(monomer_yields[0], bins=np.linspace(0, 1.0, 21), density=1, facecolor='black', alpha=0.5, \n edgecolor='black', label='S/G=0.1')\nplt.hist(monomer_yields[5], bins=np.linspace(0, 1.0, 21), density=1, facecolor='red', alpha=0.5, \n edgecolor='black', label='S/G=1')\nplt.hist(monomer_yields[10], bins=np.linspace(0, 1.0, 21), density=1, facecolor='blue', alpha=0.5, \n edgecolor='black', label='S/G=10')\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top','right','bottom','left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, \n right=True, width=1.5, length=6)\nplt.xlabel('Monomer Yield (%)', fontsize=14)\nplt.legend(fontsize=14, loc='best')",
"_____no_output_____"
]
],
[
[
"As mentioned above, with more repeats these distributions approach the normal distribution. In addition, if we want to add more sensitivity results to this plot, it starts to look messier and more crowded. Therefore, it becomes much easier to use the mean and variance of our samples as summary statistics, and just plot these values (using the standard deviation as the error bar on every point).",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_mon_yield, yerr=std_mon_yield, linestyle='none', marker='.', markersize=10, \n markerfacecolor='black', markeredgecolor='black', capsize=3, ecolor='black')\nplt.xscale('log')\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Monomer Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])",
"_____no_output_____"
]
],
[
[
"The monomer yields plot above is easy, because there is just one trend that we look at. For this instance, that trend is fairly messy, again because there are fewer repeats and fewer monomers in each simulation, but the idea is still the same.\n\nLet us now examine the bond contents that were predicted from this simulation.",
"_____no_output_____"
]
],
[
[
"colors = [(0, 0, 0), (1, 0, 0), (0, 0, 1), (0, 0.6, 0), (0.6, 0, 0.6), (1, 0.549, 0), \n (0, 0.6, 0.6), (1, 0.8, 0), (0.6078, 0.2980, 0), (0.6, 0, 0), (0, 0, 0.6)]\nplt.figure(figsize=(3.5,3.5))\nplt.errorbar(sg_opts, av_bo4_content, yerr=std_bo4_content, linestyle='none', marker='.',\n markersize=10, markerfacecolor=colors[0], markeredgecolor=colors[0], label=BO4,\n capsize=3,ecolor=colors[0])\nplt.errorbar(sg_opts, av_bb_content, yerr=std_bb_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[1], markeredgecolor=colors[1], label=BB, capsize=3, ecolor=colors[1])\nplt.errorbar(sg_opts, av_b5_content, yerr=std_b5_content, linestyle='none', marker='.', markersize=10,\n markerfacecolor=colors[2], markeredgecolor=colors[2], label=B5, capsize=3, ecolor=colors[2])\nplt.errorbar(sg_opts, av_b1_content, yerr=std_b1_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[3], markeredgecolor=colors[3], label=B1, capsize=3, ecolor=colors[3])\nplt.errorbar(sg_opts, av_5o4_content, yerr=std_5o4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[4], markeredgecolor=colors[4], label=C5O4, capsize=3, ecolor=colors[4])\nplt.errorbar(sg_opts, av_ao4_content, yerr=std_ao4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[5], markeredgecolor=colors[5], label=AO4, capsize=3, ecolor=colors[5])\nplt.errorbar(sg_opts, av_55_content, yerr=std_55_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[6], markeredgecolor=colors[6], label=C5C5, capsize=3, ecolor=colors[6])\nplt.xscale('log')\n\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Bond 
Type Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])\nplt.legend(fontsize=14, loc='upper center', bbox_to_anchor=(1.2, 1.05), frameon=False)",
"_____no_output_____"
]
],
[
[
"### Addition rate sensitivity\nWe will now perform the same task on different monomer addition rates.",
"_____no_output_____"
]
],
[
[
"add_rates = np.logspace(4, 14, 21)\n\nfun = par.delayed(run_kmc)\nadd_result_list = []\n\n# Set the percentage of S\nsg_ratio = 1\npct_s = sg_ratio / (1 + sg_ratio)\n\n# Set the initial and maximum number of monomers to be modeled.\nini_num_monos = 4\nmax_monos = 200\n \nfor add_rate in add_rates:\n # Initialize the monomers, events, and state\n monomer_draw = np.random.rand(ini_num_monos)\n initial_monomers = create_initial_monomers(pct_s, monomer_draw)\n initial_events = create_initial_events(initial_monomers, rxn_rates)\n initial_state = create_initial_state(initial_events, initial_monomers)\n initial_events.append(Event(GROW, [], rate=add_rate))\n\n results = par.Parallel(n_jobs = 4)([fun(rxn_rates, initial_state, initial_events, n_max=max_monos, \n sg_ratio=sg_ratio) for _ in range(num_repeats)])\n \n add_result_list.append(results)\n t = time.localtime()\n print('Completed sensitivity iteration for addition rate {:.2e} monomers/s on ' \n '{}-{:02d}-{:02d} at {:02d}:{:02d}:{:02d}.'.format(add_rate, t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Completed sensitivity iteration for addition rate 1.00e+04 monomers/s on 2019-11-27 at 11:42:13.\nCompleted sensitivity iteration for addition rate 3.16e+04 monomers/s on 2019-11-27 at 11:42:15.\nCompleted sensitivity iteration for addition rate 1.00e+05 monomers/s on 2019-11-27 at 11:42:17.\nCompleted sensitivity iteration for addition rate 3.16e+05 monomers/s on 2019-11-27 at 11:42:20.\nCompleted sensitivity iteration for addition rate 1.00e+06 monomers/s on 2019-11-27 at 11:42:23.\nCompleted sensitivity iteration for addition rate 3.16e+06 monomers/s on 2019-11-27 at 11:42:27.\nCompleted sensitivity iteration for addition rate 1.00e+07 monomers/s on 2019-11-27 at 11:42:30.\nCompleted sensitivity iteration for addition rate 3.16e+07 monomers/s on 2019-11-27 at 11:42:35.\nCompleted sensitivity iteration for addition rate 1.00e+08 monomers/s on 2019-11-27 at 11:42:40.\nCompleted sensitivity iteration for addition rate 3.16e+08 monomers/s on 2019-11-27 at 11:42:48.\nCompleted sensitivity iteration for addition rate 1.00e+09 monomers/s on 2019-11-27 at 11:43:00.\nCompleted sensitivity iteration for addition rate 3.16e+09 monomers/s on 2019-11-27 at 11:43:19.\nCompleted sensitivity iteration for addition rate 1.00e+10 monomers/s on 2019-11-27 at 11:43:50.\nCompleted sensitivity iteration for addition rate 3.16e+10 monomers/s on 2019-11-27 at 11:44:27.\nCompleted sensitivity iteration for addition rate 1.00e+11 monomers/s on 2019-11-27 at 11:45:05.\nCompleted sensitivity iteration for addition rate 3.16e+11 monomers/s on 2019-11-27 at 11:45:41.\nCompleted sensitivity iteration for addition rate 1.00e+12 monomers/s on 2019-11-27 at 11:46:16.\nCompleted sensitivity iteration for addition rate 3.16e+12 monomers/s on 2019-11-27 at 11:46:45.\nCompleted sensitivity iteration for addition rate 1.00e+13 monomers/s on 2019-11-27 at 11:47:03.\nCompleted sensitivity iteration for addition rate 3.16e+13 monomers/s on 2019-11-27 at 11:47:17.\nCompleted sensitivity iteration for 
addition rate 1.00e+14 monomers/s on 2019-11-27 at 11:47:30.\n"
],
[
"num_opts = len(add_rates)\nanalysis = []\n\nfor i in range(num_opts):\n opt_results = add_result_list[i]\n cur_adjs = [opt_results[j][ADJ_MATRIX] for j in range(num_repeats)]\n analysis.append(par.Parallel(n_jobs=4)(par.delayed(analyze_adj_matrix)(cur_adjs[j])\n for j in range(num_repeats)))\n \n t = time.localtime()\n print('Finished analysis for monomer addition rate {:5.2e} on {}-{:02d}-{:02d} at '\n '{:02d}:{:02d}:{:02d}'.format(add_rates[i], t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Finished analysis for monomer addition rate 1.00e+04 on 2019-11-27 at 11:47:32\nFinished analysis for monomer addition rate 3.16e+04 on 2019-11-27 at 11:47:34\nFinished analysis for monomer addition rate 1.00e+05 on 2019-11-27 at 11:47:35\nFinished analysis for monomer addition rate 3.16e+05 on 2019-11-27 at 11:47:37\nFinished analysis for monomer addition rate 1.00e+06 on 2019-11-27 at 11:47:39\nFinished analysis for monomer addition rate 3.16e+06 on 2019-11-27 at 11:47:41\nFinished analysis for monomer addition rate 1.00e+07 on 2019-11-27 at 11:47:43\nFinished analysis for monomer addition rate 3.16e+07 on 2019-11-27 at 11:47:45\nFinished analysis for monomer addition rate 1.00e+08 on 2019-11-27 at 11:47:47\nFinished analysis for monomer addition rate 3.16e+08 on 2019-11-27 at 11:47:49\nFinished analysis for monomer addition rate 1.00e+09 on 2019-11-27 at 11:47:51\nFinished analysis for monomer addition rate 3.16e+09 on 2019-11-27 at 11:47:53\nFinished analysis for monomer addition rate 1.00e+10 on 2019-11-27 at 11:47:55\nFinished analysis for monomer addition rate 3.16e+10 on 2019-11-27 at 11:47:56\nFinished analysis for monomer addition rate 1.00e+11 on 2019-11-27 at 11:47:58\nFinished analysis for monomer addition rate 3.16e+11 on 2019-11-27 at 11:48:00\nFinished analysis for monomer addition rate 1.00e+12 on 2019-11-27 at 11:48:02\nFinished analysis for monomer addition rate 3.16e+12 on 2019-11-27 at 11:48:04\nFinished analysis for monomer addition rate 1.00e+13 on 2019-11-27 at 11:48:06\nFinished analysis for monomer addition rate 3.16e+13 on 2019-11-27 at 11:48:08\nFinished analysis for monomer addition rate 1.00e+14 on 2019-11-27 at 11:48:10\n"
],
[
"num_monos = [[sum([analysis[j][i][CHAIN_LEN][k] * k for k in analysis[j][i][CHAIN_LEN]]) \n for i in range(num_repeats)] for j in range(num_opts)]\n\nmonomer_yields = [[analysis[j][i][RCF_YIELDS][1]/num_monos[j][i] \n for i in range(num_repeats)] for j in range(num_opts)]\n\nbetaO4_content = [[analysis[j][i][BONDS][BO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_opts)]\nbeta5_content = [[analysis[j][i][BONDS][B5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_opts)]\nbetabeta_content = [[analysis[j][i][BONDS][BB]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_opts)]\nbeta1_content = [[analysis[j][i][BONDS][B1]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_opts)]\nc5o4_content = [[analysis[j][i][BONDS][C5O4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_opts)]\nalphaO4_content = [[analysis[j][i][BONDS][AO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_opts)]\nfivefive_content = [[analysis[j][i][BONDS][C5C5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_opts)]\n\nav_mon_yield = [np.mean(percent) for percent in monomer_yields]\nstd_mon_yield = [np.sqrt(np.var(percent)) for percent in monomer_yields]\n\nav_bo4_content = [np.mean(percent) for percent in betaO4_content]\nstd_bo4_content = [np.sqrt(np.var(percent)) for percent in betaO4_content]\n\nav_b5_content = [np.mean(percent) for percent in beta5_content]\nstd_b5_content = [np.sqrt(np.var(percent)) for percent in beta5_content]\n\nav_bb_content = [np.mean(percent) for percent in betabeta_content]\nstd_bb_content = [np.sqrt(np.var(percent)) for percent in betabeta_content]\n\nav_b1_content = [np.mean(percent) for percent in beta1_content]\nstd_b1_content = [np.sqrt(np.var(percent)) for percent in beta1_content]\n\nav_5o4_content = 
[np.mean(percent) for percent in c5o4_content]\nstd_5o4_content = [np.sqrt(np.var(percent)) for percent in c5o4_content]\n\nav_ao4_content = [np.mean(percent) for percent in alphaO4_content]\nstd_ao4_content = [np.sqrt(np.var(percent)) for percent in alphaO4_content]\n\nav_55_content = [np.mean(percent) for percent in fivefive_content]\nstd_55_content = [np.sqrt(np.var(percent)) for percent in fivefive_content]",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(add_rates, av_mon_yield, yerr=std_mon_yield, linestyle='none', marker='.', markersize=10, \n markerfacecolor='black', markeredgecolor='black', capsize=3, ecolor='black')\nplt.xscale('log')\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Monomer Yield (%)', fontsize=14)\nplt.xlabel('Addition Rate (monomers/s)', fontsize=14)",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(add_rates, av_bo4_content, yerr=std_bo4_content, linestyle='none', marker='.',\n markersize=10, markerfacecolor=colors[0], markeredgecolor=colors[0], label=BO4,\n capsize=3,ecolor=colors[0])\nplt.errorbar(add_rates, av_bb_content, yerr=std_bb_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[1], markeredgecolor=colors[1], label=BB, capsize=3, ecolor=colors[1])\nplt.errorbar(add_rates, av_b5_content, yerr=std_b5_content, linestyle='none', marker='.', markersize=10,\n markerfacecolor=colors[2], markeredgecolor=colors[2], label=B5, capsize=3, ecolor=colors[2])\nplt.errorbar(add_rates, av_b1_content, yerr=std_b1_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[3], markeredgecolor=colors[3], label=B1, capsize=3, ecolor=colors[3])\nplt.errorbar(add_rates, av_5o4_content, yerr=std_5o4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[4], markeredgecolor=colors[4], label=C5O4, capsize=3, ecolor=colors[4])\nplt.errorbar(add_rates, av_ao4_content, yerr=std_ao4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[5], markeredgecolor=colors[5], label=AO4, capsize=3, ecolor=colors[5])\nplt.errorbar(add_rates, av_55_content, yerr=std_55_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[6], markeredgecolor=colors[6], label=C5C5, capsize=3, ecolor=colors[6])\nplt.xscale('log')\n\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Bond Type Yield (%)', fontsize=14)\nplt.xlabel('Addition Rate (monomers/s)', fontsize=14)\nplt.ylim([0.0, 1.0])\nplt.legend(fontsize=14, loc='upper center', 
bbox_to_anchor=(1.2, 1.05), frameon=False)",
"_____no_output_____"
]
],
[
[
"### Continuous monomer addition, SG ratio sensitivity\nWe will now perform the same task on different SG ratios with a slow addition rate.",
"_____no_output_____"
]
],
[
[
"sg_opts = [0.1, 0.2, 0.25, 0.33, 0.5, 1, 2, 3, 4, 5, 10]\nnum_repeats = 5\nfun = par.delayed(run_kmc)\nsg_result_list = []\n\nfor sg_ratio in sg_opts:\n # Set the percentage of S\n pct_s = sg_ratio / (1 + sg_ratio)\n \n # Make choices about what kinds of monomers there are and create them\n ini_num_monos = 5\n max_monos = 200\n monomer_draw = np.random.rand(ini_num_monos)\n initial_monomers = create_initial_monomers(pct_s, monomer_draw)\n \n # Initialize the monomers, events, and state\n initial_events = create_initial_events(initial_monomers, rxn_rates)\n initial_state = create_initial_state(initial_events, initial_monomers)\n initial_events.append(Event(GROW, [], rate=1e4))\n\n results = par.Parallel(n_jobs=4)([fun(rxn_rates, initial_state, initial_events, n_max=max_monos, t_max=1, \n sg_ratio=sg_ratio) for _ in range(num_repeats)])\n \n sg_result_list.append(results)\n t = time.localtime()\n print('Completed sensitivity iteration for S to G ratio {:5.2f} on '\n '{}-{:02d}-{:02d} at {:02d}:{:02d}:{:02d}'.format(sg_ratio, t.tm_year, t.tm_mon, t.tm_mday,\n t.tm_hour, t.tm_min, t.tm_sec))",
"Completed sensitivity iteration for S to G ratio 0.10 on 2019-11-27 at 11:48:14\nCompleted sensitivity iteration for S to G ratio 0.20 on 2019-11-27 at 11:48:17\nCompleted sensitivity iteration for S to G ratio 0.25 on 2019-11-27 at 11:48:19\nCompleted sensitivity iteration for S to G ratio 0.33 on 2019-11-27 at 11:48:20\nCompleted sensitivity iteration for S to G ratio 0.50 on 2019-11-27 at 11:48:22\nCompleted sensitivity iteration for S to G ratio 1.00 on 2019-11-27 at 11:48:24\nCompleted sensitivity iteration for S to G ratio 2.00 on 2019-11-27 at 11:48:26\nCompleted sensitivity iteration for S to G ratio 3.00 on 2019-11-27 at 11:48:29\nCompleted sensitivity iteration for S to G ratio 4.00 on 2019-11-27 at 11:48:32\nCompleted sensitivity iteration for S to G ratio 5.00 on 2019-11-27 at 11:48:34\nCompleted sensitivity iteration for S to G ratio 10.00 on 2019-11-27 at 11:48:37\n"
],
[
"num_sg_opts = len(sg_opts)\nanalysis = []\n\nfor i in range(num_sg_opts):\n sg_results = sg_result_list[i]\n cur_adjs = [sg_results[j][ADJ_MATRIX] for j in range(num_repeats)]\n analysis.append(par.Parallel(n_jobs=4)(par.delayed(analyze_adj_matrix)(cur_adjs[j])\n for j in range(num_repeats))) \n t = time.localtime()\n print('Finished analysis for S:G of {:5.2f} on {}-{:02d}-{:02d} at '\n '{:02d}:{:02d}:{:02d}'.format(sg_opts[i], t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Finished analysis for S:G of 0.10 on 2019-11-27 at 11:48:39\nFinished analysis for S:G of 0.20 on 2019-11-27 at 11:48:41\nFinished analysis for S:G of 0.25 on 2019-11-27 at 11:48:44\nFinished analysis for S:G of 0.33 on 2019-11-27 at 11:48:46\nFinished analysis for S:G of 0.50 on 2019-11-27 at 11:48:48\nFinished analysis for S:G of 1.00 on 2019-11-27 at 11:48:50\nFinished analysis for S:G of 2.00 on 2019-11-27 at 11:48:52\nFinished analysis for S:G of 3.00 on 2019-11-27 at 11:48:54\nFinished analysis for S:G of 4.00 on 2019-11-27 at 11:48:56\nFinished analysis for S:G of 5.00 on 2019-11-27 at 11:48:58\nFinished analysis for S:G of 10.00 on 2019-11-27 at 11:49:00\n"
],
[
"num_monos = [[sum([analysis[j][i][CHAIN_LEN][k] * k for k in analysis[j][i][CHAIN_LEN]]) \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nmonomer_yields = [[analysis[j][i][RCF_YIELDS][1]/num_monos[j][i] \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nbetaO4_content = [[analysis[j][i][BONDS][BO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta5_content = [[analysis[j][i][BONDS][B5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbetabeta_content = [[analysis[j][i][BONDS][BB]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta1_content = [[analysis[j][i][BONDS][B1]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nc5o4_content = [[analysis[j][i][BONDS][C5O4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nalphaO4_content = [[analysis[j][i][BONDS][AO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nfivefive_content = [[analysis[j][i][BONDS][C5C5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nav_mon_yield = [np.mean(percent) for percent in monomer_yields]\nstd_mon_yield = [np.sqrt(np.var(percent)) for percent in monomer_yields]\n\nav_bo4_content = [np.mean(percent) for percent in betaO4_content]\nstd_bo4_content = [np.sqrt(np.var(percent)) for percent in betaO4_content]\n\nav_b5_content = [np.mean(percent) for percent in beta5_content]\nstd_b5_content = [np.sqrt(np.var(percent)) for percent in beta5_content]\n\nav_bb_content = [np.mean(percent) for percent in betabeta_content]\nstd_bb_content = [np.sqrt(np.var(percent)) for percent in betabeta_content]\n\nav_b1_content = [np.mean(percent) for percent in beta1_content]\nstd_b1_content = [np.sqrt(np.var(percent)) for percent in 
beta1_content]\n\nav_5o4_content = [np.mean(percent) for percent in c5o4_content]\nstd_5o4_content = [np.sqrt(np.var(percent)) for percent in c5o4_content]\n\nav_ao4_content = [np.mean(percent) for percent in alphaO4_content]\nstd_ao4_content = [np.sqrt(np.var(percent)) for percent in alphaO4_content]\n\nav_55_content = [np.mean(percent) for percent in fivefive_content]\nstd_55_content = [np.sqrt(np.var(percent)) for percent in fivefive_content]",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_mon_yield, yerr=std_mon_yield, linestyle='none', marker='.', markersize=10, \n markerfacecolor='black', markeredgecolor='black', capsize=3, ecolor='black')\nplt.xscale('log')\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Monomer Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_bo4_content, yerr=std_bo4_content, linestyle='none', marker='.',\n markersize=10, markerfacecolor=colors[0], markeredgecolor=colors[0], label=BO4,\n capsize=3,ecolor=colors[0])\nplt.errorbar(sg_opts, av_bb_content, yerr=std_bb_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[1], markeredgecolor=colors[1], label=BB, capsize=3, ecolor=colors[1])\nplt.errorbar(sg_opts, av_b5_content, yerr=std_b5_content, linestyle='none', marker='.', markersize=10,\n markerfacecolor=colors[2], markeredgecolor=colors[2], label=B5, capsize=3, ecolor=colors[2])\nplt.errorbar(sg_opts, av_b1_content, yerr=std_b1_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[3], markeredgecolor=colors[3], label=B1, capsize=3, ecolor=colors[3])\nplt.errorbar(sg_opts, av_5o4_content, yerr=std_5o4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[4], markeredgecolor=colors[4], label=C5O4, capsize=3, ecolor=colors[4])\nplt.errorbar(sg_opts, av_ao4_content, yerr=std_ao4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[5], markeredgecolor=colors[5], label=AO4, capsize=3, ecolor=colors[5])\nplt.errorbar(sg_opts, av_55_content, yerr=std_55_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[6], markeredgecolor=colors[6], label=C5C5, capsize=3, ecolor=colors[6])\nplt.xscale('log')\n\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Bond Type Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])\nplt.legend(fontsize=14, loc='upper center', bbox_to_anchor=(1.2, 1.05), 
frameon=False)",
"_____no_output_____"
]
],
[
[
"### Continuous monomer addition, SG ratio sensitivity at 1e7 monomers/s\nWe will now perform the same task on different SG ratios with three addition rates",
"_____no_output_____"
]
],
[
[
"sg_opts = [0.1, 0.2, 0.25, 0.33, 0.5, 1, 2, 3, 4, 5, 10]\nnum_repeats = 5\n\nfun = par.delayed(run_kmc)\nsg_result_list = []\n\nfor sg_ratio in sg_opts:\n # Set the percentage of S\n pct_s = sg_ratio / (1 + sg_ratio)\n \n # Make choices about what kinds of monomers there are and create them\n ini_num_monos = 5\n max_monos = 200\n monomer_draw = np.random.rand(ini_num_monos)\n initial_monomers = create_initial_monomers(pct_s, monomer_draw)\n \n # Initialize the monomers, events, and state\n initial_events = create_initial_events(initial_monomers, rxn_rates)\n initial_state = create_initial_state(initial_events, initial_monomers)\n initial_events.append(Event(GROW, [], rate=1e7))\n\n results = par.Parallel(n_jobs=4)([fun(rxn_rates, initial_state, initial_events, n_max=max_monos, t_max=1, \n sg_ratio=sg_ratio) for _ in range(num_repeats)])\n\n sg_result_list.append(results)\n t = time.localtime()\n print('Completed sensitivity iteration for SG ratio {:5.2f} on ' \n '{}-{:02d}-{:02d} at {:02d}:{:02d}:{:02d}.'.format(sg_ratio, t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Completed sensitivity iteration for SG ratio 0.10 on 2019-11-27 at 11:49:05.\nCompleted sensitivity iteration for SG ratio 0.20 on 2019-11-27 at 11:49:08.\nCompleted sensitivity iteration for SG ratio 0.25 on 2019-11-27 at 11:49:11.\nCompleted sensitivity iteration for SG ratio 0.33 on 2019-11-27 at 11:49:14.\nCompleted sensitivity iteration for SG ratio 0.50 on 2019-11-27 at 11:49:17.\nCompleted sensitivity iteration for SG ratio 1.00 on 2019-11-27 at 11:49:21.\nCompleted sensitivity iteration for SG ratio 2.00 on 2019-11-27 at 11:49:25.\nCompleted sensitivity iteration for SG ratio 3.00 on 2019-11-27 at 11:49:30.\nCompleted sensitivity iteration for SG ratio 4.00 on 2019-11-27 at 11:49:34.\nCompleted sensitivity iteration for SG ratio 5.00 on 2019-11-27 at 11:49:39.\nCompleted sensitivity iteration for SG ratio 10.00 on 2019-11-27 at 11:49:44.\n"
],
[
"num_sg_opts = len(sg_opts)\nanalysis = []\n\nfor i in range(num_sg_opts):\n sg_results = sg_result_list[i]\n cur_adjs = [sg_results[j][ADJ_MATRIX] for j in range(num_repeats)]\n analysis.append(par.Parallel(n_jobs=4)(par.delayed(analyze_adj_matrix)(cur_adjs[j])\n for j in range(num_repeats))) \n t = time.localtime()\n print('Finished analysis for S:G of {:5.2f} on {}-{:02d}-{:02d} at '\n '{:02d}:{:02d}:{:02d}'.format(sg_opts[i], t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Finished analysis for S:G of 0.10 on 2019-11-27 at 11:49:45\nFinished analysis for S:G of 0.20 on 2019-11-27 at 11:49:47\nFinished analysis for S:G of 0.25 on 2019-11-27 at 11:49:49\nFinished analysis for S:G of 0.33 on 2019-11-27 at 11:49:51\nFinished analysis for S:G of 0.50 on 2019-11-27 at 11:49:53\nFinished analysis for S:G of 1.00 on 2019-11-27 at 11:49:55\nFinished analysis for S:G of 2.00 on 2019-11-27 at 11:49:57\nFinished analysis for S:G of 3.00 on 2019-11-27 at 11:49:59\nFinished analysis for S:G of 4.00 on 2019-11-27 at 11:50:01\nFinished analysis for S:G of 5.00 on 2019-11-27 at 11:50:02\nFinished analysis for S:G of 10.00 on 2019-11-27 at 11:50:04\n"
],
[
"num_monos = [[sum([analysis[j][i][CHAIN_LEN][k] * k for k in analysis[j][i][CHAIN_LEN]]) \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nmonomer_yields = [[analysis[j][i][RCF_YIELDS][1]/num_monos[j][i] \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nbetaO4_content = [[analysis[j][i][BONDS][BO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta5_content = [[analysis[j][i][BONDS][B5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbetabeta_content = [[analysis[j][i][BONDS][BB]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta1_content = [[analysis[j][i][BONDS][B1]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nc5o4_content = [[analysis[j][i][BONDS][C5O4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nalphaO4_content = [[analysis[j][i][BONDS][AO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nfivefive_content = [[analysis[j][i][BONDS][C5C5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nav_mon_yield = [np.mean(percent) for percent in monomer_yields]\nstd_mon_yield = [np.sqrt(np.var(percent)) for percent in monomer_yields]\n\nav_bo4_content = [np.mean(percent) for percent in betaO4_content]\nstd_bo4_content = [np.sqrt(np.var(percent)) for percent in betaO4_content]\n\nav_b5_content = [np.mean(percent) for percent in beta5_content]\nstd_b5_content = [np.sqrt(np.var(percent)) for percent in beta5_content]\n\nav_bb_content = [np.mean(percent) for percent in betabeta_content]\nstd_bb_content = [np.sqrt(np.var(percent)) for percent in betabeta_content]\n\nav_b1_content = [np.mean(percent) for percent in beta1_content]\nstd_b1_content = [np.sqrt(np.var(percent)) for percent in 
beta1_content]\n\nav_5o4_content = [np.mean(percent) for percent in c5o4_content]\nstd_5o4_content = [np.sqrt(np.var(percent)) for percent in c5o4_content]\n\nav_ao4_content = [np.mean(percent) for percent in alphaO4_content]\nstd_ao4_content = [np.sqrt(np.var(percent)) for percent in alphaO4_content]\n\nav_55_content = [np.mean(percent) for percent in fivefive_content]\nstd_55_content = [np.sqrt(np.var(percent)) for percent in fivefive_content]",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_mon_yield, yerr=std_mon_yield, linestyle='none', marker='.', markersize=10, \n markerfacecolor='black', markeredgecolor='black', capsize=3, ecolor='black')\nplt.xscale('log')\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Monomer Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_bo4_content, yerr=std_bo4_content, linestyle='none', marker='.',\n markersize=10, markerfacecolor=colors[0], markeredgecolor=colors[0], label=BO4,\n capsize=3,ecolor=colors[0])\nplt.errorbar(sg_opts, av_bb_content, yerr=std_bb_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[1], markeredgecolor=colors[1], label=BB, capsize=3, ecolor=colors[1])\nplt.errorbar(sg_opts, av_b5_content, yerr=std_b5_content, linestyle='none', marker='.', markersize=10,\n markerfacecolor=colors[2], markeredgecolor=colors[2], label=B5, capsize=3, ecolor=colors[2])\nplt.errorbar(sg_opts, av_b1_content, yerr=std_b1_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[3], markeredgecolor=colors[3], label=B1, capsize=3, ecolor=colors[3])\nplt.errorbar(sg_opts, av_5o4_content, yerr=std_5o4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[4], markeredgecolor=colors[4], label=C5O4, capsize=3, ecolor=colors[4])\nplt.errorbar(sg_opts, av_ao4_content, yerr=std_ao4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[5], markeredgecolor=colors[5], label=AO4, capsize=3, ecolor=colors[5])\nplt.errorbar(sg_opts, av_55_content, yerr=std_55_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[6], markeredgecolor=colors[6], label=C5C5, capsize=3, ecolor=colors[6])\nplt.xscale('log')\n\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Bond Type Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])\nplt.legend(fontsize=14, loc='upper center', bbox_to_anchor=(1.2, 1.05), 
frameon=False)",
"_____no_output_____"
]
],
[
[
"### Continuous monomer addition, SG ratio sensitivity 1e9 monomers/second\n\nContinuing analysis of different SG ratios at different addition rates",
"_____no_output_____"
]
],
[
[
"sg_opts = [0.1, 0.2, 0.25, 0.33, 0.5, 1, 2, 3, 4, 5, 10]\nnum_repeats = 5\n\nfun = par.delayed(run_kmc)\nsg_result_list = []\n\nfor sg_ratio in sg_opts:\n # Set the percentage of S\n pct_s = sg_ratio / (1 + sg_ratio)\n \n # Make choices about what kinds of monomers there are and create them\n ini_num_monos = 5\n max_monos = 200\n monomer_draw = np.random.rand(ini_num_monos)\n initial_monomers = create_initial_monomers(pct_s, monomer_draw)\n \n # Initialize the monomers, events, and state\n initial_events = create_initial_events(initial_monomers, rxn_rates)\n initial_state = create_initial_state(initial_events, initial_monomers)\n initial_events.append(Event(GROW, [], rate=1e9))\n\n results = par.Parallel(n_jobs=4)([fun(rxn_rates, initial_state, initial_events, n_max=max_monos, t_max=1, \n sg_ratio=sg_ratio) for _ in range(num_repeats)])\n\n sg_result_list.append(results)\n t = time.localtime()\n print('Completed sensitivity iteration for SG ratio {:5.2f} on ' \n '{}-{:02d}-{:02d} at {:02d}:{:02d}:{:02d}.'.format(sg_ratio, t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Completed sensitivity iteration for SG ratio 0.10 on 2019-11-27 at 11:50:17.\nCompleted sensitivity iteration for SG ratio 0.20 on 2019-11-27 at 11:50:28.\nCompleted sensitivity iteration for SG ratio 0.25 on 2019-11-27 at 11:50:39.\nCompleted sensitivity iteration for SG ratio 0.33 on 2019-11-27 at 11:50:53.\nCompleted sensitivity iteration for SG ratio 0.50 on 2019-11-27 at 11:51:04.\nCompleted sensitivity iteration for SG ratio 1.00 on 2019-11-27 at 11:51:15.\nCompleted sensitivity iteration for SG ratio 2.00 on 2019-11-27 at 11:51:25.\nCompleted sensitivity iteration for SG ratio 3.00 on 2019-11-27 at 11:51:36.\nCompleted sensitivity iteration for SG ratio 4.00 on 2019-11-27 at 11:51:46.\nCompleted sensitivity iteration for SG ratio 5.00 on 2019-11-27 at 11:51:58.\nCompleted sensitivity iteration for SG ratio 10.00 on 2019-11-27 at 11:52:08.\n"
],
[
"num_sg_opts = len(sg_opts)\nanalysis = []\n\nfor i in range(num_sg_opts):\n sg_results = sg_result_list[i]\n cur_adjs = [sg_results[j][ADJ_MATRIX] for j in range(num_repeats)]\n analysis.append(par.Parallel(n_jobs=4)(par.delayed(analyze_adj_matrix)(cur_adjs[j])\n for j in range(num_repeats))) \n t = time.localtime()\n print('Finished analysis for S:G of {:5.2f} on {}-{:02d}-{:02d} at '\n '{:02d}:{:02d}:{:02d}'.format(sg_opts[i], t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Finished analysis for S:G of 0.10 on 2019-11-27 at 11:52:10\nFinished analysis for S:G of 0.20 on 2019-11-27 at 11:52:12\nFinished analysis for S:G of 0.25 on 2019-11-27 at 11:52:14\nFinished analysis for S:G of 0.33 on 2019-11-27 at 11:52:16\nFinished analysis for S:G of 0.50 on 2019-11-27 at 11:52:18\nFinished analysis for S:G of 1.00 on 2019-11-27 at 11:52:21\nFinished analysis for S:G of 2.00 on 2019-11-27 at 11:52:22\nFinished analysis for S:G of 3.00 on 2019-11-27 at 11:52:24\nFinished analysis for S:G of 4.00 on 2019-11-27 at 11:52:26\nFinished analysis for S:G of 5.00 on 2019-11-27 at 11:52:28\nFinished analysis for S:G of 10.00 on 2019-11-27 at 11:52:30\n"
],
[
"num_monos = [[sum([analysis[j][i][CHAIN_LEN][k] * k for k in analysis[j][i][CHAIN_LEN]]) \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nmonomer_yields = [[analysis[j][i][RCF_YIELDS][1]/num_monos[j][i] \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nbetaO4_content = [[analysis[j][i][BONDS][BO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta5_content = [[analysis[j][i][BONDS][B5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbetabeta_content = [[analysis[j][i][BONDS][BB]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta1_content = [[analysis[j][i][BONDS][B1]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nc5o4_content = [[analysis[j][i][BONDS][C5O4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nalphaO4_content = [[analysis[j][i][BONDS][AO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nfivefive_content = [[analysis[j][i][BONDS][C5C5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nav_mon_yield = [np.mean(percent) for percent in monomer_yields]\nstd_mon_yield = [np.sqrt(np.var(percent)) for percent in monomer_yields]\n\nav_bo4_content = [np.mean(percent) for percent in betaO4_content]\nstd_bo4_content = [np.sqrt(np.var(percent)) for percent in betaO4_content]\n\nav_b5_content = [np.mean(percent) for percent in beta5_content]\nstd_b5_content = [np.sqrt(np.var(percent)) for percent in beta5_content]\n\nav_bb_content = [np.mean(percent) for percent in betabeta_content]\nstd_bb_content = [np.sqrt(np.var(percent)) for percent in betabeta_content]\n\nav_b1_content = [np.mean(percent) for percent in beta1_content]\nstd_b1_content = [np.sqrt(np.var(percent)) for percent in 
beta1_content]\n\nav_5o4_content = [np.mean(percent) for percent in c5o4_content]\nstd_5o4_content = [np.sqrt(np.var(percent)) for percent in c5o4_content]\n\nav_ao4_content = [np.mean(percent) for percent in alphaO4_content]\nstd_ao4_content = [np.sqrt(np.var(percent)) for percent in alphaO4_content]\n\nav_55_content = [np.mean(percent) for percent in fivefive_content]\nstd_55_content = [np.sqrt(np.var(percent)) for percent in fivefive_content]",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_mon_yield, yerr=std_mon_yield, linestyle='none', marker='.', markersize=10, \n markerfacecolor='black', markeredgecolor='black', capsize=3, ecolor='black')\nplt.xscale('log')\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Monomer Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_bo4_content, yerr=std_bo4_content, linestyle='none', marker='.',\n markersize=10, markerfacecolor=colors[0], markeredgecolor=colors[0], label=BO4,\n capsize=3,ecolor=colors[0])\nplt.errorbar(sg_opts, av_bb_content, yerr=std_bb_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[1], markeredgecolor=colors[1], label=BB, capsize=3, ecolor=colors[1])\nplt.errorbar(sg_opts, av_b5_content, yerr=std_b5_content, linestyle='none', marker='.', markersize=10,\n markerfacecolor=colors[2], markeredgecolor=colors[2], label=B5, capsize=3, ecolor=colors[2])\nplt.errorbar(sg_opts, av_b1_content, yerr=std_b1_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[3], markeredgecolor=colors[3], label=B1, capsize=3, ecolor=colors[3])\nplt.errorbar(sg_opts, av_5o4_content, yerr=std_5o4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[4], markeredgecolor=colors[4], label=C5O4, capsize=3, ecolor=colors[4])\nplt.errorbar(sg_opts, av_ao4_content, yerr=std_ao4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[5], markeredgecolor=colors[5], label=AO4, capsize=3, ecolor=colors[5])\nplt.errorbar(sg_opts, av_55_content, yerr=std_55_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[6], markeredgecolor=colors[6], label=C5C5, capsize=3, ecolor=colors[6])\nplt.xscale('log')\n\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Bond Type Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])\nplt.legend(fontsize=14, loc='upper center', bbox_to_anchor=(1.2, 1.05), 
frameon=False)",
"_____no_output_____"
]
],
[
[
"### Continuous monomer addition, SG ratio sensitivity 1e11 monomers/second\nContinuing analysis of different SG ratios at different addition rates",
"_____no_output_____"
]
],
[
[
"sg_opts = [0.1, 0.2, 0.25, 0.33, 0.5, 1, 2, 3, 4, 5, 10]\nnum_repeats = 5\n\nfun = par.delayed(run_kmc)\nsg_result_list = []\n\nfor sg_ratio in sg_opts:\n # Set the percentage of S\n pct_s = sg_ratio / (1 + sg_ratio)\n \n # Make choices about what kinds of monomers there are and create them\n ini_num_monos = 5\n max_monos = 200\n monomer_draw = np.random.rand(ini_num_monos)\n initial_monomers = create_initial_monomers(pct_s, monomer_draw)\n \n # Initialize the monomers, events, and state\n initial_events = create_initial_events(initial_monomers, rxn_rates)\n initial_state = create_initial_state(initial_events, initial_monomers)\n initial_events.append(Event(GROW, [], rate=1e11))\n\n\n\n results = par.Parallel(n_jobs=4)([fun(rxn_rates, initial_state, initial_events, n_max=max_monos, t_max=1, \n sg_ratio=sg_ratio) for _ in range(num_repeats)])\n\n sg_result_list.append(results)\n t = time.localtime()\n print('Completed sensitivity iteration for SG ratio {:5.2f} on ' \n '{}-{:02d}-{:02d} at {:02d}:{:02d}:{:02d}.'.format(sg_ratio, t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Completed sensitivity iteration for SG ratio 0.10 on 2019-11-27 at 11:53:07.\nCompleted sensitivity iteration for SG ratio 0.20 on 2019-11-27 at 11:53:46.\nCompleted sensitivity iteration for SG ratio 0.25 on 2019-11-27 at 11:54:24.\nCompleted sensitivity iteration for SG ratio 0.33 on 2019-11-27 at 11:55:01.\nCompleted sensitivity iteration for SG ratio 0.50 on 2019-11-27 at 11:55:36.\nCompleted sensitivity iteration for SG ratio 1.00 on 2019-11-27 at 11:56:11.\nCompleted sensitivity iteration for SG ratio 2.00 on 2019-11-27 at 11:56:44.\nCompleted sensitivity iteration for SG ratio 3.00 on 2019-11-27 at 11:57:12.\nCompleted sensitivity iteration for SG ratio 4.00 on 2019-11-27 at 11:57:38.\nCompleted sensitivity iteration for SG ratio 5.00 on 2019-11-27 at 11:58:01.\nCompleted sensitivity iteration for SG ratio 10.00 on 2019-11-27 at 11:58:26.\n"
],
[
"num_sg_opts = len(sg_opts)\nanalysis = []\n\nfor i in range(num_sg_opts):\n sg_results = sg_result_list[i]\n cur_adjs = [sg_results[j][ADJ_MATRIX] for j in range(num_repeats)]\n analysis.append(par.Parallel(n_jobs=4)(par.delayed(analyze_adj_matrix)(cur_adjs[j])\n for j in range(num_repeats))) \n t = time.localtime()\n print('Finished analysis for S:G of {:5.2f} on {}-{:02d}-{:02d} at '\n '{:02d}:{:02d}:{:02d}'.format(sg_opts[i], t.tm_year, t.tm_mon, t.tm_mday, \n t.tm_hour, t.tm_min, t.tm_sec))",
"Finished analysis for S:G of 0.10 on 2019-11-27 at 11:58:28\nFinished analysis for S:G of 0.20 on 2019-11-27 at 11:58:30\nFinished analysis for S:G of 0.25 on 2019-11-27 at 11:58:32\nFinished analysis for S:G of 0.33 on 2019-11-27 at 11:58:33\nFinished analysis for S:G of 0.50 on 2019-11-27 at 11:58:35\nFinished analysis for S:G of 1.00 on 2019-11-27 at 11:58:37\nFinished analysis for S:G of 2.00 on 2019-11-27 at 11:58:39\nFinished analysis for S:G of 3.00 on 2019-11-27 at 11:58:41\nFinished analysis for S:G of 4.00 on 2019-11-27 at 11:58:44\nFinished analysis for S:G of 5.00 on 2019-11-27 at 11:58:46\nFinished analysis for S:G of 10.00 on 2019-11-27 at 11:58:47\n"
],
[
"num_monos = [[sum([analysis[j][i][CHAIN_LEN][k] * k for k in analysis[j][i][CHAIN_LEN]]) \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nmonomer_yields = [[analysis[j][i][RCF_YIELDS][1]/num_monos[j][i] \n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nbetaO4_content = [[analysis[j][i][BONDS][BO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta5_content = [[analysis[j][i][BONDS][B5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbetabeta_content = [[analysis[j][i][BONDS][BB]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nbeta1_content = [[analysis[j][i][BONDS][B1]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nc5o4_content = [[analysis[j][i][BONDS][C5O4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nalphaO4_content = [[analysis[j][i][BONDS][AO4]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\nfivefive_content = [[analysis[j][i][BONDS][C5C5]/sum(analysis[j][i][BONDS].values())\n for i in range(num_repeats)] for j in range(num_sg_opts)]\n\nav_mon_yield = [np.mean(percent) for percent in monomer_yields]\nstd_mon_yield = [np.sqrt(np.var(percent)) for percent in monomer_yields]\n\nav_bo4_content = [np.mean(percent) for percent in betaO4_content]\nstd_bo4_content = [np.sqrt(np.var(percent)) for percent in betaO4_content]\n\nav_b5_content = [np.mean(percent) for percent in beta5_content]\nstd_b5_content = [np.sqrt(np.var(percent)) for percent in beta5_content]\n\nav_bb_content = [np.mean(percent) for percent in betabeta_content]\nstd_bb_content = [np.sqrt(np.var(percent)) for percent in betabeta_content]\n\nav_b1_content = [np.mean(percent) for percent in beta1_content]\nstd_b1_content = [np.sqrt(np.var(percent)) for percent in 
beta1_content]\n\nav_5o4_content = [np.mean(percent) for percent in c5o4_content]\nstd_5o4_content = [np.sqrt(np.var(percent)) for percent in c5o4_content]\n\nav_ao4_content = [np.mean(percent) for percent in alphaO4_content]\nstd_ao4_content = [np.sqrt(np.var(percent)) for percent in alphaO4_content]\n\nav_55_content = [np.mean(percent) for percent in fivefive_content]\nstd_55_content = [np.sqrt(np.var(percent)) for percent in fivefive_content]",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_mon_yield, yerr=std_mon_yield, linestyle='none', marker='.', markersize=10, \n markerfacecolor='black', markeredgecolor='black', capsize=3, ecolor='black')\nplt.xscale('log')\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Monomer Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])",
"_____no_output_____"
],
[
"plt.figure(figsize=(3.5, 3.5))\nplt.errorbar(sg_opts, av_bo4_content, yerr=std_bo4_content, linestyle='none', marker='.',\n markersize=10, markerfacecolor=colors[0], markeredgecolor=colors[0], label=BO4,\n capsize=3,ecolor=colors[0])\nplt.errorbar(sg_opts, av_bb_content, yerr=std_bb_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[1], markeredgecolor=colors[1], label=BB, capsize=3, ecolor=colors[1])\nplt.errorbar(sg_opts, av_b5_content, yerr=std_b5_content, linestyle='none', marker='.', markersize=10,\n markerfacecolor=colors[2], markeredgecolor=colors[2], label=B5, capsize=3, ecolor=colors[2])\nplt.errorbar(sg_opts, av_b1_content, yerr=std_b1_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[3], markeredgecolor=colors[3], label=B1, capsize=3, ecolor=colors[3])\nplt.errorbar(sg_opts, av_5o4_content, yerr=std_5o4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[4], markeredgecolor=colors[4], label=C5O4, capsize=3, ecolor=colors[4])\nplt.errorbar(sg_opts, av_ao4_content, yerr=std_ao4_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[5], markeredgecolor=colors[5], label=AO4, capsize=3, ecolor=colors[5])\nplt.errorbar(sg_opts, av_55_content, yerr=std_55_content, linestyle='none', marker='.', markersize=10, \n markerfacecolor=colors[6], markeredgecolor=colors[6], label=C5C5, capsize=3, ecolor=colors[6])\nplt.xscale('log')\n\n[plt.gca().spines[i].set_linewidth(1.5) for i in ['top', 'right', 'bottom', 'left']]\nplt.gca().tick_params(axis='both', which='major', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1.5, length=6)\nplt.gca().tick_params(axis='both', which='minor', labelsize=14, direction='in', pad=8, top=True, right=True, \n width=1, length=4)\nplt.ylabel('Bond Type Yield (%)', fontsize=14)\nplt.xlabel('SG Ratio', fontsize=14)\nplt.ylim([0.0, 1.0])\nplt.legend(fontsize=14, loc='upper center', bbox_to_anchor=(1.2, 1.05), 
frameon=False)",
"_____no_output_____"
]
],
[
[
"As previously noted, this package can also be run from the command line. See the [README on github](https://github.com/michaelorella/lignin-kmc).",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
ec653661b014e03ac0bd5f6e114f61dbabe91c17 | 265,673 | ipynb | Jupyter Notebook | Results/Rest_Booking/no_warming_up/120_goals/plots/Learning_Curves_Full_Dataset.ipynb | IlievskiV/Master_Thesis_GO_Chatbots | 6fbba12afbbf51b7a6b6067e9448e5ef673fda16 | [
"MIT"
]
| 13 | 2018-03-29T16:25:08.000Z | 2020-10-23T18:46:06.000Z | Results/Rest_Booking/no_warming_up/120_goals/plots/Learning_Curves_Full_Dataset.ipynb | IlievskiV/Master_Thesis_GO_Chatbots | 6fbba12afbbf51b7a6b6067e9448e5ef673fda16 | [
"MIT"
]
| 1 | 2018-03-30T11:13:33.000Z | 2018-03-30T11:13:33.000Z | Results/Rest_Booking/no_warming_up/120_goals/plots/Learning_Curves_Full_Dataset.ipynb | IlievskiV/Master_Thesis_GO_Chatbots | 6fbba12afbbf51b7a6b6067e9448e5ef673fda16 | [
"MIT"
]
| 4 | 2019-04-29T22:06:09.000Z | 2020-08-01T04:09:59.000Z | 919.283737 | 133,300 | 0.942098 | [
[
[
"import seaborn as sns\nimport numpy as np\nimport json\nfrom pprint import pprint\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"full_pretrain_perf = []\n\n# open the file with the performances over 100 epochs using the pretrained model\nwith open(\"../50_episodes_1_9/run_1/full_pretrain_performances.json\", \"r\") as full_pretrain_perf_file:\n full_pretrain_perf.extend(json.load(full_pretrain_perf_file))\n \n# open the file with the performances over 100 epochs using the pretrained model\nwith open(\"../50_episodes_1_9/run_2/full_pretrain_performances.json\", \"r\") as full_pretrain_perf_file:\n full_pretrain_perf.extend(json.load(full_pretrain_perf_file))\n\n# open the file with the performances over 100 epochs using the pretrained model\nwith open(\"../50_episodes_1_9/run_3/full_pretrain_performances.json\", \"r\") as full_pretrain_perf_file:\n full_pretrain_perf.extend(json.load(full_pretrain_perf_file))\n\n\n\nfull_scratch_perf = []\n \n# open the file with the performances over 100 epochs using the scratch model\nwith open(\"../50_episodes_1_9/run_1/full_scratch_performances.json\", \"r\") as full_scratch_perf_file:\n full_scratch_perf.extend(json.load(full_scratch_perf_file))\n \nwith open(\"../50_episodes_1_9/run_2/full_scratch_performances.json\", \"r\") as full_scratch_perf_file:\n full_scratch_perf.extend(json.load(full_scratch_perf_file))\n \nwith open(\"../50_episodes_1_9/run_3/full_scratch_performances.json\", \"r\") as full_scratch_perf_file:\n full_scratch_perf.extend(json.load(full_scratch_perf_file))\n \n\n \nprint(len(full_pretrain_perf))\nprint(len(full_scratch_perf))",
"160\n160\n"
],
[
"# they have to be in columns, not in rows\n\nfull_pretrain_mean = []\nfull_scratch_mean = []\n\nfor i in range(160):\n full_pretrain_mean.append([full_pretrain_perf[i][str(j)] for j in range(50)])\n full_scratch_mean.append([full_scratch_perf[i][str(j)] for j in range(50)])",
"_____no_output_____"
],
[
"plt.figure(figsize=(15, 9))\nsns.set(font_scale=3)\nsns.set_style(\"whitegrid\")\n\n\ndata = np.asarray(full_pretrain_mean)\nax = sns.tsplot(data=data, ci=[90], color='blue', marker='d', markersize=15, markevery=5, linewidth=3.0, legend=True, condition=\"transfer learning, no warm-start\")\n\ndata = np.asarray(full_scratch_mean)\nax = sns.tsplot(data=data, ci=[90], color='red', marker='X', markersize=15, markevery=5, linewidth=3.0, legend=True, condition=\"no transfer learning, warm-start\")\n\n\n\nax.set_xlabel('Number of Epochs', weight='bold', size=35)\nax.set_ylabel('Success Rate', weight='bold', size=35)\nsns.plt.title('Learning curve over training data set', weight='bold', size=35)\n\nplt.xlim((0, 50))\nplt.ylim((0, 0.42))\nplt.setp(ax.get_legend().get_texts(), fontsize=\"35\")\nplt.savefig('learning_curve_training_data_set_200_reps_color.png', dpi=200, bbox_inches=\"tight\", pad_inches=0)\nplt.show()",
"_____no_output_____"
],
[
"full_pretrain_test_perf = []\n\n# open the file with the performances over 100 epochs using the pretrained model\nwith open(\"../50_episodes_1_9/run_1/full_pretrain_test_performances.json\", \"r\") as full_pretrain_test_perf_file:\n full_pretrain_test_perf.extend(json.load(full_pretrain_test_perf_file))\n \n# open the file with the performances over 100 epochs using the pretrained model\nwith open(\"../50_episodes_1_9/run_2/full_pretrain_test_performances.json\", \"r\") as full_pretrain_test_perf_file:\n full_pretrain_test_perf.extend(json.load(full_pretrain_test_perf_file))\n\n# open the file with the performances over 100 epochs using the pretrained model\nwith open(\"../50_episodes_1_9/run_3/full_pretrain_test_performances.json\", \"r\") as full_pretrain_test_perf_file:\n full_pretrain_test_perf.extend(json.load(full_pretrain_test_perf_file))\n \n \n\nfull_scratch_test_perf = []\n \n# open the file with the performances over 100 epochs using the scratch model\nwith open(\"../50_episodes_1_9/run_1/full_scratch_test_performances.json\", \"r\") as full_scratch_test_perf_file:\n full_scratch_test_perf.extend(json.load(full_scratch_test_perf_file))\n \n# open the file with the performances over 100 epochs using the scratch model\nwith open(\"../50_episodes_1_9/run_2/full_scratch_test_performances.json\", \"r\") as full_scratch_test_perf_file:\n full_scratch_test_perf.extend(json.load(full_scratch_test_perf_file))\n \n# open the file with the performances over 100 epochs using the scratch model\nwith open(\"../50_episodes_1_9/run_3/full_scratch_test_performances.json\", \"r\") as full_scratch_test_perf_file:\n full_scratch_test_perf.extend(json.load(full_scratch_test_perf_file))\n \n\n\n# no_warm_full_scratch_test_perf = []\n \n# # open the file with the performances over 100 epochs using the scratch model\n# with open(\"../50_episodes_1_9/run_1/no_warm_up_full_scratch_test_performances.json\", \"r\") as no_warm_full_scratch_test_perf_file:\n# 
no_warm_full_scratch_test_perf.extend(json.load(no_warm_full_scratch_test_perf_file))\n\n\n \nprint(len(full_scratch_test_perf))\n# print(len(warm_up_full_pretrain_test_perf))\nprint(len(full_scratch_test_perf))\n# print(len(no_warm_full_scratch_test_perf))",
"160\n160\n"
],
[
"# they have to be in columns, not in rows\n\nfull_pretrain_test_perf_mean = []\n# warm_up_full_pretrain_test_perf_mean = []\nfull_scratch_test_perf_mean = []\n# no_warm_full_scratch_test_perf_mean = []\n\nfor i in range(160):\n full_pretrain_test_perf_mean.append([full_pretrain_test_perf[i][str(j)] for j in range(50)])\n # warm_up_full_pretrain_test_perf_mean.append([warm_up_full_pretrain_test_perf[i][str(j)] for j in range(50)])\n full_scratch_test_perf_mean.append([full_scratch_test_perf[i][str(j)] for j in range(50)])\n # no_warm_full_scratch_test_perf_mean.append([no_warm_full_scratch_test_perf[i][str(j)] for j in range(50)])",
"_____no_output_____"
],
[
"plt.figure(figsize=(15, 9))\nsns.set(font_scale=3)\nsns.set_style(\"whitegrid\")\n\n\ndata = np.asarray(full_pretrain_test_perf_mean)\nax = sns.tsplot(data=data, ci=[85], color='blue', marker='d', markersize=15, markevery=5, legend=True, condition=\"transfer learning, no warm-start\")\n\ndata = np.asarray(full_scratch_test_perf_mean)\nax = sns.tsplot(data=data, ci=[90], color='red', marker='X', markersize=15, markevery=5, linewidth=3.0, legend=True, condition=\"no transfer learning, warm-start\")\n\nax.set_xlabel('Number of Epochs', weight='bold', size=35)\nax.set_ylabel('Success Rate', weight='bold', size=35)\nsns.plt.title('Learning curve over testing data set', weight='bold', size=35)\n\nplt.xlim((0,50))\nplt.ylim((0, 0.35))\nplt.setp(ax.get_legend().get_texts(), fontsize=\"35\")\nplt.savefig('learning_curve_testing_data_set_200_reps_volor.png', dpi=200, bbox_inches=\"tight\", pad_inches=0)\nplt.show()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec653e74f191f650c1601f30f0bc6b82512a034e | 12,332 | ipynb | Jupyter Notebook | Chapter 09 - Pipelines/pipelines.ipynb | avinash-mishra/Data-Science-Essentials | cc39a78a16ef2e160b16d6eb31a6041f951c15d9 | [
"MIT"
]
| 3 | 2020-03-19T11:01:52.000Z | 2020-11-21T11:29:49.000Z | Chapter 09 - Pipelines/pipelines.ipynb | avinash-mishra/Data-Science-Essentials | cc39a78a16ef2e160b16d6eb31a6041f951c15d9 | [
"MIT"
]
| null | null | null | Chapter 09 - Pipelines/pipelines.ipynb | avinash-mishra/Data-Science-Essentials | cc39a78a16ef2e160b16d6eb31a6041f951c15d9 | [
"MIT"
]
| null | null | null | 27.343681 | 260 | 0.514839 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
ec6540afb4e4da2fba3f244c31d50b585b0b4dcf | 307,192 | ipynb | Jupyter Notebook | _notebooks/2020-12-26-Improved_Code_Commenter.ipynb | ncoop57/i-am-a-nerd | d8f6313b5dd230cae52107b9a94815004542def7 | [
"Apache-2.0"
]
| 17 | 2020-03-02T20:08:21.000Z | 2022-03-30T06:49:26.000Z | _notebooks/2020-12-26-Improved_Code_Commenter.ipynb | ncoop57/i-am-a-nerd | d8f6313b5dd230cae52107b9a94815004542def7 | [
"Apache-2.0"
]
| 10 | 2020-03-08T17:18:52.000Z | 2022-02-26T06:49:00.000Z | _notebooks/2020-12-26-Improved_Code_Commenter.ipynb | ncoop57/i-am-a-nerd | d8f6313b5dd230cae52107b9a94815004542def7 | [
"Apache-2.0"
]
| 2 | 2021-01-23T09:22:45.000Z | 2021-01-29T13:22:34.000Z | 50.9355 | 13,950 | 0.554162 | [
[
[
"# Improved Code Summarization\n> \"In this tutorial you'll learn about how to apply state of the art summarization models to source code!\"\n\n- toc: true\n- badges: true\n- comments: true\n- categories: [code, summarization, deep-learning, seq2seq]\n- image: images/code_commenting.png",
"_____no_output_____"
],
[
"# About\nHi there, in this post you'll learn how to finetune the a RoBERT based model that's been trained on code data to automatically generate comments for code!\n\nWe will be focusing on the Java programming language, but you can apply the same techniques in this post for any programming language that interests you. Additionally, you'll see how to incorporate this code commenter into a [VSCode](https://code.visualstudio.com/) extension so that you can generate comments for code snippets you highlight:\n\n(Insert GIF of tool working)\n\nAs always, we'll start with a bit of background of the data and model we are using, but feel free to skip if you want to get straight to the awesomeness ;). Alright, let's GO!",
"_____no_output_____"
],
[
"# Background\n\n## Data\nWe will be using the awesome [CodeSearchNet](https://github.com/github/codesearchnet) Challenge dataset, which contains millions of pairs of methods and their docstrings for a large variety of programming languages. The dataset was initially constructed for evaluating how well different approaches perform at searching for code. However, we can easily repurpose it for us and lucky for us, the awesome authors did an awesome job collecting, documenting, and cleaning the data.\n\nWe'll be performing a bit more cleaning and formatting of the data as well as adding some more examples. These examples won't be method/docstring pairs, but code snippet/inline comment pairs. This allows our model to generate comments for arbitrary code snippets that a developer may want to document instead of just generating the docstring of a method.\n\n## CodeBERT\nThe pretrained model we will be finetuning comes from the awesome paper from Microsoft's research division aptly named [CodeBERT: A Pre-Trained Model for Programming and Natural Languages](https://arxiv.org/abs/2002.08155). This model also used the CodeSearchNet challenge dataset, but instead of using it to generate comments it used to teach a RoBERTa based model to represent code and natural language in a useful way. This practice of eaching these large language models to represent text in a useful way is common practice now since these representations have been shown to be helpful in finetuning these models on other tasks. The CodeBERT paper showed these representations are helpful by finetuning them on the programming task of code search and comment generation, exactly what we will be doing! The difference between their comment generation task and ours is that we will do a bit more preprocessing and our model will be able to generate inline comments of code snippets and not just method level comments.\n\nSo, how does CodeBERT learn these representations? 
It combines two different training objectives that's been shown to be useful for natural language. The Masked Language Modeling objective (MLM), which is from the original [BERT](https://arxiv.org/abs/1810.04805) paper, and Replaced Token Detection (RTD) objective, which is from the [ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators](https://arxiv.org/abs/2003.10555) paper. The MLM objective is where we randomly mask out parts of the text that we feed into the model and ask the model to predict those masked out pieces. The RTD objective is where random tokens in the text are replaced and the model has to determine which of these tokens are replaced. However, to make it harder for the model, these replaced tokens attempt to be plausible alternatives and not just random words. The CodeBERT model actually used a n-gram based model to generate these alternatives where as the ELECTRA paper used a small BERT based model.\n\n (From ELECTRA Paper)\n\nInstead of using only natural language to apply these training objectives to, CodeBERT used code and docstrings. This allowed the CodeBERT model to learn a useful representation of code that could be used for other tasks.\n\nAlright with that quick background knowledge down, lets get into actually finetuning our model!\n",
"_____no_output_____"
]
],
[
[
"! nvidia-smi",
"Thu Jan 14 20:43:12 2021 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.27.04 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 42C P8 11W / 70W | 0MiB / 15079MiB | 0% Default |\n| | | ERR! |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
]
],
[
[
"# Data",
"_____no_output_____"
],
[
"First we'll install the necessary packages and download our data!",
"_____no_output_____"
]
],
[
[
"# collapse\n# Download and install the necessary dependencies\n! pip install -q torch==1.4.0 -f https://download.pytorch.org/whl/cu101/torch_stable.html\n! pip install -q transformers==3.5.0 fast-trees\n\n! git clone -q https://github.com/microsoft/CodeXGLUE.git\n\n# Download the CodeSearchNet Challenge dataset for the Java programming language\n! wget -q https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2/java.zip\n! unzip -qq java.zip",
"\u001b[K |████████████████████████████████| 753.4MB 21kB/s \n\u001b[31mERROR: torchvision 0.8.1+cu101 has requirement torch==1.7.0, but you'll have torch 1.4.0 which is incompatible.\u001b[0m\n\u001b[K |████████████████████████████████| 1.3MB 12.6MB/s \n\u001b[K |████████████████████████████████| 890kB 51.7MB/s \n\u001b[K |████████████████████████████████| 2.9MB 50.3MB/s \n\u001b[K |████████████████████████████████| 1.1MB 61.3MB/s \n\u001b[K |████████████████████████████████| 112kB 64.8MB/s \n\u001b[K |████████████████████████████████| 163kB 60.4MB/s \n\u001b[K |████████████████████████████████| 71kB 11.8MB/s \n\u001b[?25h Building wheel for sacremoses (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for tree-sitter (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
]
],
[
[
"Next let's read in our data and since these models take a long time to train, we will only select a subset of the data.",
"_____no_output_____"
]
],
[
[
"# collapse\nimport pandas as pd\n\nfrom pathlib import Path\nfrom typing import List, Optional\n\n# Code from CodeSearchNetChallenge: https://github.com/github/CodeSearchNet/blob/master/notebooks/ExploreData.ipynb\ndef jsonl_list_to_dataframe(file_list, columns=['code', 'docstring']):\n \"\"\"Load a list of jsonl.gz files into a pandas DataFrame.\"\"\"\n return pd.concat([pd.read_json(f,\n orient='records', \n compression='gzip',\n lines=True)[columns] \n for f in file_list], sort=False)\n\ndef get_dfs(path: Path) -> List[pd.DataFrame]:\n \"\"\"Grabs the different data splits and converts them into dataframes\"\"\"\n dfs = []\n for split in [\"train\", \"valid\", \"test\"]:\n files = sorted((path/split).glob(\"**/*.gz\"))\n df = jsonl_list_to_dataframe(files).rename(columns = {'code': 'mthd', 'docstring': 'cmt'})\n dfs.append(df)\n \n return dfs\n\npath = Path('.')\ndf_trn, df_val, df_tst = get_dfs(path/\"java/final/jsonl\")\nsample = 0.01\ndf_trn = df_trn.sample(frac = sample)\ndf_val = df_val.sample(frac = sample)\ndf_tst = df_tst.sample(frac = sample)\n\nlen(df_trn), len(df_val), len(df_tst)",
"_____no_output_____"
]
],
[
[
"Let's see how the data looks. As shown, we have the data in a good format with one column all of the methods (input into the model) and the other all of the comments (output of the model).",
"_____no_output_____"
]
],
[
[
"df_trn.head()",
"_____no_output_____"
]
],
[
[
"## Data Cleaning",
"_____no_output_____"
],
[
"Now, that we have the data, let's clean it! First, we'll remove any non-ascii characters to simplify the problem so that the model only has to think about generating English comments.",
"_____no_output_____"
]
],
[
[
"# collapse\n# From https://stackoverflow.com/a/27084708/5768407\ndef is_ascii(s):\n '''\n Determines if the given string contains only ascii characters\n\n :param s: the string to check\n :returns: whether or not the given string contains only ascii characters\n '''\n try:\n s.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n return False\n else:\n return True\n\ndf_trn = df_trn[df_trn['mthd'].apply(lambda x: is_ascii(x))]\ndf_val = df_val[df_val['mthd'].apply(lambda x: is_ascii(x))]\ndf_tst = df_tst[df_tst['mthd'].apply(lambda x: is_ascii(x))]\n\ndf_trn = df_trn[df_trn['cmt'].apply(lambda x: is_ascii(x))]\ndf_val = df_val[df_val['cmt'].apply(lambda x: is_ascii(x))]\ndf_tst = df_tst[df_tst['cmt'].apply(lambda x: is_ascii(x))]\n\nlen(df_trn), len(df_val), len(df_tst)",
"_____no_output_____"
]
],
[
[
"Next, we'll remove any outdated comments by checking to see if the [JavaDoc](https://www.oracle.com/java/technologies/javase/javadoc.html)'s parameter list is different from the method's parameter list. This also will remove pairs where the docstring doesn't actually document the parameters, which probably means the pairs are poor quality (you should always properly document your code :) ).",
"_____no_output_____"
]
],
[
[
"# collapse\nimport re\n\nfrom fast_trees.core import FastParser\n\nparser = FastParser('java')\n\ndef get_cmt_params(cmt: str) -> List[str]:\n '''\n Grabs the parameter identifier names from a JavaDoc comment\n\n :param cmt: the comment to extract the parameter identifier names from\n :returns: an array of the parameter identifier names found in the given comment\n '''\n params = re.findall('@param+\\s+\\w+', cmt)\n param_names = []\n for param in params:\n param_names.append(param.split()[1])\n \n return param_names\n\ndef is_outdated(mthd: str, cmt: str, parser: FastParser) -> bool:\n '''\n Determines if a given method and comment are outdated by checking\n if the method's parameter identifier names match the comment's\n\n :param mthd: the method to compare against its corresponding comment\n :param cmt: the comment to compare against its corresponding method\n :param parser: parser for easily getting the parameter identifier names from a given method\n :returns: wheather or not a given comment is outdated compared to its corresponding method\n '''\n try:\n mthd_params = parser.get_params(mthd)\n except:\n return False\n \n cmt_params = get_cmt_params(cmt)\n\n return mthd_params != cmt_params\n\ndf_trn = df_trn[\n ~df_trn.apply(\n lambda x: is_outdated(x.mthd, x.cmt, parser), axis = 1\n )\n]\ndf_val = df_val[\n ~df_val.apply(\n lambda x: is_outdated(x.mthd, x.cmt, parser), axis = 1\n )\n]\ndf_tst = df_tst[\n ~df_tst.apply(\n lambda x: is_outdated(x.mthd, x.cmt, parser), axis = 1\n )\n]\n\nlen(df_trn), len(df_val), len(df_tst)",
"Downloading repo https://github.com/tree-sitter/tree-sitter-java to /usr/local/lib/python3.6/dist-packages/fast_trees/tree-sitter-java.\n"
]
],
[
[
"Now we'll add in the additional pairs of code snippets/inline comments.\n\nP.S. One thing to note with adding these pairs is that the inline comments will appear twice in the datasets. The first in the method where the inline comment came from and the second in the target for the code snippet. This is only a problem for the training set since it allows for the model to cheat by simply remembering the inline comment from the example method it came from. However, in my testing, I found this to not be an issue and the model seems to still work well despite this problem. Just thought ya should know :).",
"_____no_output_____"
]
],
[
[
"# collapse\nfrom tqdm.auto import tqdm\n\ndef get_inline_pairs(mthd):\n '''\n Get all pairs of inline comments and corresponding code snippets\n\n :param mthd: the method to retrieve the pairs of comments and corresponding\n code snippets from\n :returns: all pairs of comments and corresponding code snippets\n '''\n pairs = [[]]\n\n comment = False\n bracket = False\n indent_lvl = -1\n lines = mthd.split(\"\\n\")\n for line in lines:\n if \"//\" in line and not bracket and not \"://\" in line:\n pairs[-1].append(line)\n if '\\t' in line:\n indent_lvl = line.count('\\t')\n else:\n indent_lvl = line.split(\"//\")[0].count(' ')\n comment = True\n bracket = False\n elif comment:\n if '{' in line and not bracket:\n bracket = True\n pairs[-1].append(line)\n elif '}' in line:\n line_indent = -1\n if '\\t' in line:\n line_indent = line.count('\\t')\n else:\n line_indent = line.split(\"//\")[0].count(' ')\n if indent_lvl == line_indent:\n pairs[-1].append(line)\n if not bracket:\n pairs.append([])\n comment = False\n bracket = False\n elif line.isspace() or line == '' and not bracket:\n pairs.append([])\n comment = False\n else:\n pairs[-1].append(line)\n \n # Convert pairs into proper format of (code snippet, inline comment) dataframe\n code_snippets = []\n comments = []\n for pair in pairs:\n if pair and len(pair) < 5:\n code = []\n comment = []\n skip = False\n for line in pair:\n if \"TODO\" in line: break\n if \"//\" in line:\n comment.append(line.replace('//', ''))\n else:\n code.append(line)\n if len(code) > 1 and len(comment) > 0:\n code_snippets.append('\\n'.join(code))\n comments.append('\\n'.join(comment))\n\n pairs = pd.DataFrame(zip(code_snippets, comments), columns = [\"mthd\", \"cmt\"])\n return pairs\n\n\ndef add_inline(df: pd.DataFrame) -> pd.DataFrame:\n '''\n Helper function to go through all methods in a given dataframe and add all\n pairs of inline comments and corresponding code snippets\n\n :param df: the dataframe to retrieve and add all pairs of 
inline comments\n and corresponding code snippets to\n :returns: a new dataframe with the newly added pairs of inline comments and\n corresponding code snippets\n '''\n new_df = df[df['mthd'].str.contains(\"//\")]\n all_pairs = []\n for mthd in tqdm(new_df.mthd.values):\n pairs = get_inline_pairs(mthd)\n all_pairs.append(pairs)\n\n df_pairs = pd.concat([pairs for pairs in all_pairs])\n return pd.concat([df, df_pairs])\n\ndf_trn = add_inline(df_trn)\ndf_val = add_inline(df_val)\ndf_tst = add_inline(df_tst)\n\nlen(df_trn), len(df_val), len(df_tst)",
"_____no_output_____"
]
],
[
[
"We'll also remove pairs where the size of the code is smaller than the comment. This is because I found that in these cases the comments contain a bunch of extra information that the model won't have access to such as how the method is being used by other methods in the software system.",
"_____no_output_____"
]
],
[
[
"# collapse\ndf_trn = df_trn[df_trn.apply(lambda row: len(row.mthd) > len(row.cmt), axis = 1)]\ndf_val = df_val[df_val.apply(lambda row: len(row.mthd) > len(row.cmt), axis = 1)]\ndf_tst = df_tst[df_tst.apply(lambda row: len(row.mthd) > len(row.cmt), axis = 1)]\n\nlen(df_trn), len(df_val), len(df_tst)",
"_____no_output_____"
]
],
[
[
"Next, we'll remove any examples that have the special \\<code> tag since these also tend to contain extra information that the model doesn't have a good hope of generating.",
"_____no_output_____"
]
],
[
[
"# collapse\ndef has_code(cmt: str) -> bool:\n '''\n Determinine if the given comment contains the HTML <code> tag\n\n :param cmt: the comment to check whether it contains the HTML <code> tag\n :returns: whether or not the given comment contains the HTML <code> tag\n '''\n if '<code>' in cmt: return True\n else: return False\n\ndf_trn = df_trn[~df_trn['cmt'].apply(lambda x: has_code(x))]\ndf_val = df_val[~df_val['cmt'].apply(lambda x: has_code(x))]\ndf_tst = df_tst[~df_tst['cmt'].apply(lambda x: has_code(x))]\n\nlen(df_trn), len(df_val), len(df_tst)",
"_____no_output_____"
]
],
[
[
"Lastly, we're gonna remove the JavaDoc parts of the comments other than the description since that is really all we care about. The other pieces of information can usually be autogenerated or may require external knowledge to document them.",
"_____no_output_____"
]
],
[
[
"# collapse\ndef remove_jdocs(df: pd.DataFrame) -> pd.DataFrame:\n '''\n Remove the JavaDocs leaving only the description of the comment\n\n :param df: the pandas dataframe to remove the JavaDocs from\n :returns: a new pandas dataframe with the JavaDocs removed\n '''\n methods = []\n comments = []\n for i, row in tqdm(list(df.iterrows())):\n comment = row[\"cmt\"]\n # Remove {} text in comments from https://stackoverflow.com/questions/14596884/remove-text-between-and-in-python/14598135\n comment = re.sub(\"([\\{\\[]).*?([\\)\\}])\", '', comment)\n \n \n cleaned = []\n for line in comment.split('\\n'):\n if \"@\" in line: break\n cleaned.append(line)\n comments.append('\\n'.join(cleaned))\n methods.append(row[\"mthd\"])\n new_df = pd.DataFrame(zip(methods, comments), columns = [\"mthd\", \"cmt\"])\n\n return new_df\n\ndf_trn = remove_jdocs(df_trn);\ndf_val = remove_jdocs(df_val);\ndf_tst = remove_jdocs(df_tst);",
"_____no_output_____"
]
],
[
[
"Almost there! In this step, we'll remove any HTML tags from the comments so the model doesn't have to also learn HTML. Bless those that do...",
"_____no_output_____"
]
],
[
[
"# collapse\ndef clean_html(cmt: str) -> str:\n '''\n Remove any HTML tags from a given comment\n\n :param cmt: the comment to remove any HTML tags from\n :returns: the comment with any HTML tags removed\n '''\n result = re.sub(r\"<.?span[^>]*>|<.?code[^>]*>|<.?p[^>]*>|<.?hr[^>]*>|<.?h[1-3][^>]*>|<.?a[^>]*>|<.?b[^>]*>|<.?blockquote[^>]*>|<.?del[^>]*>|<.?dd[^>]*>|<.?dl[^>]*>|<.?dt[^>]*>|<.?em[^>]*>|<.?i[^>]*>|<.?img[^>]*>|<.?kbd[^>]*>|<.?li[^>]*>|<.?ol[^>]*>|<.?pre[^>]*>|<.?s[^>]*>|<.?sup[^>]*>|<.?sub[^>]*>|<.?strong[^>]*>|<.?strike[^>]*>|<.?ul[^>]*>|<.?br[^>]*>\", \"\", cmt)\n return result\n\ndf_trn.cmt = df_trn.cmt.apply(clean_html)\ndf_val.cmt = df_val.cmt.apply(clean_html)\ndf_tst.cmt = df_tst.cmt.apply(clean_html)",
"_____no_output_____"
]
],
[
[
"FINALLY!! We'll make everything lower case, remove extra whitespace, remove empty comments, and remove duplicates.",
"_____no_output_____"
]
],
[
[
"# collapse\ndf_trn = df_trn.applymap(lambda x: ' '.join(x.split()).lower())\ndf_val = df_val.applymap(lambda x: ' '.join(x.split()).lower())\ndf_tst = df_tst.applymap(lambda x: ' '.join(x.split()).lower())\n\ndf_trn = df_trn[~(df_trn['cmt'] == '')]\ndf_val = df_val[~(df_val['cmt'] == '')]\ndf_tst = df_tst[~(df_tst['cmt'] == '')]\n\ndf_trn = df_trn[~df_trn['cmt'].duplicated()]\ndf_val = df_val[~df_val['cmt'].duplicated()]\ndf_tst = df_tst[~df_tst['cmt'].duplicated()]\n\nlen(df_trn), len(df_val), len(df_tst)",
"_____no_output_____"
]
],
[
[
"Now let's see what the data looks like.",
"_____no_output_____"
]
],
[
[
"df_trn.head()",
"_____no_output_____"
]
],
[
[
"## Data Exploring\n\nAs good Data Scientists, we will also explore our data to uncover any secrets. Data can be sneaky like that :).",
"_____no_output_____"
]
],
[
[
"# collapse\nimport numpy as np\n\nfrom collections import Counter\nfrom statistics import mean, median, stdev\nfrom transformers import AutoTokenizer\n\ndef get_counter(df: pd.DataFrame, tokenizer: AutoTokenizer, col: str) -> Counter:\n '''\n Get the counts for each token in a given pandas dataframe column\n\n :param df: the pandas dataframe to get the counts of tokens from\n :param tokenizer: the tokenizer to use for tokenizing the rows in the pandas\n dataframe\n :param col: the column to grab rows from when tokenizing\n :returns: the counts of each token in the given pandas dataframe\n column\n '''\n toks = []\n for i, row in df.iterrows():\n toks.extend(tokenizer.tokenize(row[col]))\n \n cnt = Counter()\n for tok in toks:\n cnt[tok] += 1 \n return cnt\n\ntokenizer = AutoTokenizer.from_pretrained('microsoft/codebert-base')\nmthd_cnt = get_counter(df_trn, tokenizer, 'mthd')\ncmt_cnt = get_counter(df_trn, tokenizer, 'cmt')\nmthd_lens = df_trn.mthd.apply(lambda x: len(tokenizer.tokenize(x))).values\ncmt_lens = df_trn.cmt.apply(lambda x: len(tokenizer.tokenize(x))).values\nmax_mthd_len = int(np.quantile(mthd_lens, 0.95))\nmax_cmt_len = int(np.quantile(cmt_lens, 0.95))",
"_____no_output_____"
],
[
"# collapse\nimport matplotlib.pyplot as plt\n\ndef plot_counts(counts:Counter, top_k: Optional[int] = 30):\n '''\n Plot a bar chart of the most common tokens\n\n :param counts: the counts of each token\n :param top_k: the number of tokens to display in the plot\n '''\n labels, values = zip(*counts.most_common()[:top_k])\n\n indexes = np.arange(len(labels))\n width = 1\n plt.figure(num=None, figsize=(22, 4), dpi=60, facecolor='w', edgecolor='k')\n plt.bar(indexes, values, width)\n plt.xticks(indexes + width * 0.5, labels)\n plt.show()",
"_____no_output_____"
]
],
[
[
"Let's look at the most common tokens in our methods and comments.",
"_____no_output_____"
]
],
[
[
"plot_counts(mthd_cnt, top_k = 30)\nplot_counts(cmt_cnt, top_k = 30)",
"_____no_output_____"
],
[
"# collapse\ndef plot_hist(lens: List[int], n_bins: Optional[int] = 50):\n '''\n Plot a histogram of the given number of tokens in a column \n\n :param lens: the number of tokens in a column\n :param n_bins: the number of bins to sort the number of tokens into\n '''\n n, bins, patches = plt.hist(lens, n_bins, facecolor='blue', alpha=0.9)\n plt.show()",
"_____no_output_____"
]
],
[
[
"Now, let's look at the distribution of method and comment lengths.",
"_____no_output_____"
]
],
[
[
"print(mean(mthd_lens), median(mthd_lens), stdev(mthd_lens))\nplot_hist(mthd_lens)\nprint(mean(cmt_lens), median(cmt_lens), stdev(cmt_lens))\nplot_hist(cmt_lens)",
"177 102.0 283.76574846164925\n"
]
],
[
[
"Using this new information on the length distribution, we can remove outliers by filtering out methods whose lengths fall outside of the 95th percentile (chosen for completely arbitrary reasons)!",
"_____no_output_____"
]
],
[
[
"# collapse\ndef filter_len(\n row: pd.Series, tokenizer: AutoTokenizer, mthd_len: int, cmt_len: int\n ) -> bool:\n '''\n Determine if a given panda dataframe row has a method or comment that has\n more tokens than max length\n\n :param row: the row to check if it has a method or comment that is too long\n :param tokenizer: the tokenizer to tokenize a method or comment\n :param mthd_len: the max number of tokens a method can have\n :param cmt_len: the max number of tokens a comment can have\n :returns: whether or not the given row have a method or comment that have\n more tokens than a max length\n '''\n return len(tokenizer.tokenize(row.mthd)) < mthd_len and len(tokenizer.tokenize(row.cmt)) < cmt_len\n\ndf_trn = df_trn[df_trn.apply(\n lambda row: filter_len(\n row, tokenizer, max_mthd_len,\n max_cmt_len\n ), axis = 1\n)]\ndf_val = df_val[df_val.apply(\n lambda row: filter_len(\n row, tokenizer, max_mthd_len,\n max_cmt_len\n ), axis = 1\n)]\ndf_tst = df_tst[df_tst.apply(\n lambda row: filter_len(\n row, tokenizer, max_mthd_len,\n max_cmt_len\n ), axis = 1\n)]\n\nlen(df_trn), len(df_val), len(df_tst)",
"_____no_output_____"
],
[
"max_mthd_len, max_cmt_len",
"_____no_output_____"
]
],
[
[
"We could do a lot more exploring of our data, as the above exploration was the bare minimum. As an exercise, I suggest you explore the data on your own using whatever means necessary!",
"_____no_output_____"
],
[
"# Training\n\nNow that we have our data processed and in a format we like, let's go ahead and start training! To accomplish this we will be using code from the awesome [CodeXGLUE](https://github.com/microsoft/CodeXGLUE) repository. This repository is similar to the NLP equivalent GLUE benchmarks, where a ton of awesome code-related benchmarks are standardized and put into one place for the community to use! They have a ton of interesting ones and I highly suggest looking through their repo if you are interested in other code-related tasks.",
"_____no_output_____"
]
],
[
[
"cd ./CodeXGLUE/Code-Text/code-to-text/code",
"/content/CodeXGLUE/Code-Text/code-to-text/code\n"
],
[
"# hide\n! mkdir java",
"_____no_output_____"
]
],
[
[
"Okay, I lied, sorry :(. One last processing step is required for our data: we need to output it into the structure that the awesome CodeXGLUE Code-Text benchmark expects.",
"_____no_output_____"
]
],
[
[
"# collapse\nimport json\n\ndf_trn['code_tokens'] = df_trn.mthd.apply(lambda x: x.split())\ndf_trn['docstring_tokens'] = df_trn.cmt.apply(lambda x: x.split())\nwith open('java/train.jsonl','w') as f:\n for _, row in df_trn.iterrows():\n f.write(json.dumps(row.to_dict()) + '\\n')\n\ndf_val['code_tokens'] = df_val.mthd.apply(lambda x: x.split())\ndf_val['docstring_tokens'] = df_val.cmt.apply(lambda x: x.split())\nwith open('java/valid.jsonl','w') as f:\n for _, row in df_val.iterrows():\n f.write(json.dumps(row.to_dict()) + '\\n')\n\ndf_tst['code_tokens'] = df_tst.mthd.apply(lambda x: x.split())\ndf_tst['docstring_tokens'] = df_tst.cmt.apply(lambda x: x.split())\nwith open('java/test.jsonl','w') as f:\n for _, row in df_tst.iterrows():\n f.write(json.dumps(row.to_dict()) + '\\n')",
"_____no_output_____"
],
[
"lang = 'java' # programming language\nlr = 5e-5\nbatch_size = 8 # change depending on the GPU Colab gives you\nbeam_size = 10\nsource_length = 256\ntarget_length = max_cmt_len\ndata_dir = '.'\noutput_dir = f'model/{lang}'\ntrain_file = f'{data_dir}/{lang}/train.jsonl'\ndev_file = f'{data_dir}/{lang}/valid.jsonl'\nepochs = 10 \npretrained_model = 'microsoft/codebert-base'\n\n! python run.py \\\n --do_train \\\n --do_eval \\\n --do_lower_case \\\n --model_type roberta \\\n --model_name_or_path {pretrained_model} \\\n --train_filename {train_file} \\\n --dev_filename {dev_file} \\\n --output_dir {output_dir} \\\n --max_source_length {source_length} \\\n --max_target_length {target_length} \\\n --beam_size {beam_size} \\\n --train_batch_size {batch_size} \\\n --eval_batch_size {batch_size} \\\n --learning_rate {lr} \\\n --num_train_epochs {epochs}",
"2021-01-14 20:49:04.427229: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n01/14/2021 20:49:06 - INFO - __main__ - Namespace(adam_epsilon=1e-08, beam_size=10, config_name='', dev_filename='./java/valid.jsonl', do_eval=True, do_lower_case=True, do_test=False, do_train=True, eval_batch_size=8, eval_steps=-1, gradient_accumulation_steps=1, learning_rate=5e-05, load_model_path=None, local_rank=-1, max_grad_norm=1.0, max_source_length=256, max_steps=-1, max_target_length=48, model_name_or_path='microsoft/codebert-base', model_type='roberta', no_cuda=False, num_train_epochs=10, output_dir='model/java', seed=42, test_filename=None, tokenizer_name='', train_batch_size=8, train_filename='./java/train.jsonl', train_steps=-1, warmup_steps=0, weight_decay=0.0)\n01/14/2021 20:49:06 - WARNING - __main__ - Process rank: -1, device: cuda, n_gpu: 1, distributed training: False\n01/14/2021 20:49:06 - INFO - filelock - Lock 140293701425752 acquired on /root/.cache/torch/transformers/08477dcecf305af90229876aa01e4b0f3594dc8c638985a72277f39ea7d8d0c3.7fb14267817b1d26bb44a57cd5aa2fc003c25e87b75ef77e9c55c4804675b4cf.lock\nDownloading: 100% 499M/499M [00:06<00:00, 73.5MB/s]\n01/14/2021 20:49:13 - INFO - filelock - Lock 140293701425752 released on /root/.cache/torch/transformers/08477dcecf305af90229876aa01e4b0f3594dc8c638985a72277f39ea7d8d0c3.7fb14267817b1d26bb44a57cd5aa2fc003c25e87b75ef77e9c55c4804675b4cf.lock\n01/14/2021 20:49:30 - INFO - __main__ - *** Example ***\n01/14/2021 20:49:30 - INFO - __main__ - idx: 0\n01/14/2021 20:49:30 - INFO - __main__ - source_tokens: ['<s>', 'public', '_static', '_void', '_check', 'j', 'av', 'ain', 'ternal', 'access', '(', 'il', 'og', 'ger', '_logger', ')', '_{', '_if', '_(', 'log', 'ger', '_==', '_null', '_||', '_!', 'java', 'version', '.', 'is', 'at', 'le', 'ast', '(', 'java', 'version', '.', 'java', '_', '9', '))', '_{', '_//', '_older', '_java', '_versions', '_are', '_fine', 
'_with', '_the', '_reflection', '_return', ';', '_}', '_map', '<', 'string', ',', '_package', 'access', 'requ', 'irement', '[]', '>', '_requirements', '_=', '_new', '_tre', 'em', 'ap', '<', 'string', ',', '_package', 'access', 'requ', 'irement', '[]', '>', '();', '_requirements', '.', 'put', '(\"', 'java', '.', 'base', '\",', '_new', '_package', 'access', 'requ', 'irement', '[]', '_{', '_create', 'requ', 'irement', '(', 'false', ',', '_\"', 'j', 'dk', '.', 'internal', '.', 'ref', '\"),', '_create', 'requ', 'irement', '(', 'true', ',', '_\"', 'java', '.', 'lang', '\"),', '_create', 'requ', 'irement', '(', 'true', ',', '_\"', 'java', '.', 'n', 'io', '\"),', '_create', 'requ', 'irement', '(', 'true', ',', '_\"', 'sun', '.', 'n', 'io', '.', 'ch', '\")', '_});', '_requirements', '.', 'put', '(\"', 'j', 'dk', '.', 'management', '\",', '_get', 'j', 'dk', 'management', 'requ', 'irements', '());', '_requirements', '.', 'put', '(\"', 'java', '.', 'management', '\",', '_new', '_package', 'access', 'requ', 'irement', '[]', '_{', '_create', 'requ', 'irement', '(', 'true', ',', '_\"', 'sun', '.', 'management', '\")', '_});', '_check', 'package', 'requ', 'irements', '(', 'log', 'ger', ',', '_requirements', ');', '_}', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - source_ids: 0 15110 25156 13842 1649 267 1469 1851 46378 28300 1640 718 2154 2403 37764 43 25522 114 36 12376 2403 45994 23796 45056 27785 43830 21747 4 354 415 459 1988 1640 43830 21747 4 43830 1215 466 35122 25522 21277 2530 46900 7952 32 2051 19 5 12456 671 131 35524 5456 41552 20951 6 3737 28300 42172 34074 48992 15698 3471 5457 92 6110 991 1115 41552 20951 6 3737 28300 42172 34074 48992 15698 47006 3471 4 9179 46469 43830 4 11070 1297 92 3737 28300 42172 34074 48992 25522 1045 42172 34074 1640 22303 6 22 267 43357 4 37559 4 13043 16844 1045 42172 34074 1640 29225 6 22 43830 4 32373 16844 1045 42172 34074 1640 29225 6 22 43830 4 282 1020 16844 1045 42172 34074 1640 29225 6 22 21381 4 282 1020 4 611 8070 47771 3471 
4 9179 46469 267 43357 4 14668 1297 120 267 43357 14668 42172 48227 49291 3471 4 9179 46469 43830 4 14668 1297 92 3737 28300 42172 34074 48992 25522 1045 42172 34074 1640 29225 6 22 21381 4 14668 8070 47771 1649 46181 42172 48227 1640 12376 2403 6 3471 4397 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - source_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - target_tokens: ['<s>', 'prints', '_warning', '_to', '_given', '_if', '_haz', 'el', 'cast', '_is', '_not', '_provided', '_a', '_sufficient', '_access', '_to', '_java', '_internal', '_packages', '_on', '_java', '_9', '_and', '_newer', '.', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - target_ids: 0 31553 2892 7 576 114 32468 523 5182 16 45 1286 10 7719 899 7 46900 3425 8368 15 46900 361 8 13964 4 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - target_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - *** Example ***\n01/14/2021 20:49:30 - INFO - __main__ - idx: 1\n01/14/2021 20:49:30 - INFO - __main__ - source_tokens: ['<s>', 'public', '_void', '_marsh', 'all', '(', 's', 'ct', 'e', '20', 'pl', 'use', 'mb', 'edd', 'edd', 'est', 'inations', 'ettings', '_s', 'ct', 'e', '20', 'pl', 'use', 'mb', 'edd', 'edd', 'est', 'inations', 'ettings', ',', '_protocol', 'm', 'arsh', 'all', 
'er', '_protocol', 'm', 'arsh', 'all', 'er', ')', '_{', '_if', '_(', 's', 'ct', 'e', '20', 'pl', 'use', 'mb', 'edd', 'edd', 'est', 'inations', 'ettings', '_==', '_null', ')', '_{', '_throw', '_new', '_s', 'dk', 'client', 'ex', 'ception', '(\"', 'in', 'valid', '_argument', '_passed', '_to', '_marsh', 'all', '(', '...)', '\");', '_}', '_try', '_{', '_}', '_catch', '_(', 'ex', 'ception', '_e', ')', '_{', '_throw', '_new', '_s', 'dk', 'client', 'ex', 'ception', '(\"', 'un', 'able', '_to', '_marsh', 'all', '_request', '_to', '_json', ':', '_\"', '_+', '_e', '.', 'get', 'message', '(),', '_e', ');', '_}', '_}', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - source_ids: 0 15110 13842 16377 1250 1640 29 3894 242 844 2911 3698 6648 13093 13093 990 17808 48496 579 3894 242 844 2911 3698 6648 13093 13093 990 17808 48496 6 11883 119 14980 1250 254 11883 119 14980 1250 254 43 25522 114 36 29 3894 242 844 2911 3698 6648 13093 13093 990 17808 48496 45994 23796 43 25522 3211 92 579 43357 38557 3463 20900 46469 179 42679 4795 1595 7 16377 1250 1640 41137 45751 35524 860 25522 35524 2916 36 3463 20900 364 43 25522 3211 92 579 43357 38557 3463 20900 46469 879 868 7 16377 1250 2069 7 49133 35 22 2055 364 4 6460 44773 49196 364 4397 35524 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - source_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - target_tokens: ['<s>', 'm', 'arsh', 'all', '_the', '_given', '_parameter', '_object', '.', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - target_ids: 0 119 14980 1250 5 576 43797 7626 4 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - target_mask: 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - *** Example ***\n01/14/2021 20:49:30 - INFO - __main__ - idx: 2\n01/14/2021 20:49:30 - INFO - __main__ - source_tokens: ['<s>', '@', 'over', 'ride', '_public', '_void', '_pref', 'etch', 'token', '(', 'final', '_file', '_token', 'file', ',', '_final', '_props', '_props', ',', '_final', '_logger', '_logger', ')', '_throws', '_had', 'oop', 'security', 'man', 'age', 'rex', 'ception', '_{', '_final', '_string', '_us', 'ert', 'op', 'roxy', '_=', '_props', '.', 'get', 'string', '(', 'job', 'properties', '.', 'user', '_', 'to', '_', 'proxy', ');', '_logger', '.', 'info', '(\"', 'getting', '_had', 'oop', '_tokens', '_based', '_on', '_props', '_for', '_\"', '_+', '_us', 'ert', 'op', 'roxy', ');', '_dop', 'ref', 'etch', '(', 'token', 'file', ',', '_props', ',', '_logger', ',', '_us', 'ert', 'op', 'roxy', ');', '_}', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - source_ids: 0 1039 2137 23167 285 13842 33284 29094 46657 1640 6156 2870 19233 21710 6 507 26504 26504 6 507 37764 37764 43 6989 56 18042 15506 397 1580 19633 20900 25522 507 6755 201 2399 1517 46963 5457 26504 4 6460 20951 1640 30056 47276 4 12105 1215 560 1215 47315 4397 37764 4 23999 46469 31315 56 18042 22121 716 15 26504 13 22 2055 201 2399 1517 46963 4397 32331 13043 29094 1640 46657 21710 6 26504 6 37764 6 201 2399 1517 46963 4397 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - source_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - target_tokens: ['<s>', '/*', '_gets', '_had', 'oop', '_tokens', '_for', '_a', '_user', '_to', '_run', '_map', 'red', '/', 'h', 'ive', '_jobs', '_on', '_a', '_secured', '_cluster', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - target_ids: 0 49051 1516 56 18042 22121 13 10 3018 7 422 5456 2050 73 298 2088 1315 15 10 5288 18016 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - target_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - *** Example ***\n01/14/2021 20:49:30 - INFO - __main__ - idx: 3\n01/14/2021 20:49:30 - INFO - __main__ - source_tokens: ['<s>', '@', 'over', 'ride', '_public', '_<', 'y', '>', '_singular', 'attribute', '<', 'x', ',', '_y', '>', '_get', 'decl', 'ared', 'id', '(', 'class', '<', 'y', '>', '_param', 'class', ')', '_{', '_if', '_(', 'id', 'attribute', '_!=', '_null', ')', '_{', '_if', '_(', 'id', 'attribute', '.', 'get', 'j', 'av', 'at', 'ype', '().', 'equ', 'als', '(', 'param', 'class', ')', '_&&', '_!', 'is', 'id', 'class', ')', '_{', '_return', '_(', 'sing', 'ular', 'attribute', 
'<', 'x', ',', '_y', '>)', '_id', 'attribute', ';', '_}', '_}', '_on', 'error', '();', '_return', '_null', ';', '_}', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - source_ids: 0 1039 2137 23167 285 28696 219 15698 23429 49202 41552 1178 6 1423 15698 120 32639 6537 808 1640 4684 41552 219 15698 40206 4684 43 25522 114 36 808 49202 49333 23796 43 25522 114 36 808 49202 4 6460 267 1469 415 37356 49123 8198 1536 1640 46669 4684 43 48200 27785 354 808 4684 43 25522 671 36 26058 8244 49202 41552 1178 6 1423 49798 13561 49202 131 35524 35524 15 44223 47006 671 23796 131 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - source_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - target_tokens: ['<s>', '/*', '_(', 'non', '-', 'j', 'av', 'ad', 'oc', ')', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - target_ids: 0 49051 36 13424 12 267 1469 625 1975 43 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - target_mask: 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - *** Example 
***\n01/14/2021 20:49:30 - INFO - __main__ - idx: 4\n01/14/2021 20:49:30 - INFO - __main__ - source_tokens: ['<s>', 'public', '_void', '_sync', '(', 'bo', 'olean', '_syn', 'call', 'se', 'gments', ')', '_{', '_commit', 'log', 'se', 'gment', '_current', '_=', '_alloc', 'ator', '.', 'all', 'ocating', 'from', '();', '_for', '_(', 'commit', 'log', 'se', 'gment', '_segment', '_:', '_alloc', 'ator', '.', 'get', 'act', 'ives', 'eg', 'ments', '())', '_{', '_if', '_(!', 'sync', 'all', 'se', 'gments', '_&&', '_segment', '.', 'id', '_>', '_current', '.', 'id', ')', '_return', ';', '_segment', '.', 'sync', '();', '_}', '_}', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - source_ids: 0 15110 13842 22785 1640 3983 48547 17796 16395 1090 30237 43 25522 6225 12376 1090 10757 595 5457 42793 2630 4 1250 18106 7761 47006 13 36 42721 12376 1090 10757 2835 4832 42793 2630 4 6460 7257 3699 3733 2963 49338 25522 114 48209 45176 1250 1090 30237 48200 2835 4 808 8061 595 4 808 43 671 131 2835 4 45176 47006 35524 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - source_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:30 - INFO - __main__ - 
target_tokens: ['<s>', 'forces', '_a', '_disk', '_flush', '_on', '_the', '_commit', '_log', '_files', '_that', '_need', '_it', '.', '_blocking', '.', '</s>']\n01/14/2021 20:49:30 - INFO - __main__ - target_ids: 0 34532 10 21675 24841 15 5 6225 7425 6773 14 240 24 4 8890 4 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n01/14/2021 20:49:30 - INFO - __main__ - target_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n01/14/2021 20:49:33 - INFO - __main__ - ***** Running training *****\n01/14/2021 20:49:33 - INFO - __main__ - Num examples = 2809\n01/14/2021 20:49:33 - INFO - __main__ - Batch size = 8\n01/14/2021 20:49:33 - INFO - __main__ - Num epoch = 10\nepoch 0 loss 6.8534: 100% 352/352 [02:53<00:00, 2.03it/s]\n01/14/2021 20:52:27 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 20:52:27 - INFO - __main__ - Num examples = 88\n01/14/2021 20:52:27 - INFO - __main__ - Batch size = 8\n01/14/2021 20:52:29 - INFO - __main__ - eval_ppl = 420.66683\n01/14/2021 20:52:29 - INFO - __main__ - global_step = 353\n01/14/2021 20:52:29 - INFO - __main__ - train_loss = 6.8534\n01/14/2021 20:52:29 - INFO - __main__ - ********************\n01/14/2021 20:52:31 - INFO - __main__ - Best ppl:420.66683\n01/14/2021 20:52:31 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 20:52:58 - INFO - __main__ - bleu-4 = 9.79 \n01/14/2021 20:52:58 - INFO - __main__ - ********************\n01/14/2021 20:52:58 - INFO - __main__ - Best bleu:9.79\n01/14/2021 20:52:58 - INFO - __main__ - ********************\nepoch 1 loss 5.2249: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 20:55:58 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 20:55:58 - INFO - __main__ - Num examples = 88\n01/14/2021 20:55:58 - INFO - __main__ - Batch size = 8\n01/14/2021 20:56:00 - INFO - __main__ - eval_ppl = 223.30135\n01/14/2021 20:56:00 - INFO - __main__ - global_step = 705\n01/14/2021 20:56:00 - INFO - __main__ 
- train_loss = 5.2249\n01/14/2021 20:56:00 - INFO - __main__ - ********************\n01/14/2021 20:56:02 - INFO - __main__ - Best ppl:223.30135\n01/14/2021 20:56:02 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 20:56:30 - INFO - __main__ - bleu-4 = 10.3 \n01/14/2021 20:56:30 - INFO - __main__ - ********************\n01/14/2021 20:56:30 - INFO - __main__ - Best bleu:10.3\n01/14/2021 20:56:30 - INFO - __main__ - ********************\nepoch 2 loss 4.4676: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 20:59:31 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 20:59:31 - INFO - __main__ - Num examples = 88\n01/14/2021 20:59:31 - INFO - __main__ - Batch size = 8\n01/14/2021 20:59:32 - INFO - __main__ - eval_ppl = 167.43889\n01/14/2021 20:59:32 - INFO - __main__ - global_step = 1057\n01/14/2021 20:59:32 - INFO - __main__ - train_loss = 4.4676\n01/14/2021 20:59:32 - INFO - __main__ - ********************\n01/14/2021 20:59:35 - INFO - __main__ - Best ppl:167.43889\n01/14/2021 20:59:35 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 21:00:05 - INFO - __main__ - bleu-4 = 10.68 \n01/14/2021 21:00:05 - INFO - __main__ - ********************\n01/14/2021 21:00:05 - INFO - __main__ - Best bleu:10.68\n01/14/2021 21:00:05 - INFO - __main__ - ********************\nepoch 3 loss 3.8263: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 21:03:05 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 21:03:05 - INFO - __main__ - Num examples = 88\n01/14/2021 21:03:05 - INFO - __main__ - Batch size = 8\n01/14/2021 21:03:07 - INFO - __main__ - eval_ppl = 160.25635\n01/14/2021 21:03:07 - INFO - __main__ - global_step = 1409\n01/14/2021 21:03:07 - INFO - __main__ - train_loss = 3.8263\n01/14/2021 21:03:07 - INFO - __main__ - ********************\n01/14/2021 21:03:10 - INFO - __main__ - Best ppl:160.25635\n01/14/2021 21:03:10 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 21:03:38 - INFO - __main__ - bleu-4 = 11.04 
\n01/14/2021 21:03:38 - INFO - __main__ - ********************\n01/14/2021 21:03:38 - INFO - __main__ - Best bleu:11.04\n01/14/2021 21:03:38 - INFO - __main__ - ********************\nepoch 4 loss 3.2797: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 21:06:38 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 21:06:38 - INFO - __main__ - Num examples = 88\n01/14/2021 21:06:38 - INFO - __main__ - Batch size = 8\n01/14/2021 21:06:40 - INFO - __main__ - eval_ppl = 152.19858\n01/14/2021 21:06:40 - INFO - __main__ - global_step = 1761\n01/14/2021 21:06:40 - INFO - __main__ - train_loss = 3.2797\n01/14/2021 21:06:40 - INFO - __main__ - ********************\n01/14/2021 21:06:42 - INFO - __main__ - Best ppl:152.19858\n01/14/2021 21:06:42 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 21:07:14 - INFO - __main__ - bleu-4 = 10.36 \n01/14/2021 21:07:14 - INFO - __main__ - ********************\nepoch 5 loss 2.8204: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 21:10:12 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 21:10:12 - INFO - __main__ - Num examples = 88\n01/14/2021 21:10:12 - INFO - __main__ - Batch size = 8\n01/14/2021 21:10:13 - INFO - __main__ - eval_ppl = 150.95443\n01/14/2021 21:10:13 - INFO - __main__ - global_step = 2113\n01/14/2021 21:10:13 - INFO - __main__ - train_loss = 2.8204\n01/14/2021 21:10:13 - INFO - __main__ - ********************\n01/14/2021 21:10:16 - INFO - __main__ - Best ppl:150.95443\n01/14/2021 21:10:16 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 21:10:45 - INFO - __main__ - bleu-4 = 11.57 \n01/14/2021 21:10:45 - INFO - __main__ - ********************\n01/14/2021 21:10:45 - INFO - __main__ - Best bleu:11.57\n01/14/2021 21:10:45 - INFO - __main__ - ********************\nepoch 6 loss 2.4442: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 21:13:46 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 21:13:46 - INFO - __main__ - Num examples = 88\n01/14/2021 
21:13:46 - INFO - __main__ - Batch size = 8\n01/14/2021 21:13:47 - INFO - __main__ - eval_ppl = 156.69898\n01/14/2021 21:13:47 - INFO - __main__ - global_step = 2465\n01/14/2021 21:13:47 - INFO - __main__ - train_loss = 2.4442\n01/14/2021 21:13:47 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 21:14:17 - INFO - __main__ - bleu-4 = 10.65 \n01/14/2021 21:14:17 - INFO - __main__ - ********************\nepoch 7 loss 2.1565: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 21:17:15 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 21:17:15 - INFO - __main__ - Num examples = 88\n01/14/2021 21:17:15 - INFO - __main__ - Batch size = 8\n01/14/2021 21:17:16 - INFO - __main__ - eval_ppl = 163.34726\n01/14/2021 21:17:16 - INFO - __main__ - global_step = 2817\n01/14/2021 21:17:16 - INFO - __main__ - train_loss = 2.1565\n01/14/2021 21:17:16 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 21:17:50 - INFO - __main__ - bleu-4 = 10.56 \n01/14/2021 21:17:50 - INFO - __main__ - ********************\nepoch 8 loss 1.9398: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 21:20:47 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 21:20:47 - INFO - __main__ - Num examples = 88\n01/14/2021 21:20:47 - INFO - __main__ - Batch size = 8\n01/14/2021 21:20:49 - INFO - __main__ - eval_ppl = 166.41823\n01/14/2021 21:20:49 - INFO - __main__ - global_step = 3169\n01/14/2021 21:20:49 - INFO - __main__ - train_loss = 1.9398\n01/14/2021 21:20:49 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 21:21:26 - INFO - __main__ - bleu-4 = 10.74 \n01/14/2021 21:21:26 - INFO - __main__ - ********************\nepoch 9 loss 1.7877: 100% 352/352 [02:57<00:00, 1.98it/s]\n01/14/2021 21:24:24 - INFO - __main__ - \n***** Running evaluation *****\n01/14/2021 21:24:24 - INFO - __main__ - Num examples = 88\n01/14/2021 21:24:24 - INFO - __main__ - Batch size = 8\n01/14/2021 21:24:25 - INFO - __main__ - eval_ppl = 169.37057\n01/14/2021 21:24:25 - 
INFO - __main__ - global_step = 3521\n01/14/2021 21:24:25 - INFO - __main__ - train_loss = 1.7877\n01/14/2021 21:24:25 - INFO - __main__ - ********************\nTotal: 88\n01/14/2021 21:24:59 - INFO - __main__ - bleu-4 = 10.28 \n01/14/2021 21:24:59 - INFO - __main__ - ********************\n"
]
],
[
[
"Yay! Our model has finished baking and we can now see how well it turned out by evaluating it!",
"_____no_output_____"
]
],
[
[
"batch_size=64\ndev_file=f\"{data_dir}/{lang}/valid.jsonl\"\ntest_file=f\"{data_dir}/{lang}/test.jsonl\"\ntest_model=f\"{output_dir}/checkpoint-best-bleu/pytorch_model.bin\" #checkpoint for test\n\n! python run.py \\\n --do_test \\\n --model_type roberta \\\n --model_name_or_path microsoft/codebert-base \\\n --load_model_path {test_model} \\\n --dev_filename {dev_file} \\\n --test_filename {test_file} \\\n --output_dir {output_dir} \\\n --max_source_length {source_length} \\\n --max_target_length {target_length} \\\n --beam_size {beam_size} \\\n --eval_batch_size {batch_size}",
"2021-01-14 21:25:04.498200: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n01/14/2021 21:25:07 - INFO - __main__ - Namespace(adam_epsilon=1e-08, beam_size=10, config_name='', dev_filename='./java/valid.jsonl', do_eval=False, do_lower_case=False, do_test=True, do_train=False, eval_batch_size=64, eval_steps=-1, gradient_accumulation_steps=1, learning_rate=5e-05, load_model_path='model/java/checkpoint-best-bleu/pytorch_model.bin', local_rank=-1, max_grad_norm=1.0, max_source_length=256, max_steps=-1, max_target_length=48, model_name_or_path='microsoft/codebert-base', model_type='roberta', no_cuda=False, num_train_epochs=3, output_dir='model/java', seed=42, test_filename='./java/test.jsonl', tokenizer_name='', train_batch_size=8, train_filename=None, train_steps=-1, warmup_steps=0, weight_decay=0.0)\n01/14/2021 21:25:07 - WARNING - __main__ - Process rank: -1, device: cuda, n_gpu: 1, distributed training: False\n01/14/2021 21:25:23 - INFO - __main__ - reload model from model/java/checkpoint-best-bleu/pytorch_model.bin\n01/14/2021 21:25:48 - INFO - __main__ - Test file: ./java/valid.jsonl\n100% 2/2 [00:26<00:00, 13.34s/it]\nTotal: 88\n01/14/2021 21:26:15 - INFO - __main__ - bleu-4 = 11.57 \n01/14/2021 21:26:15 - INFO - __main__ - ********************\n01/14/2021 21:26:15 - INFO - __main__ - Test file: ./java/test.jsonl\n100% 4/4 [00:55<00:00, 13.95s/it]\nTotal: 193\n01/14/2021 21:27:11 - INFO - __main__ - bleu-4 = 9.74 \n01/14/2021 21:27:11 - INFO - __main__ - ********************\n"
]
],
[
[
"Let's now load up our model and take it for a spin!",
"_____no_output_____"
]
],
[
[
"# collapse\nimport torch\n\nimport torch.nn as nn\n\nfrom model import Seq2Seq\nfrom transformers import RobertaConfig, RobertaModel\n\nconfig = RobertaConfig.from_pretrained(pretrained_model)\nencoder = RobertaModel.from_pretrained(pretrained_model, config = config) \ndecoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)\ndecoder = nn.TransformerDecoder(decoder_layer, num_layers=6)\nmodel = Seq2Seq(encoder = encoder,decoder = decoder,config=config,\n beam_size=beam_size,max_length=target_length,\n sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)\nmodel.load_state_dict(torch.load(Path(output_dir)/\"checkpoint-last/pytorch_model.bin\"))\nmodel.to('cuda')",
"_____no_output_____"
],
[
"idx = 0\nTEXT_TO_SUMMARIZE = df_val.mthd.values[idx]\nprint('Code:', TEXT_TO_SUMMARIZE)\nprint('Original Comment:', df_val.cmt.values[idx])",
"Code: public static byte[] decode(final string s) { int delta = s.endswith(\"==\") ? 2 : s.endswith(\"=\") ? 1 : 0; byte[] buffer = new byte[s.length() * bytes_per_unencoded_block / bytes_per_encoded_block - delta]; int mask = 0xff; int pos = 0; for (int i = 0; i < s.length(); i += bytes_per_encoded_block) { int c0 = decode_table[s.charat(i)]; int c1 = decode_table[s.charat(i + 1)]; buffer[pos++] = (byte) (((c0 << 2) | (c1 >> 4)) & mask); if (pos >= buffer.length) { return buffer; } int c2 = decode_table[s.charat(i + 2)]; buffer[pos++] = (byte) (((c1 << 4) | (c2 >> 2)) & mask); if (pos >= buffer.length) { return buffer; } int c3 = decode_table[s.charat(i + 3)]; buffer[pos++] = (byte) (((c2 << 6) | c3) & mask); } return buffer; }\nOriginal Comment: decodes the given base64-encoded string.\n"
],
[
"# collapse\nfrom run import convert_examples_to_features, Example\n\nclass Args:\n max_source_length = source_length\n max_target_length = target_length\n\nargs = Args()\n\ndef get_preds(df: pd.DataFrame):\n ps = []\n for idx, row in tqdm(df.iterrows(), total=len(df)):\n examples = [\n Example(idx, source = row.mthd, target = row.cmt)\n ]\n eval_features = convert_examples_to_features(\n examples, tokenizer, args, stage='test'\n )\n source_ids = torch.tensor(eval_features[0].source_ids, dtype = torch.long).unsqueeze(0).to('cuda')\n source_mask = torch.tensor(eval_features[0].source_mask, dtype = torch.long).unsqueeze(0).to('cuda')\n\n with torch.no_grad():\n preds = model(source_ids = source_ids, source_mask = source_mask) \n for pred in preds:\n t = pred[0].cpu().numpy()\n t = list(t)\n if 0 in t:\n t = t[:t.index(0)]\n text = tokenizer.decode(t,clean_up_tokenization_spaces=False)\n ps.append(text)\n \n return ps",
"_____no_output_____"
],
[
"df_val = df_val.reset_index()\npreds = get_preds(df_val.head(10))\nfor idx, row in df_val.head(10).iterrows():\n print('Code:', row.mthd)\n print('Original Comment:', row.cmt)\n print('Generated Comment:', preds[idx])\n print('='*40)",
"_____no_output_____"
]
],
[
[
"The model seems to be doing a good job, but if you play with it some more you'll realize it is mostly taking the name of the method and using that to guide the comment. This makes sense, but it probably isn't learning much more than this association, at least with this small model. Let's explore it a bit more by looking at all the examples in the validation set it is failing the most on.",
"_____no_output_____"
]
],
[
[
"def get_preds_losses(df: pd.DataFrame):\n ps = []\n losses = []\n for idx, row in tqdm(df.iterrows(), total=len(df)):\n examples = [\n Example(idx, source = row.mthd, target = row.cmt)\n ]\n eval_features = convert_examples_to_features(\n examples, tokenizer, args, stage='test'\n )\n source_ids = torch.tensor([f.source_ids for f in eval_features], dtype = torch.long).to('cuda')\n source_mask = torch.tensor([f.source_mask for f in eval_features], dtype = torch.long).to('cuda')\n target_ids = torch.tensor([f.target_ids for f in eval_features], dtype = torch.long).to('cuda')\n target_mask = torch.tensor([f.target_mask for f in eval_features], dtype = torch.long).to('cuda')\n\n with torch.no_grad():\n _, loss, _ = model(\n source_ids = source_ids, source_mask = source_mask,\n target_ids = target_ids, target_mask = target_mask\n )\n preds = model(source_ids = source_ids, source_mask = source_mask) \n for pred in preds:\n t = pred[0].cpu().numpy()\n t = list(t)\n if 0 in t:\n t = t[:t.index(0)]\n text = tokenizer.decode(t,clean_up_tokenization_spaces=False)\n ps.append(text)\n losses.append(loss.item())\n \n return ps, losses",
"_____no_output_____"
],
[
"df_head = df_val.copy()\nps, losses = get_preds_losses(df_head)\ndf_head['pred'] = ps\ndf_head['loss'] = losses\ndf_sorted_losses = df_head.sort_values('loss', ascending = False)\n\nfor _, row in df_sorted_losses.head(10).iterrows():\n print('Code:', row.mthd)\n print('Original Comment:', row.cmt)\n print('Generated Comment:', row.pred)\n print(row.loss)\n print('='*40)",
"_____no_output_____"
]
],
[
[
"# What's Next?\n\nIf you'd like to see how you can integrate this code comment summarizer model into the popular VSCode IDE, check out my video that goes over just that!\n\n> youtube: https://youtu.be/SYjgPjQ-vbc",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
]
|
ec65485f768b7ee4082e3ab30e806dfabeb6edbc | 43,128 | ipynb | Jupyter Notebook | courses/machine_learning/deepdive2/launching_into_ml/solutions/automl-tabular-classification.ipynb | maureenm/training-data-analyst | 4ce1319bf0058ad83e929c500abbd2a47a14de5d | [
"Apache-2.0"
]
| 1 | 2021-11-11T11:36:10.000Z | 2021-11-11T11:36:10.000Z | courses/machine_learning/deepdive2/launching_into_ml/solutions/automl-tabular-classification.ipynb | maureenm/training-data-analyst | 4ce1319bf0058ad83e929c500abbd2a47a14de5d | [
"Apache-2.0"
]
| null | null | null | courses/machine_learning/deepdive2/launching_into_ml/solutions/automl-tabular-classification.ipynb | maureenm/training-data-analyst | 4ce1319bf0058ad83e929c500abbd2a47a14de5d | [
"Apache-2.0"
]
| null | null | null | 46.027748 | 335 | 0.664162 | [
[
[
"# Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction\n\n## Overview\n\nThis tutorial demonstrates how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction.\n\n## Learning Objective\n\nIn this notebook, you learn how to:\n\n* Create a Vertex AI model training job.\n* Train an AutoML Tabular model.\n* Deploy the `Model` resource to a serving `Endpoint` resource.\n* Make a prediction by sending data.\n* Undeploy the `Model` resource.\n\n## Introduction\n\nThis notebook demonstrates, using the Vertex AI Python client library, how to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.\n\nEach learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/automl-tabular-classification.ipynb). \n\n**Make sure to enable the Vertex AI API and Compute Engine API.**",
"_____no_output_____"
],
[
"## Installation",
"_____no_output_____"
]
],
[
[
"import os\n\n# The Google Cloud Notebook product has specific requirements\nIS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists(\"/opt/deeplearning/metadata/env_version\")\n\nUSER_FLAG = \"\"\n# Google Cloud Notebook requires dependencies to be installed with '--user'\nif IS_GOOGLE_CLOUD_NOTEBOOK:\n USER_FLAG = \"--user\"",
"_____no_output_____"
]
],
[
[
"Install the latest version of the Vertex AI client library.\n\nRun the following command in your virtual environment to install the Vertex SDK for Python:",
"_____no_output_____"
]
],
[
[
"! pip install {USER_FLAG} --upgrade google-cloud-aiplatform",
"Requirement already satisfied: google-cloud-aiplatform in /opt/conda/lib/python3.7/site-packages (1.1.1)\nCollecting google-cloud-aiplatform\n Downloading google_cloud_aiplatform-1.3.0-py2.py3-none-any.whl (1.3 MB)\n\u001b[K |████████████████████████████████| 1.3 MB 7.6 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: proto-plus>=1.10.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.19.0)\nRequirement already satisfied: google-cloud-bigquery<3.0.0dev,>=1.15.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (2.23.2)\nRequirement already satisfied: google-api-core[grpc]<3.0.0dev,>=1.26.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.31.1)\nRequirement already satisfied: google-cloud-storage<2.0.0dev,>=1.32.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (1.41.1)\nRequirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-cloud-aiplatform) (21.0)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.53.0)\nRequirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (3.16.0)\nRequirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.16.0)\nRequirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.1)\nRequirement already satisfied: google-auth<2.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.34.0)\nRequirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages 
(from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.25.1)\nRequirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (49.6.0.post20210108)\nRequirement already satisfied: grpcio<2.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.38.1)\nRequirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.7.2)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.2.2)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.2.7)\nRequirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.3.2)\nRequirement already satisfied: google-cloud-core<3.0.0dev,>=1.4.1 in /opt/conda/lib/python3.7/site-packages (from google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.7.2)\nRequirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.1.2)\nRequirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.14.6)\nRequirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from 
cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.20)\nRequirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-cloud-aiplatform) (2.4.7)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.25.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (0.4.8)\nRequirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2021.5.30)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (1.26.6)\nRequirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (4.0.0)\nRequirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<3.0.0dev,>=1.26.0->google-cloud-aiplatform) (2.10)\nInstalling collected packages: google-cloud-aiplatform\n\u001b[33m WARNING: The script tb-gcp-uploader is installed in '/home/jupyter/.local/bin' which is not on PATH.\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\nSuccessfully installed google-cloud-aiplatform-1.3.0\n"
]
],
[
[
"Install the Cloud Storage library:",
"_____no_output_____"
]
],
[
[
"! pip install {USER_FLAG} --upgrade google-cloud-storage",
"Requirement already satisfied: google-cloud-storage in /opt/conda/lib/python3.7/site-packages (1.41.1)\nCollecting google-cloud-storage\n Downloading google_cloud_storage-1.42.0-py2.py3-none-any.whl (105 kB)\n\u001b[K |████████████████████████████████| 105 kB 8.1 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: google-resumable-media<3.0dev,>=1.3.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.3.2)\nRequirement already satisfied: google-auth<3.0dev,>=1.25.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.34.0)\nRequirement already satisfied: google-cloud-core<3.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.7.2)\nRequirement already satisfied: google-api-core<3.0dev,>=1.29.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (1.31.1)\nRequirement already satisfied: requests<3.0.0dev,>=2.18.0 in /opt/conda/lib/python3.7/site-packages (from google-cloud-storage) (2.25.1)\nRequirement already satisfied: packaging>=14.3 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (21.0)\nRequirement already satisfied: protobuf>=3.12.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (3.16.0)\nRequirement already satisfied: six>=1.13.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.16.0)\nRequirement already satisfied: pytz in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2021.1)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (1.53.0)\nRequirement already satisfied: setuptools>=40.3.0 in /opt/conda/lib/python3.7/site-packages (from google-api-core<3.0dev,>=1.29.0->google-cloud-storage) 
(49.6.0.post20210108)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.2.2)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.2.7)\nRequirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-cloud-storage) (4.7.2)\nRequirement already satisfied: google-crc32c<2.0dev,>=1.0 in /opt/conda/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.1.2)\nRequirement already satisfied: cffi>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (1.14.6)\nRequirement already satisfied: pycparser in /opt/conda/lib/python3.7/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<3.0dev,>=1.3.0->google-cloud-storage) (2.20)\nRequirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging>=14.3->google-api-core<3.0dev,>=1.29.0->google-cloud-storage) (2.4.7)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-cloud-storage) (0.4.8)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.26.6)\nRequirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2021.5.30)\nRequirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from 
requests<3.0.0dev,>=2.18.0->google-cloud-storage) (4.0.0)\nInstalling collected packages: google-cloud-storage\nSuccessfully installed google-cloud-storage-1.42.0\n"
]
],
[
[
"### Restart the kernel\n\nAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.",
"_____no_output_____"
]
],
[
[
"# Automatically restart kernel after installs\nimport os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)",
"_____no_output_____"
]
],
[
[
"### Set your project ID\n\n**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.",
"_____no_output_____"
]
],
[
[
"import os\n\nPROJECT_ID = \"\"\n\n# Get your Google Cloud project ID from gcloud\nif not os.getenv(\"IS_TESTING\"):\n shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID: \", PROJECT_ID)",
"Project ID: qwiklabs-gcp-04-c846b6079446\n"
]
],
[
[
"Otherwise, set your project ID here.",
"_____no_output_____"
]
],
[
[
"if PROJECT_ID == \"\" or PROJECT_ID is None:\n PROJECT_ID = \"qwiklabs-gcp-04-c846b6079446\" # @param {type:\"string\"}",
"_____no_output_____"
]
],
[
[
"### Timestamp\n\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.",
"_____no_output_____"
]
],
[
[
"from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")",
"_____no_output_____"
]
],
[
[
"### Create a Cloud Storage bucket\n\n**The following steps are required, regardless of your notebook environment.**\n\nThis notebook demonstrates how to use Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored.\n\nSet the name of your Cloud Storage bucket below. It must be unique across all of your \nCloud Storage buckets.\n\nYou may also change the `REGION` variable, which is used for operations\nthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are\navailable](https://cloud.google.com/vertex-ai/docs/general/locations). You may\nnot use a Multi-Regional Storage bucket for training with Vertex AI.",
"_____no_output_____"
]
],
[
[
"BUCKET_NAME = \"gs://qwiklabs-gcp-04-c846b6079446\" # @param {type:\"string\"}\nREGION = \"us-central1\" # @param {type:\"string\"}",
"_____no_output_____"
],
[
"if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://qwiklabs-gcp-04-c846b6079446\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP",
"_____no_output_____"
]
],
[
[
"**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.",
"_____no_output_____"
]
],
[
[
"! gsutil mb -l $REGION $BUCKET_NAME",
"Creating gs://qwiklabs-gcp-04-c846b6079446aip-20210826051658/...\n"
]
],
[
[
"Finally, validate access to your Cloud Storage bucket by examining its contents:",
"_____no_output_____"
]
],
[
[
"! gsutil ls -al $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"### Copy dataset into your Cloud Storage bucket",
"_____no_output_____"
]
],
[
[
"IMPORT_FILE = \"petfinder-tabular-classification.csv\"\n! gsutil cp gs://cloud-samples-data/ai-platform-unified/datasets/tabular/{IMPORT_FILE} {BUCKET_NAME}/data/\n\ngcs_source = f\"{BUCKET_NAME}/data/{IMPORT_FILE}\"",
"Copying gs://cloud-samples-data/ai-platform-unified/datasets/tabular/petfinder-tabular-classification.csv [Content-Type=text/csv]...\n/ [1 files][872.8 KiB/872.8 KiB] \nOperation completed over 1 objects/872.8 KiB. \n"
]
],
[
[
"### Import Vertex SDK for Python\n\nImport the Vertex SDK into your Python environment and initialize it.",
"_____no_output_____"
]
],
[
[
"import os\n\nfrom google.cloud import aiplatform\n\naiplatform.init(project=PROJECT_ID, location=REGION)",
"_____no_output_____"
]
],
[
[
"## Tutorial\n\nNow you are ready to create your AutoML Tabular model.",
"_____no_output_____"
],
[
"### Create a Managed Tabular Dataset from a CSV\n\nThis section will create a dataset from a CSV file stored on your GCS bucket.",
"_____no_output_____"
]
],
[
[
"ds = dataset = aiplatform.TabularDataset.create(\n display_name=\"petfinder-tabular-dataset\",\n gcs_source=gcs_source,\n)\n\nds.resource_name",
"INFO:google.cloud.aiplatform.datasets.dataset:Creating TabularDataset\nINFO:google.cloud.aiplatform.datasets.dataset:Create TabularDataset backing LRO: projects/1075205415941/locations/us-central1/datasets/1945247175768276992/operations/1110822578768838656\nINFO:google.cloud.aiplatform.datasets.dataset:TabularDataset created. Resource name: projects/1075205415941/locations/us-central1/datasets/1945247175768276992\nINFO:google.cloud.aiplatform.datasets.dataset:To use this TabularDataset in another session:\nINFO:google.cloud.aiplatform.datasets.dataset:ds = aiplatform.TabularDataset('projects/1075205415941/locations/us-central1/datasets/1945247175768276992')\n"
]
],
[
[
"### Launch a Training Job to Create a Model\n\nOnce we have defined your training script, we will create a model. The `run` function creates a training pipeline that trains and creates a `Model` object. After the training pipeline completes, the `run` function returns the `Model` object.",
"_____no_output_____"
]
],
[
[
"job = aiplatform.AutoMLTabularTrainingJob(\n display_name=\"train-petfinder-automl-1\",\n optimization_prediction_type=\"classification\",\n column_transformations=[\n {\"categorical\": {\"column_name\": \"Type\"}},\n {\"numeric\": {\"column_name\": \"Age\"}},\n {\"categorical\": {\"column_name\": \"Breed1\"}},\n {\"categorical\": {\"column_name\": \"Color1\"}},\n {\"categorical\": {\"column_name\": \"Color2\"}},\n {\"categorical\": {\"column_name\": \"MaturitySize\"}},\n {\"categorical\": {\"column_name\": \"FurLength\"}},\n {\"categorical\": {\"column_name\": \"Vaccinated\"}},\n {\"categorical\": {\"column_name\": \"Sterilized\"}},\n {\"categorical\": {\"column_name\": \"Health\"}},\n {\"numeric\": {\"column_name\": \"Fee\"}},\n {\"numeric\": {\"column_name\": \"PhotoAmt\"}},\n ],\n)\n\n# This will take around an hour to run\nmodel = job.run(\n dataset=ds,\n target_column=\"Adopted\",\n training_fraction_split=0.8,\n validation_fraction_split=0.1,\n test_fraction_split=0.1,\n model_display_name=\"adopted-prediction-model\",\n disable_early_stopping=False,\n)",
"/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n and should_run_async(code)\n/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:16: DeprecationWarning: consider using column_specs instead. column_transformations will be deprecated in the future.\n app.launch_new_instance()\n"
]
],
[
[
"### Deploy your model\n\nBefore you use your model to make predictions, you need to deploy it to an `Endpoint`. You can do this by calling the `deploy` function on the `Model` resource. This function does two things:\n\n1. Creates an `Endpoint` resource to which the `Model` resource will be deployed.\n2. Deploys the `Model` resource to the `Endpoint` resource.\n\nDeploy your model.\n\n### NOTE: Wait until the model **FINISHES** deployment before proceeding to prediction.",
"_____no_output_____"
]
],
[
[
"endpoint = model.deploy(\n machine_type=\"n1-standard-4\",\n)",
"/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n and should_run_async(code)\n"
]
],
[
[
"### Predict on the endpoint\n",
"_____no_output_____"
],
[
"* This sample instance is taken from an observation in which `Adopted` = **Yes**\n* Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your `AutoMLTabularTrainingJob` inform Vertex AI to transform the inputs to their defined types.\n",
"_____no_output_____"
]
],
[
[
"prediction = endpoint.predict(\n [\n {\n \"Type\": \"Cat\",\n \"Age\": \"3\",\n \"Breed1\": \"Tabby\",\n \"Gender\": \"Male\",\n \"Color1\": \"Black\",\n \"Color2\": \"White\",\n \"MaturitySize\": \"Small\",\n \"FurLength\": \"Short\",\n \"Vaccinated\": \"No\",\n \"Sterilized\": \"No\",\n \"Health\": \"Healthy\",\n \"Fee\": \"100\",\n \"PhotoAmt\": \"2\",\n }\n ]\n)\n\nprint(prediction)",
"/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n and should_run_async(code)\n"
]
],
[
[
"### Undeploy the model\n\nTo undeploy your `Model` resource from the serving `Endpoint` resource, use the endpoint's `undeploy` method with the following parameter:\n\n- `deployed_model_id`: The model deployment identifier returned by the prediction service when the `Model` resource is deployed. You can retrieve the `deployed_model_id` using the prediction object's `deployed_model_id` property.",
"_____no_output_____"
]
],
[
[
"endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)",
"INFO:google.cloud.aiplatform.models:Undeploying Endpoint model: projects/1075205415941/locations/us-central1/endpoints/7467372802459303936\n"
]
],
[
[
"# Cleaning up\n\nTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n\nOtherwise, you can delete the individual resources you created in this tutorial:\n\n- Training Job\n- Model\n- Endpoint\n- Cloud Storage Bucket\n\n**Note**: You must delete any `Model` resources deployed to the `Endpoint` resource before deleting the `Endpoint` resource.",
"_____no_output_____"
]
],
[
[
"delete_training_job = True\ndelete_model = True\ndelete_endpoint = True\n\n# Warning: Setting this to true will delete everything in your bucket\ndelete_bucket = False\n\n# Delete the training job\njob.delete()\n\n# Delete the model\nmodel.delete()\n\n# Delete the endpoint\nendpoint.delete()\n\nif delete_bucket and \"BUCKET_NAME\" in globals():\n ! gsutil -m rm -r $BUCKET_NAME",
"INFO:google.cloud.aiplatform.base:Deleting AutoMLTabularTrainingJob : projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360\nINFO:google.cloud.aiplatform.base:Delete AutoMLTabularTrainingJob backing LRO: projects/1075205415941/locations/us-central1/operations/5317466105709592576\nINFO:google.cloud.aiplatform.base:AutoMLTabularTrainingJob deleted. . Resource name: projects/1075205415941/locations/us-central1/trainingPipelines/1715908841423503360\nINFO:google.cloud.aiplatform.base:Deleting Model : projects/1075205415941/locations/us-central1/models/3676687718445744128\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec65489d1aa341de401adcf099afeaf5411270a9 | 182,501 | ipynb | Jupyter Notebook | content/ch-quantum-hardware/transpiling-quantum-circuits.ipynb | a-kapila/qiskit-textbook | 19ee615f0c94dd82b6fddfae555b39fe05e7db07 | [
"Apache-2.0"
]
| 1 | 2021-03-13T12:15:44.000Z | 2021-03-13T12:15:44.000Z | content/ch-quantum-hardware/transpiling-quantum-circuits.ipynb | a-kapila/qiskit-textbook | 19ee615f0c94dd82b6fddfae555b39fe05e7db07 | [
"Apache-2.0"
]
| null | null | null | content/ch-quantum-hardware/transpiling-quantum-circuits.ipynb | a-kapila/qiskit-textbook | 19ee615f0c94dd82b6fddfae555b39fe05e7db07 | [
"Apache-2.0"
]
| 1 | 2020-07-15T03:48:47.000Z | 2020-07-15T03:48:47.000Z | 42.382954 | 710 | 0.528046 | [
[
[
"# Transpiling Quantum Circuits",
"_____no_output_____"
],
[
"In this chapter we will investigate how quantum circuits are transformed when run on quantum devices. That we need to modify the circuits at all is a consequence of the limitations of current quantum computing hardware. Namely, the limited connectivity inherent in most quantum hardware, restricted gate sets, as well as environmental noise and gate errors, all conspire to limit the effective computational power on today's quantum devices. Fortunately, quantum circuit rewriting tool chains have been developed that directly address these issues, and return heavily optimized circuits mapped to targeted quantum devices. Here we will explore the IBM Qiskit 'transpiler' circuit rewriting framework.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom qiskit import *\nfrom qiskit.tools.jupyter import *\nfrom qiskit.providers.ibmq import least_busy\n%matplotlib inline\n%config InlineBackend.figure_format = 'svg' # Makes the images look nice",
"_____no_output_____"
],
[
"IBMQ.load_account()",
"_____no_output_____"
]
],
[
[
"## Core Steps in Circuit Rewriting",
"_____no_output_____"
],
[
"As we will see, rewriting quantum circuits to match hardware constraints and optimize for performance can be far from trivial. The flow of logic in the rewriting tool chain need not be linear, and can often have iterative sub-loops, conditional branches, and other complex behaviors. That being said, the basic building blocks follow the structure given below.",
"_____no_output_____"
],
[
"\n",
"_____no_output_____"
],
[
"Our goal in this section is to see what each of these \"passes\" does at a high-level, and then begin exploring their usage on a set of common circuits.",
"_____no_output_____"
],
[
"### Unrolling to Basis Gates",
"_____no_output_____"
],
[
"When writing a quantum circuit you are free to use any quantum gate (unitary operator) that you like, along with a collection of non-gate operations such as qubit measurements and reset operations. However, when running a circuit on a real quantum device one no longer has this flexibility. Due to limitations in, for example, the physical interactions between qubits, difficulty in implementing multi-qubit gates, control electronics etc, a quantum computing device can only natively support a handful of quantum gates and non-gate operations. In the present case of IBM Q devices, the native gate set can be found by querying the devices themselves:",
"_____no_output_____"
]
],
[
[
"provider = IBMQ.get_provider(group='open')\nprovider.backends(simulator=False)",
"_____no_output_____"
],
[
"backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 5 and not x.configuration().simulator and x.status().operational==True))\nbackend.configuration().basis_gates",
"_____no_output_____"
]
],
[
[
"We see that the our device supports five native gates: three single-qubit gates (`u1`, `u2`, `u3`, and `id`) and one two-qubit entangling gate `cx`. In addition, the device supports qubit measurements (otherwise we can not read out an answer!). Although we have queried only a single device, all IBM Q devices support this gate set.\n\nThe `u*` gates represent arbitrary single-qubit rotations of one, two, and three angles. The `u1` gates are single-parameter rotations that represent generalized phase gates of the form\n\n$$\nU_{1}(\\lambda) = \\begin{bmatrix}\n1 & 0 \\\\\n0 & e^{i\\lambda}\n\\end{bmatrix}\n$$\n\nThis set includes common gates such as $Z$, $T$, $T^{\\dagger}$, $S$, and $S^{\\dagger}$. It turns out that these gates do not actually need to be performed on hardware, but instead, can be implemented in software as \"virtual gates\". These virtual gates are called \"frame changes\" and take zero time, and have no associated error; they are free gates on hardware.\n\nTwo-angle rotations, $U_{2}(\\phi,\\lambda)$, are actually two frame changes with a single $X_{\\pi/2}$ gate in between them, and can be used to synthesize gates like the Hadamard ($U_{2}(0,\\pi)$) gate. As the only actual gate performed is the $X_{\\pi/2}$ gate, the error and gate time associated with any $U_{2}$ gate is the same as an $X_{\\pi/2}$ gate. Similarly, $U_{3}(\\theta,\\phi,\\lambda)$ gates are formed from three frame changes with two $X_{\\pi/2}$ gates in between them. The errors and gate times are twice those of a single $X_{\\pi/2}$. The identity gate, $id$, is straightforward, and is a placeholder gate with a fixed time-interval. 
\n\nThe only entangling gate supported by the IBM Q devices is the CNOT gate (`cx`) that, in the computational basis, can be written as:\n\n$$\n\\mathrm{CNOT}(0,1) = \\begin{bmatrix}\n1 & 0 & 0 & 0 \\\\\n0 & 0 & 0 & 1 \\\\\n0 & 0 & 1 & 0 \\\\\n0 & 1 & 0 & 0\n\\end{bmatrix}\n$$,\n\nwhere we see that the matrix form follows from the specific bit-ordering convention used in Qiskit.",
"_____no_output_____"
],
[
"Every quantum circuit run on a IBM Q device must be expressed using only these basis gates. For example, suppose one wants to run a simple phase estimation circuit:",
"_____no_output_____"
]
],
[
[
"qr = QuantumRegister(2, 'q')\ncr = ClassicalRegister(1, 'c')\nqc = QuantumCircuit(qr, cr)\n\nqc.h(qr[0])\nqc.x(qr[1])\nqc.cu1(np.pi/4, qr[0], qr[1])\nqc.h(qr[0])\nqc.measure(qr[0], cr[0])\nqc.draw(output='mpl')",
"_____no_output_____"
]
],
[
[
"We have $H$, $X$, and controlled-$U_{1}$ gates, all of which are not in our devices basis gate set, and must be expanded. We will see that this expansion is taken care of for you, but for now let us just rewrite the circuit in the basis gate set:",
"_____no_output_____"
]
],
[
[
"qr = QuantumRegister(2, 'q')\ncr = ClassicalRegister(1, 'c')\nqc_basis = QuantumCircuit(qr, cr)\n\n# Hadamard in U2 format\nqc_basis.u2(0, np.pi, qr[0])\n# X gate in U3 format\nqc_basis.u3(np.pi, 0, np.pi, qr[1])\n\n# Decomposition for controlled-U1 with lambda=pi/4\nqc_basis.u1(np.pi/8, qr[0]) \nqc_basis.cx(qr[0], qr[1]) \nqc_basis.u1(-np.pi/8, qr[1]) \nqc_basis.cx(qr[0], qr[1])\nqc_basis.u1(np.pi/8, qr[1])\n\n# Hadamard in U2 format\nqc_basis.u2(0, np.pi, qr[0]) \n\nqc_basis.measure(qr[0], cr[0])\nqc_basis.draw(output='mpl')",
"_____no_output_____"
]
],
[
[
"A few things to highlight. One, the circuit has gotten longer with respect to the initial one. This can be verified by checking the depth of the circuits:",
"_____no_output_____"
]
],
[
[
"print(qc.depth(), ',', qc_basis.depth())",
"4 , 7\n"
]
],
[
[
"Second, although we had a single controlled gate, the fact that it was not in the basis set means that, when expanded, it requires more than a single `cx` gate to implement. All said, unrolling to the basis set of gates leads to an increase in the length of a quantum circuit and the number of gates. Both of these increases lead to more errors from the environment and gate errors, respectively, and further circuit rewriting steps must try to mitigate this effect through circuit optimizations.",
"_____no_output_____"
],
[
"Finally, we will look at the particularly important example of a Toffoli, or controlled-controlled-not gate:",
"_____no_output_____"
]
],
[
[
"qr = QuantumRegister(3, 'q')\nqc = QuantumCircuit(qr)\n\nqc.ccx(qr[0], qr[1], qr[2])\nqc.draw(output='mpl')",
"_____no_output_____"
]
],
[
[
"As a three-qubit gate, it should already be clear that this is not in the basis set of our devices. We have already seen that controlled gates not in the basis set are typically decomposed into multiple CNOT gates. This is doubly true for controlled gates with more than two qubits, where multiple CNOT gates are needed to implement the entangling across the multiple qubits. In our basis set, the Toffoli gate can be written as:",
"_____no_output_____"
]
],
[
[
"qr = QuantumRegister(3, 'q')\nqc_basis = QuantumCircuit(qr)\n\nqc_basis.u2(0,np.pi, qr[2])\nqc_basis.cx(qr[1], qr[2])\nqc_basis.u1(-np.pi/4, qr[2])\nqc_basis.cx(qr[0], qr[2])\nqc_basis.u1(np.pi/4, qr[2])\nqc_basis.cx(qr[1], qr[2])\nqc_basis.u1(np.pi/4, qr[1])\nqc_basis.u1(-np.pi/4, qr[2])\nqc_basis.cx(qr[0], qr[2])\nqc_basis.cx(qr[0], qr[1])\nqc_basis.u1(np.pi/4, qr[2])\nqc_basis.u1(np.pi/4, qr[0])\nqc_basis.u1(-np.pi/4, qr[1])\nqc_basis.u2(0,np.pi, qr[2])\nqc_basis.cx(qr[0], qr[1])\nqc_basis.draw(output='mpl')",
"_____no_output_____"
]
],
[
[
"Therefore, for every Toffoli gate in a quantum circuit, the IBM Q hardware must execute six CNOT gates, and a handful of single-qubit gates. From this example, it should be clear that any algorithm that makes use of multiple Toffoli gates will end up as a circuit with large depth and with therefore be appreciably affected by noise and gate errors.",
"_____no_output_____"
],
[
"### Initial Layout",
"_____no_output_____"
]
],
[
[
"qr = QuantumRegister(5, 'q')\ncr = ClassicalRegister(5, 'c')\nqc = QuantumCircuit(qr, cr)\n\nqc.h(qr[0])\nqc.cx(qr[0], qr[4])\nqc.cx(qr[4], qr[3])\nqc.cx(qr[3], qr[1])\nqc.cx(qr[1], qr[2])\n\nqc.draw(output='mpl')",
"_____no_output_____"
],
[
"from qiskit.visualization.gate_map import plot_gate_map\nplot_gate_map(backend, plot_directed=True)",
"_____no_output_____"
],
[
"import qiskit\nqiskit.__qiskit_version__",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec654e53c71f0f6cf28565d1cad6b9397ac796bf | 41,935 | ipynb | Jupyter Notebook | 1_Preliminaries.ipynb | amcumber/udacity-image-captioning | 12bff47ea017130491d58811f4df29e5fedea94b | [
"MIT"
]
| null | null | null | 1_Preliminaries.ipynb | amcumber/udacity-image-captioning | 12bff47ea017130491d58811f4df29e5fedea94b | [
"MIT"
]
| null | null | null | 1_Preliminaries.ipynb | amcumber/udacity-image-captioning | 12bff47ea017130491d58811f4df29e5fedea94b | [
"MIT"
]
| null | null | null | 44.142105 | 704 | 0.618386 | [
[
[
"# Computer Vision Nanodegree\n\n## Project: Image Captioning\n\n---\n\nIn this notebook, you will learn how to load and pre-process data from the [COCO dataset](http://cocodataset.org/#home). You will also design a CNN-RNN model for automatically generating image captions.\n\nNote that **any amendments that you make to this notebook will not be graded**. However, you will use the instructions provided in **Step 3** and **Step 4** to implement your own CNN encoder and RNN decoder by making amendments to the **models.py** file provided as part of this project. Your **models.py** file **will be graded**. \n\nFeel free to use the links below to navigate the notebook:\n- [Step 1](#step1): Explore the Data Loader\n- [Step 2](#step2): Use the Data Loader to Obtain Batches\n- [Step 3](#step3): Experiment with the CNN Encoder\n- [Step 4](#step4): Implement the RNN Decoder",
"_____no_output_____"
],
[
"<a id='step1'></a>\n## Step 1: Explore the Data Loader\n\nWe have already written a [data loader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) that you can use to load the COCO dataset in batches. \n\nIn the code cell below, you will initialize the data loader by using the `get_loader` function in **data_loader.py**. \n\n> For this project, you are not permitted to change the **data_loader.py** file, which must be used as-is.\n\nThe `get_loader` function takes as input a number of arguments that can be explored in **data_loader.py**. Take the time to explore these arguments now by opening **data_loader.py** in a new window. Most of the arguments must be left at their default values, and you are only allowed to amend the values of the arguments below:\n1. **`transform`** - an [image transform](http://pytorch.org/docs/master/torchvision/transforms.html) specifying how to pre-process the images and convert them to PyTorch tensors before using them as input to the CNN encoder. For now, you are encouraged to keep the transform as provided in `transform_train`. You will have the opportunity later to choose your own image transform to pre-process the COCO images.\n2. **`mode`** - one of `'train'` (loads the training data in batches) or `'test'` (for the test data). We will say that the data loader is in training or test mode, respectively. While following the instructions in this notebook, please keep the data loader in training mode by setting `mode='train'`.\n3. **`batch_size`** - determines the batch size. When training the model, this is number of image-caption pairs used to amend the model weights in each training step.\n4. **`vocab_threshold`** - the total number of times that a word must appear in the in the training captions before it is used as part of the vocabulary. Words that have fewer than `vocab_threshold` occurrences in the training captions are considered unknown words. \n5. 
**`vocab_from_file`** - a Boolean that decides whether to load the vocabulary from file. \n\nWe will describe the `vocab_threshold` and `vocab_from_file` arguments in more detail soon. For now, run the code cell below. Be patient - it may take a couple of minutes to run!",
"_____no_output_____"
]
],
[
[
"!pip install pycocotools",
"Collecting pycocotools\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/2e/1c/4fd663fc57be418cecf6f89d0d141ffa815d0fd6538ccddeccf767e8aace/pycocotools-2.0.3.tar.gz (106kB)\n\u001b[K 100% |████████████████████████████████| 112kB 4.1MB/s ta 0:00:01\n\u001b[?25hRequirement already satisfied: setuptools>=18.0 in /opt/conda/lib/python3.6/site-packages (from pycocotools) (38.4.0)\nRequirement already satisfied: cython>=0.27.3 in /opt/conda/lib/python3.6/site-packages (from pycocotools) (0.29.7)\nRequirement already satisfied: matplotlib>=2.1.0 in /opt/conda/lib/python3.6/site-packages (from pycocotools) (2.1.0)\nRequirement already satisfied: numpy>=1.7.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib>=2.1.0->pycocotools) (1.12.1)\nRequirement already satisfied: six>=1.10 in /opt/conda/lib/python3.6/site-packages (from matplotlib>=2.1.0->pycocotools) (1.11.0)\nRequirement already satisfied: python-dateutil>=2.0 in /opt/conda/lib/python3.6/site-packages (from matplotlib>=2.1.0->pycocotools) (2.6.1)\nRequirement already satisfied: pytz in /opt/conda/lib/python3.6/site-packages (from matplotlib>=2.1.0->pycocotools) (2017.3)\nRequirement already satisfied: cycler>=0.10 in /opt/conda/lib/python3.6/site-packages/cycler-0.10.0-py3.6.egg (from matplotlib>=2.1.0->pycocotools) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib>=2.1.0->pycocotools) (2.2.0)\nBuilding wheels for collected packages: pycocotools\n Running setup.py bdist_wheel for pycocotools ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/a4/63/19/c1b14db8360631f88a34a9729676326f0056944348fdacd5be\nSuccessfully built pycocotools\nInstalling collected packages: pycocotools\nSuccessfully installed pycocotools-2.0.3\n"
],
[
"import sys\nsys.path.append('/opt/cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\n!pip install nltk\nimport nltk\nnltk.download('punkt')\nfrom data_loader import get_loader\nfrom torchvision import transforms\n\n# Define a transform to pre-process the training images.\ntransform_train = transforms.Compose([ \n transforms.Resize(256), # smaller edge of image resized to 256\n transforms.RandomCrop(224), # get 224x224 crop from random location\n transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5\n transforms.ToTensor(), # convert the PIL Image to a tensor\n transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model\n (0.229, 0.224, 0.225))])\n\n# Set the minimum word count threshold.\nvocab_threshold = 5\n\n# Specify the batch size.\nbatch_size = 10\n\n# load from file\nload_file = True\n\n# Obtain the data loader.\nif not load_file:\n data_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=True)\n import pickle\n with open ('data_loader.pkl', 'wb') as fh:\n pickle.dump(data_loader, fh)\nelse:\n import pickle\n with open('data_loader.pkl', 'rb') as fh:\n data_loader = pickle.load(fh)\n print('loaded pickle file...')",
"Requirement already satisfied: nltk in /opt/conda/lib/python3.6/site-packages (3.2.5)\nRequirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from nltk) (1.11.0)\n[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\nloaded pickle file...\n"
]
],
[
[
"When you ran the code cell above, the data loader was stored in the variable `data_loader`. \n\nYou can access the corresponding dataset as `data_loader.dataset`. This dataset is an instance of the `CoCoDataset` class in **data_loader.py**. If you are unfamiliar with data loaders and datasets, you are encouraged to review [this PyTorch tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n### Exploring the `__getitem__` Method\n\nThe `__getitem__` method in the `CoCoDataset` class determines how an image-caption pair is pre-processed before being incorporated into a batch. This is true for all `Dataset` classes in PyTorch; if this is unfamiliar to you, please review [the tutorial linked above](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html). \n\nWhen the data loader is in training mode, this method begins by first obtaining the filename (`path`) of a training image and its corresponding caption (`caption`).\n\n#### Image Pre-Processing \n\nImage pre-processing is relatively straightforward (from the `__getitem__` method in the `CoCoDataset` class):\n```python\n# Convert image to tensor and pre-process using transform\nimage = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\nimage = self.transform(image)\n```\nAfter loading the image in the training folder with name `path`, the image is pre-processed using the same transform (`transform_train`) that was supplied when instantiating the data loader. \n\n#### Caption Pre-Processing \n\nThe captions also need to be pre-processed and prepped for training. 
In this example, for generating captions, we are aiming to create a model that predicts the next token of a sentence from previous tokens, so we turn the caption associated with any image into a list of tokenized words, before casting it to a PyTorch tensor that we can use to train the network.\n\nTo understand in more detail how COCO captions are pre-processed, we'll first need to take a look at the `vocab` instance variable of the `CoCoDataset` class. The code snippet below is pulled from the `__init__` method of the `CoCoDataset` class:\n```python\ndef __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word, \n end_word, unk_word, annotations_file, vocab_from_file, img_folder):\n ...\n self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file)\n ...\n```\nFrom the code snippet above, you can see that `data_loader.dataset.vocab` is an instance of the `Vocabulary` class from **vocabulary.py**. Take the time now to verify this for yourself by looking at the full code in **data_loader.py**. \n\nWe use this instance to pre-process the COCO captions (from the `__getitem__` method in the `CoCoDataset` class):\n\n```python\n# Convert caption to tensor of word ids.\ntokens = nltk.tokenize.word_tokenize(str(caption).lower()) # line 1\ncaption = [] # line 2\ncaption.append(self.vocab(self.vocab.start_word)) # line 3\ncaption.extend([self.vocab(token) for token in tokens]) # line 4\ncaption.append(self.vocab(self.vocab.end_word)) # line 5\ncaption = torch.Tensor(caption).long() # line 6\n```\n\nAs you will see soon, this code converts any string-valued caption to a list of integers, before casting it to a PyTorch tensor. To see how this code works, we'll apply it to the sample caption in the next code cell.",
"_____no_output_____"
]
],
[
[
"sample_caption = 'A person doing a trick on a rail while riding a skateboard.'",
"_____no_output_____"
]
],
[
[
"In **`line 1`** of the code snippet, every letter in the caption is converted to lowercase, and the [`nltk.tokenize.word_tokenize`](http://www.nltk.org/) function is used to obtain a list of string-valued tokens. Run the next code cell to visualize the effect on `sample_caption`.",
"_____no_output_____"
]
],
[
[
"import nltk\n\nsample_tokens = nltk.tokenize.word_tokenize(str(sample_caption).lower())\nprint(sample_tokens)",
"['a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.']\n"
]
],
[
[
"In **`line 2`** and **`line 3`** we initialize an empty list and append an integer to mark the start of a caption. The [paper](https://arxiv.org/pdf/1411.4555.pdf) that you are encouraged to implement uses a special start word (and a special end word, which we'll examine below) to mark the beginning (and end) of a caption.\n\nThis special start word (`\"<start>\"`) is decided when instantiating the data loader and is passed as a parameter (`start_word`). You are **required** to keep this parameter at its default value (`start_word=\"<start>\"`).\n\nAs you will see below, the integer `0` is always used to mark the start of a caption.",
"_____no_output_____"
]
],
[
[
"sample_caption = []\n\nstart_word = data_loader.dataset.vocab.start_word\nprint('Special start word:', start_word)\nsample_caption.append(data_loader.dataset.vocab(start_word))\nprint(sample_caption)",
"Special start word: <start>\n[0]\n"
]
],
[
[
"In **`line 4`**, we continue the list by adding integers that correspond to each of the tokens in the caption.",
"_____no_output_____"
]
],
[
[
"sample_caption.extend([data_loader.dataset.vocab(token) for token in sample_tokens])\nprint(sample_caption)",
"[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18]\n"
]
],
[
[
"In **`line 5`**, we append a final integer to mark the end of the caption. \n\nIdentical to the case of the special start word (above), the special end word (`\"<end>\"`) is decided when instantiating the data loader and is passed as a parameter (`end_word`). You are **required** to keep this parameter at its default value (`end_word=\"<end>\"`).\n\nAs you will see below, the integer `1` is always used to mark the end of a caption.",
"_____no_output_____"
]
],
[
[
"end_word = data_loader.dataset.vocab.end_word\nprint('Special end word:', end_word)\n\nsample_caption.append(data_loader.dataset.vocab(end_word))\nprint(sample_caption)",
"Special end word: <end>\n[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18, 1]\n"
]
],
[
[
"Finally, in **`line 6`**, we convert the list of integers to a PyTorch tensor and cast it to [long type](http://pytorch.org/docs/master/tensors.html#torch.Tensor.long). You can read more about the different types of PyTorch tensors on the [website](http://pytorch.org/docs/master/tensors.html).",
"_____no_output_____"
]
],
[
[
"import torch\n\nsample_caption = torch.Tensor(sample_caption).long()\nprint(sample_caption)",
"tensor([ 0, 3, 98, 754, 3, 396, 39, 3, 1009,\n 207, 139, 3, 753, 18, 1])\n"
]
],
[
[
"And that's it! In summary, any caption is converted to a list of tokens, with _special_ start and end tokens marking the beginning and end of the sentence:\n```\n[<start>, 'a', 'person', 'doing', 'a', 'trick', 'while', 'riding', 'a', 'skateboard', '.', <end>]\n```\nThis list of tokens is then turned into a list of integers, where every distinct word in the vocabulary has an associated integer value:\n```\n[0, 3, 98, 754, 3, 396, 207, 139, 3, 753, 18, 1]\n```\nFinally, this list is converted to a PyTorch tensor. All of the captions in the COCO dataset are pre-processed using this same procedure from **`lines 1-6`** described above. \n\nAs you saw, in order to convert a token to its corresponding integer, we call `data_loader.dataset.vocab` as a function. The details of how this call works can be explored in the `__call__` method in the `Vocabulary` class in **vocabulary.py**. \n\n```python\ndef __call__(self, word):\n if not word in self.word2idx:\n return self.word2idx[self.unk_word]\n return self.word2idx[word]\n```\n\nThe `word2idx` instance variable is a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) that is indexed by string-valued keys (mostly tokens obtained from training captions). For each key, the corresponding value is the integer that the token is mapped to in the pre-processing step.\n\nUse the code cell below to view a subset of this dictionary.",
"_____no_output_____"
]
],
[
[
"# Preview the word2idx dictionary.\ndict(list(data_loader.dataset.vocab.word2idx.items())[:10])",
"_____no_output_____"
]
],
[
[
"We also print the total number of keys.",
"_____no_output_____"
]
],
[
[
"# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))",
"Total number of tokens in vocabulary: 8855\n"
]
],
[
[
"As you will see if you examine the code in **vocabulary.py**, the `word2idx` dictionary is created by looping over the captions in the training dataset. If a token appears no less than `vocab_threshold` times in the training set, then it is added as a key to the dictionary and assigned a corresponding unique integer. You will have the option later to amend the `vocab_threshold` argument when instantiating your data loader. Note that in general, **smaller** values for `vocab_threshold` yield a **larger** number of tokens in the vocabulary. You are encouraged to check this for yourself in the next code cell by decreasing the value of `vocab_threshold` before creating a new data loader. ",
"_____no_output_____"
]
],
[
[
"# Modify the minimum word count threshold.\nvocab_threshold = 4\n\n# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=False)",
"_____no_output_____"
],
[
"# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))",
"_____no_output_____"
]
],
[
[
"There are also a few special keys in the `word2idx` dictionary. You are already familiar with the special start word (`\"<start>\"`) and special end word (`\"<end>\"`). There is one more special token, corresponding to unknown words (`\"<unk>\"`). All tokens that don't appear anywhere in the `word2idx` dictionary are considered unknown words. In the pre-processing step, any unknown tokens are mapped to the integer `2`.",
"_____no_output_____"
]
],
[
[
"unk_word = data_loader.dataset.vocab.unk_word\nprint('Special unknown word:', unk_word)\n\nprint('All unknown words are mapped to this integer:', data_loader.dataset.vocab(unk_word))",
"_____no_output_____"
]
],
[
[
"Check this for yourself below, by pre-processing the provided nonsense words that never appear in the training captions. ",
"_____no_output_____"
]
],
[
[
"print(data_loader.dataset.vocab('jfkafejw'))\nprint(data_loader.dataset.vocab('ieowoqjf'))",
"2\n2\n"
]
],
[
[
"The final thing to mention is the `vocab_from_file` argument that is supplied when creating a data loader. To understand this argument, note that when you create a new data loader, the vocabulary (`data_loader.dataset.vocab`) is saved as a [pickle](https://docs.python.org/3/library/pickle.html) file in the project folder, with filename `vocab.pkl`.\n\nIf you are still tweaking the value of the `vocab_threshold` argument, you **must** set `vocab_from_file=False` to have your changes take effect. \n\nBut once you are happy with the value that you have chosen for the `vocab_threshold` argument, you need only run the data loader *one more time* with your chosen `vocab_threshold` to save the new vocabulary to file. Then, you can henceforth set `vocab_from_file=True` to load the vocabulary from file and speed the instantiation of the data loader. Note that building the vocabulary from scratch is the most time-consuming part of instantiating the data loader, and so you are strongly encouraged to set `vocab_from_file=True` as soon as you are able.\n\nNote that if `vocab_from_file=True`, then any supplied argument for `vocab_threshold` when instantiating the data loader is completely ignored.",
"_____no_output_____"
]
],
[
[
"# Obtain the data loader (from file). Note that it runs much faster than before!\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_from_file=True)",
"Vocabulary successfully loaded from vocab.pkl file!\nloading annotations into memory...\n"
]
],
[
[
"In the next section, you will learn how to use the data loader to obtain batches of training data.",
"_____no_output_____"
],
[
"<a id='step2'></a>\n## Step 2: Use the Data Loader to Obtain Batches\n\nThe captions in the dataset vary greatly in length. You can see this by examining `data_loader.dataset.caption_lengths`, a Python list with one entry for each training caption (where the value stores the length of the corresponding caption). \n\nIn the code cell below, we use this list to print the total number of captions in the training data with each length. As you will see below, the majority of captions have length 10. Likewise, very short and very long captions are quite rare. ",
"_____no_output_____"
]
],
[
[
"from collections import Counter\n\n# Tally the total number of training captions with each length.\ncounter = Counter(data_loader.dataset.caption_lengths)\nlengths = sorted(counter.items(), key=lambda pair: pair[1], reverse=True)\nfor value, count in lengths:\n print('value: %2d --- count: %5d' % (value, count))",
"value: 10 --- count: 86334\nvalue: 11 --- count: 79948\nvalue: 9 --- count: 71934\nvalue: 12 --- count: 57637\nvalue: 13 --- count: 37645\nvalue: 14 --- count: 22335\nvalue: 8 --- count: 20771\nvalue: 15 --- count: 12841\nvalue: 16 --- count: 7729\nvalue: 17 --- count: 4842\nvalue: 18 --- count: 3104\nvalue: 19 --- count: 2014\nvalue: 7 --- count: 1597\nvalue: 20 --- count: 1451\nvalue: 21 --- count: 999\nvalue: 22 --- count: 683\nvalue: 23 --- count: 534\nvalue: 24 --- count: 383\nvalue: 25 --- count: 277\nvalue: 26 --- count: 215\nvalue: 27 --- count: 159\nvalue: 28 --- count: 115\nvalue: 29 --- count: 86\nvalue: 30 --- count: 58\nvalue: 31 --- count: 49\nvalue: 32 --- count: 44\nvalue: 34 --- count: 39\nvalue: 37 --- count: 32\nvalue: 33 --- count: 31\nvalue: 35 --- count: 31\nvalue: 36 --- count: 26\nvalue: 38 --- count: 18\nvalue: 39 --- count: 18\nvalue: 43 --- count: 16\nvalue: 44 --- count: 16\nvalue: 48 --- count: 12\nvalue: 45 --- count: 11\nvalue: 42 --- count: 10\nvalue: 40 --- count: 9\nvalue: 49 --- count: 9\nvalue: 46 --- count: 9\nvalue: 47 --- count: 7\nvalue: 50 --- count: 6\nvalue: 51 --- count: 6\nvalue: 41 --- count: 6\nvalue: 52 --- count: 5\nvalue: 54 --- count: 3\nvalue: 56 --- count: 2\nvalue: 6 --- count: 2\nvalue: 53 --- count: 2\nvalue: 55 --- count: 2\nvalue: 57 --- count: 1\n"
]
],
[
[
"To generate batches of training data, we begin by first sampling a caption length (where the probability that any length is drawn is proportional to the number of captions with that length in the dataset). Then, we retrieve a batch of size `batch_size` of image-caption pairs, where all captions have the sampled length. This approach for assembling batches matches the procedure in [this paper](https://arxiv.org/pdf/1502.03044.pdf) and has been shown to be computationally efficient without degrading performance.\n\nRun the code cell below to generate a batch. The `get_train_indices` method in the `CoCoDataset` class first samples a caption length, and then samples `batch_size` indices corresponding to training data points with captions of that length. These indices are stored below in `indices`.\n\nThese indices are supplied to the data loader, which then is used to retrieve the corresponding data points. The pre-processed images and captions in the batch are stored in `images` and `captions`.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport torch.utils.data as data\n\n# Randomly sample a caption length, and sample indices with that length.\nindices = data_loader.dataset.get_train_indices()\nprint('sampled indices:', indices)\n\n# Create and assign a batch sampler to retrieve a batch with the sampled indices.\nnew_sampler = data.sampler.SubsetRandomSampler(indices=indices)\ndata_loader.batch_sampler.sampler = new_sampler\n \n# Obtain the batch.\nimages, captions = next(iter(data_loader))\n \nprint('images.shape:', images.shape)\nprint('captions.shape:', captions.shape)\n\n# (Optional) Uncomment the lines of code below to print the pre-processed images and captions.\n# print('images:', images)\n# print('captions:', captions)",
"sampled indices: [272743, 306880, 9936, 199594, 237612, 138331, 53206, 237082, 357523, 227778]\nimages.shape: torch.Size([10, 3, 224, 224])\ncaptions.shape: torch.Size([10, 11])\n"
]
],
[
[
"Each time you run the code cell above, a different caption length is sampled, and a different batch of training data is returned. Run the code cell multiple times to check this out!\n\nYou will train your model in the next notebook in this sequence (**2_Training.ipynb**). This code for generating training batches will be provided to you.\n\n> Before moving to the next notebook in the sequence (**2_Training.ipynb**), you are strongly encouraged to take the time to become very familiar with the code in **data_loader.py** and **vocabulary.py**. **Step 1** and **Step 2** of this notebook are designed to help facilitate a basic introduction and guide your understanding. However, our description is not exhaustive, and it is up to you (as part of the project) to learn how to best utilize these files to complete the project. __You should NOT amend any of the code in either *data_loader.py* or *vocabulary.py*.__\n\nIn the next steps, we focus on learning how to specify a CNN-RNN architecture in PyTorch, towards the goal of image captioning.",
"_____no_output_____"
],
[
"<a id='step3'></a>\n## Step 3: Experiment with the CNN Encoder\n\nRun the code cell below to import `EncoderCNN` and `DecoderRNN` from **model.py**. ",
"_____no_output_____"
]
],
[
[
"# Watch for any changes in model.py, and re-load it automatically.\n% load_ext autoreload\n% autoreload 2\n\n# Import EncoderCNN and DecoderRNN. \nimport torch\nfrom model import EncoderCNN, DecoderRNN\n",
"_____no_output_____"
]
],
[
[
"In the next code cell we define a `device` that you will use to move PyTorch tensors to GPU (if CUDA is available). Run this code cell before continuing.",
"_____no_output_____"
]
],
[
[
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# device = 'cpu'",
"_____no_output_____"
]
],
[
[
"Run the code cell below to instantiate the CNN encoder in `encoder`. \n\nThe pre-processed images from the batch in **Step 2** of this notebook are then passed through the encoder, and the output is stored in `features`.",
"_____no_output_____"
]
],
[
[
"# Specify the dimensionality of the image embedding.\nembed_size = 256\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Initialize the encoder. (Optional: Add additional arguments if necessary.)\nencoder = EncoderCNN(embed_size)\n\n# Move the encoder to GPU if CUDA is available.\nencoder.to(device)\n \n# Move last batch of images (from Step 2) to GPU if CUDA is available. \nimages = images.to(device)\n\n# Pass the images through the encoder.\nfeatures = encoder(images)\n\nprint('type(features):', type(features))\nprint('features.shape:', features.shape)\n\n# Check that your encoder satisfies some requirements of the project! :D\nassert type(features)==torch.Tensor, \"Encoder output needs to be a PyTorch Tensor.\" \nassert (features.shape[0]==batch_size) & (features.shape[1]==embed_size), \"The shape of the encoder output is incorrect.\"",
"Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /root/.torch/models/resnet50-19c8e357.pth\n100%|██████████| 102502400/102502400 [00:02<00:00, 46237700.01it/s]\n"
]
],
[
[
"The encoder that we provide to you uses the pre-trained ResNet-50 architecture (with the final fully-connected layer removed) to extract features from a batch of pre-processed images. The output is then flattened to a vector, before being passed through a `Linear` layer to transform the feature vector to have the same size as the word embedding.\n\n\n\nYou are welcome (and encouraged) to amend the encoder in **model.py**, to experiment with other architectures. In particular, consider using a [different pre-trained model architecture](http://pytorch.org/docs/master/torchvision/models.html). You may also like to [add batch normalization](http://pytorch.org/docs/master/nn.html#normalization-layers). \n\n> You are **not** required to change anything about the encoder.\n\nFor this project, you **must** incorporate a pre-trained CNN into your encoder. Your `EncoderCNN` class must take `embed_size` as an input argument, which will also correspond to the dimensionality of the input to the RNN decoder that you will implement in Step 4. When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `embed_size`.\n\nIf you decide to modify the `EncoderCNN` class, save **model.py** and re-execute the code cell above. If the code cell returns an assertion error, then please follow the instructions to modify your code before proceeding. The assert statements ensure that `features` is a PyTorch tensor with shape `[batch_size, embed_size]`.",
"_____no_output_____"
],
[
"<a id='step4'></a>\n## Step 4: Implement the RNN Decoder\n\nBefore executing the next code cell, you must write `__init__` and `forward` methods in the `DecoderRNN` class in **model.py**. (Do **not** write the `sample` method yet - you will work with this method when you reach **3_Inference.ipynb**.)\n\n> The `__init__` and `forward` methods in the `DecoderRNN` class are the only things that you **need** to modify as part of this notebook. You will write more implementations in the notebooks that appear later in the sequence.\n\nYour decoder will be an instance of the `DecoderRNN` class and must accept as input:\n- the PyTorch tensor `features` containing the embedded image features (outputted in Step 3, when the last batch of images from Step 2 was passed through `encoder`), along with\n- a PyTorch tensor corresponding to the last batch of captions (`captions`) from Step 2.\n\nNote that the way we have written the data loader should simplify your code a bit. In particular, every training batch will contain pre-processed captions where all have the same length (`captions.shape[1]`), so **you do not need to worry about padding**. \n> While you are encouraged to implement the decoder described in [this paper](https://arxiv.org/pdf/1411.4555.pdf), you are welcome to implement any architecture of your choosing, as long as it uses at least one RNN layer, with hidden dimension `hidden_size`. \n\nAlthough you will test the decoder using the last batch that is currently stored in the notebook, your decoder should be written to accept an arbitrary batch (of embedded image features and pre-processed captions [where all captions have the same length]) as input. \n\n\n\nIn the code cell below, `outputs` should be a PyTorch tensor with size `[batch_size, captions.shape[1], vocab_size]`. 
Your output should be designed such that `outputs[i,j,k]` contains the model's predicted score, indicating how likely the `j`-th token in the `i`-th caption in the batch is the `k`-th token in the vocabulary. In the next notebook of the sequence (**2_Training.ipynb**), we provide code to supply these scores to the [`torch.nn.CrossEntropyLoss`](http://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss) optimizer in PyTorch.",
"_____no_output_____"
]
],
[
[
"# Specify the number of features in the hidden state of the RNN decoder.\nhidden_size = 512\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Store the size of the vocabulary.\nvocab_size = len(data_loader.dataset.vocab)\n\n# Initialize the decoder.\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\n# Move the decoder to GPU if CUDA is available.\ndecoder.to(device)\n \n# Move last batch of captions (from Step 1) to GPU if CUDA is available \ncaptions = captions.to(device)\n\n# Pass the encoder output and captions through the decoder.\noutputs = decoder(features, captions)\n\nprint('type(outputs):', type(outputs))\nprint('outputs.shape:', outputs.shape)\n\n# Check that your decoder satisfies some requirements of the project! :D\nassert type(outputs)==torch.Tensor, \"Decoder output needs to be a PyTorch Tensor.\"\nassert (outputs.shape[0]==batch_size) & (outputs.shape[1]==captions.shape[1]) & (outputs.shape[2]==vocab_size), \"The shape of the decoder output is incorrect.\"",
"type(outputs): <class 'torch.Tensor'>\noutputs.shape: torch.Size([10, 11, 8855])\n"
]
],
[
[
"When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `hidden_size`.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec6553d8881589036125ec7718b0b1c228a0d9f4 | 55,913 | ipynb | Jupyter Notebook | Lecture01 - Theory.ipynb | hoesung/intel-image-classification | a5105f910da604579baa08e24ccb74575220d476 | [
"MIT"
]
| 2 | 2019-11-26T15:19:19.000Z | 2020-08-27T00:00:33.000Z | Lecture01 - Theory.ipynb | minsuk-sung/intel-image-classification | bae550731bbbd67e7df93b0361f29ebcfe033f00 | [
"MIT"
]
| 2 | 2019-11-26T15:57:09.000Z | 2020-01-28T23:07:36.000Z | Lecture01 - Theory.ipynb | minsuk-sung/intel-image-classification | bae550731bbbd67e7df93b0361f29ebcfe033f00 | [
"MIT"
]
| 4 | 2019-11-26T15:03:28.000Z | 2020-08-29T07:19:10.000Z | 67.773333 | 11,915 | 0.67825 | [
[
[
"<img src='https://www.anadronestarting.com/wp-content/uploads/intel-main_opt.png' width=50%>\n\n# 모바일넷을 이용한 이미지분류\n<font size=5><b>(Image Classification using Mobilenet)<b></font>\n\n<div align='right'>성 민 석<br>(Minsuk Sung)</div>\n\n<img src='https://chaosmail.github.io/images/deep-learning/classification.png' width=60%>\n\n---",
"_____no_output_____"
],
[
"<h1>강의목차<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#이미지-분류(Image-Classification)\" data-toc-modified-id=\"이미지-분류(Image-Classification)-1\"><span class=\"toc-item-num\">1 </span>이미지 분류(Image Classification)</a></span></li><li><span><a href=\"#딥러닝-프레임워크(Deep-Learning-Framework)\" data-toc-modified-id=\"딥러닝-프레임워크(Deep-Learning-Framework)-2\"><span class=\"toc-item-num\">2 </span>딥러닝 프레임워크(Deep Learning Framework)</a></span><ul class=\"toc-item\"><li><span><a href=\"#텐서플로우(Tensorflow)\" data-toc-modified-id=\"텐서플로우(Tensorflow)-2.1\"><span class=\"toc-item-num\">2.1 </span>텐서플로우(Tensorflow)</a></span></li><li><span><a href=\"#Keras-:-Tensorflow와-손을-잡다\" data-toc-modified-id=\"Keras-:-Tensorflow와-손을-잡다-2.2\"><span class=\"toc-item-num\">2.2 </span>Keras : Tensorflow와 손을 잡다</a></span><ul class=\"toc-item\"><li><span><a href=\"#왜-Keras일까요?\" data-toc-modified-id=\"왜-Keras일까요?-2.2.1\"><span class=\"toc-item-num\">2.2.1 </span>왜 Keras일까요?</a></span><ul class=\"toc-item\"><li><span><a href=\"#Keras는-사용자-친화적입니다\" data-toc-modified-id=\"Keras는-사용자-친화적입니다-2.2.1.1\"><span class=\"toc-item-num\">2.2.1.1 </span>Keras는 사용자 친화적입니다</a></span></li><li><span><a href=\"#Keras는-업계와-학계-양쪽에서-모두-폭넓게-사용되고-있습니다\" data-toc-modified-id=\"Keras는-업계와-학계-양쪽에서-모두-폭넓게-사용되고-있습니다-2.2.1.2\"><span class=\"toc-item-num\">2.2.1.2 </span>Keras는 업계와 학계 양쪽에서 모두 폭넓게 사용되고 있습니다</a></span></li><li><span><a href=\"#Keras는-모델의-제품화를-쉽게-해줍니다\" data-toc-modified-id=\"Keras는-모델의-제품화를-쉽게-해줍니다-2.2.1.3\"><span class=\"toc-item-num\">2.2.1.3 </span>Keras는 모델의 제품화를 쉽게 해줍니다</a></span></li><li><span><a href=\"#Keras는-여러-백엔드-엔진을-지원하여-하나의-생태계에-속박되지-않습니다\" data-toc-modified-id=\"Keras는-여러-백엔드-엔진을-지원하여-하나의-생태계에-속박되지-않습니다-2.2.1.4\"><span class=\"toc-item-num\">2.2.1.4 </span>Keras는 여러 백엔드 엔진을 지원하여 하나의 생태계에 속박되지 않습니다</a></span></li><li><span><a href=\"#Keras는-다중-GPU와-학습의-분산처리를-지원합니다\" data-toc-modified-id=\"Keras는-다중-GPU와-학습의-분산처리를-지원합니다-2.2.1.5\"><span 
class=\"toc-item-num\">2.2.1.5 </span>Keras는 다중 GPU와 학습의 분산처리를 지원합니다</a></span></li><li><span><a href=\"#Keras의-개발은-딥러닝-생태계의-주요-기업들의-지원을-받습니다\" data-toc-modified-id=\"Keras의-개발은-딥러닝-생태계의-주요-기업들의-지원을-받습니다-2.2.1.6\"><span class=\"toc-item-num\">2.2.1.6 </span>Keras의 개발은 딥러닝 생태계의 주요 기업들의 지원을 받습니다</a></span></li></ul></li></ul></li><li><span><a href=\"#PyTorch\" data-toc-modified-id=\"PyTorch-2.3\"><span class=\"toc-item-num\">2.3 </span>PyTorch</a></span></li><li><span><a href=\"#Caffe\" data-toc-modified-id=\"Caffe-2.4\"><span class=\"toc-item-num\">2.4 </span>Caffe</a></span><ul class=\"toc-item\"><li><span><a href=\"#Caffe란?\" data-toc-modified-id=\"Caffe란?-2.4.1\"><span class=\"toc-item-num\">2.4.1 </span>Caffe란?</a></span></li><li><span><a href=\"#왜-Caffe일까요?\" data-toc-modified-id=\"왜-Caffe일까요?-2.4.2\"><span class=\"toc-item-num\">2.4.2 </span>왜 Caffe일까요?</a></span></li></ul></li></ul></li><li><span><a href=\"#수많은-레이어(Layer)\" data-toc-modified-id=\"수많은-레이어(Layer)-3\"><span class=\"toc-item-num\">3 </span>수많은 레이어(Layer)</a></span><ul class=\"toc-item\"><li><span><a href=\"#완전-연결-레이어(Fully-Connected-Layer)\" data-toc-modified-id=\"완전-연결-레이어(Fully-Connected-Layer)-3.1\"><span class=\"toc-item-num\">3.1 </span>완전 연결 레이어(Fully Connected Layer)</a></span></li><li><span><a href=\"#합성곱-레이어(Convolutional-Layer)\" data-toc-modified-id=\"합성곱-레이어(Convolutional-Layer)-3.2\"><span class=\"toc-item-num\">3.2 </span>합성곱 레이어(Convolutional Layer)</a></span></li><li><span><a href=\"#최대-풀링-레이어(Max-Pooling-Layer)\" data-toc-modified-id=\"최대-풀링-레이어(Max-Pooling-Layer)-3.3\"><span class=\"toc-item-num\">3.3 </span>최대 풀링 레이어(Max Pooling Layer)</a></span></li><li><span><a href=\"#활성화-함수(Activation-Function)\" data-toc-modified-id=\"활성화-함수(Activation-Function)-3.4\"><span class=\"toc-item-num\">3.4 </span>활성화 함수(Activation Function)</a></span></li></ul></li><li><span><a href=\"#다양한-신경망-네트워크(Neural-Network)\" data-toc-modified-id=\"다양한-신경망-네트워크(Neural-Network)-4\"><span 
class=\"toc-item-num\">4 </span>다양한 신경망 네트워크(Neural Network)</a></span><ul class=\"toc-item\"><li><span><a href=\"#합성곱-신경망(CNN,-Convolutional-Neural-Network)\" data-toc-modified-id=\"합성곱-신경망(CNN,-Convolutional-Neural-Network)-4.1\"><span class=\"toc-item-num\">4.1 </span>합성곱 신경망(CNN, Convolutional Neural Network)</a></span></li></ul></li><li><span><a href=\"#더-깊은-딥러닝-모델(Deep-Learning-Model)\" data-toc-modified-id=\"더-깊은-딥러닝-모델(Deep-Learning-Model)-5\"><span class=\"toc-item-num\">5 </span>더 깊은 딥러닝 모델(Deep Learning Model)</a></span><ul class=\"toc-item\"><li><span><a href=\"#AlexNet\" data-toc-modified-id=\"AlexNet-5.1\"><span class=\"toc-item-num\">5.1 </span>AlexNet</a></span></li><li><span><a href=\"#Inception-V3\" data-toc-modified-id=\"Inception-V3-5.2\"><span class=\"toc-item-num\">5.2 </span>Inception V3</a></span></li><li><span><a href=\"#VGG-16\" data-toc-modified-id=\"VGG-16-5.3\"><span class=\"toc-item-num\">5.3 </span>VGG-16</a></span></li><li><span><a href=\"#ResNet-50\" data-toc-modified-id=\"ResNet-50-5.4\"><span class=\"toc-item-num\">5.4 </span>ResNet-50</a></span></li></ul></li><li><span><a href=\"#MobileNet\" data-toc-modified-id=\"MobileNet-6\"><span class=\"toc-item-num\">6 </span>MobileNet</a></span><ul class=\"toc-item\"><li><span><a href=\"#딥러닝-경량화-기술의-동향\" data-toc-modified-id=\"딥러닝-경량화-기술의-동향-6.1\"><span class=\"toc-item-num\">6.1 </span>딥러닝 경량화 기술의 동향</a></span></li><li><span><a href=\"#MobileNet에-대해서\" data-toc-modified-id=\"MobileNet에-대해서-6.2\"><span class=\"toc-item-num\">6.2 </span>MobileNet에 대해서</a></span></li><li><span><a href=\"#Pipeline\" data-toc-modified-id=\"Pipeline-6.3\"><span class=\"toc-item-num\">6.3 </span>Pipeline</a></span></li><li><span><a href=\"#Standard-Convolution-VS-Depthwise-Separable-Convolution\" data-toc-modified-id=\"Standard-Convolution-VS-Depthwise-Separable-Convolution-6.4\"><span class=\"toc-item-num\">6.4 </span>Standard Convolution VS Depthwise Separable Convolution</a></span><ul 
class=\"toc-item\"><li><span><a href=\"#Standard-Convolution\" data-toc-modified-id=\"Standard-Convolution-6.4.1\"><span class=\"toc-item-num\">6.4.1 </span>Standard Convolution</a></span></li><li><span><a href=\"#Depthwise-Separable-Convolution\" data-toc-modified-id=\"Depthwise-Separable-Convolution-6.4.2\"><span class=\"toc-item-num\">6.4.2 </span>Depthwise Separable Convolution</a></span><ul class=\"toc-item\"><li><span><a href=\"#Depthwise-Convolution\" data-toc-modified-id=\"Depthwise-Convolution-6.4.2.1\"><span class=\"toc-item-num\">6.4.2.1 </span>Depthwise Convolution</a></span></li><li><span><a href=\"#Pointwise-Convolution\" data-toc-modified-id=\"Pointwise-Convolution-6.4.2.2\"><span class=\"toc-item-num\">6.4.2.2 </span>Pointwise Convolution</a></span></li></ul></li></ul></li><li><span><a href=\"#효율성\" data-toc-modified-id=\"효율성-6.5\"><span class=\"toc-item-num\">6.5 </span>효율성</a></span></li></ul></li><li><span><a href=\"#MobileNetV2\" data-toc-modified-id=\"MobileNetV2-7\"><span class=\"toc-item-num\">7 </span>MobileNetV2</a></span><ul class=\"toc-item\"><li><span><a href=\"#Pipeline\" data-toc-modified-id=\"Pipeline-7.1\"><span class=\"toc-item-num\">7.1 </span>Pipeline</a></span></li><li><span><a href=\"#Linear-Bottlenecks\" data-toc-modified-id=\"Linear-Bottlenecks-7.2\"><span class=\"toc-item-num\">7.2 </span>Linear Bottlenecks</a></span><ul class=\"toc-item\"><li><span><a href=\"#Two-assumptions-(non-linear-function-을-똑같이-사용-할-수-있다-)\" data-toc-modified-id=\"Two-assumptions-(non-linear-function-을-똑같이-사용-할-수-있다-)-7.2.1\"><span class=\"toc-item-num\">7.2.1 </span>Two assumptions (non-linear function 을 똑같이 사용 할 수 있다 )</a></span></li><li><span><a href=\"#In-experiments\" data-toc-modified-id=\"In-experiments-7.2.2\"><span class=\"toc-item-num\">7.2.2 </span>In experiments</a></span></li></ul></li><li><span><a href=\"#Inverted-Residuals\" data-toc-modified-id=\"Inverted-Residuals-7.3\"><span class=\"toc-item-num\">7.3 </span>Inverted 
Residuals</a></span><ul class=\"toc-item\"><li><span><a href=\"#Residual-block-VS-Inverted-residual-block\" data-toc-modified-id=\"Residual-block-VS-Inverted-residual-block-7.3.1\"><span class=\"toc-item-num\">7.3.1 </span>Residual block VS Inverted residual block</a></span></li><li><span><a href=\"#확장(Expansion)이-필요한-이유는?\" data-toc-modified-id=\"확장(Expansion)이-필요한-이유는?-7.3.2\"><span class=\"toc-item-num\">7.3.2 </span>확장(Expansion)이 필요한 이유는?</a></span></li><li><span><a href=\"#Structure-of-inverted-residual-block\" data-toc-modified-id=\"Structure-of-inverted-residual-block-7.3.3\"><span class=\"toc-item-num\">7.3.3 </span>Structure of inverted residual block</a></span></li></ul></li><li><span><a href=\"#Memory-Efficient-Inference\" data-toc-modified-id=\"Memory-Efficient-Inference-7.4\"><span class=\"toc-item-num\">7.4 </span>Memory Efficient Inference</a></span><ul class=\"toc-item\"><li><span><a href=\"#Bottleneck-Residual-Block\" data-toc-modified-id=\"Bottleneck-Residual-Block-7.4.1\"><span class=\"toc-item-num\">7.4.1 </span>Bottleneck Residual Block</a></span></li><li><span><a href=\"#Compute-graph-G\" data-toc-modified-id=\"Compute-graph-G-7.4.2\"><span class=\"toc-item-num\">7.4.2 </span>Compute graph G</a></span></li><li><span><a href=\"#t-way-split\" data-toc-modified-id=\"t-way-split-7.4.3\"><span class=\"toc-item-num\">7.4.3 </span>t-way split</a></span></li><li><span><a href=\"#성능\" data-toc-modified-id=\"성능-7.4.4\"><span class=\"toc-item-num\">7.4.4 </span>성능</a></span></li></ul></li></ul></li><li><span><a href=\"#여러-CNN-모델-파라미터-및-성능-비교\" data-toc-modified-id=\"여러-CNN-모델-파라미터-및-성능-비교-8\"><span class=\"toc-item-num\">8 </span>여러 CNN 모델 파라미터 및 성능 비교</a></span></li><li><span><a href=\"#Keras에서-제공하는-모델들\" data-toc-modified-id=\"Keras에서-제공하는-모델들-9\"><span class=\"toc-item-num\">9 </span>Keras에서 제공하는 모델들</a></span><ul class=\"toc-item\"><li><span><a href=\"#ImageNet으로-학습한-가중치를-이용해-이미지-분류를-수행하는-모델:\" 
data-toc-modified-id=\"ImageNet으로-학습한-가중치를-이용해-이미지-분류를-수행하는-모델:-9.1\"><span class=\"toc-item-num\">9.1 </span>ImageNet으로 학습한 가중치를 이용해 이미지 분류를 수행하는 모델:</a></span></li><li><span><a href=\"#예시\" data-toc-modified-id=\"예시-9.2\"><span class=\"toc-item-num\">9.2 </span>예시</a></span></li></ul></li><li><span><a href=\"#Appendix\" data-toc-modified-id=\"Appendix-10\"><span class=\"toc-item-num\">10 </span>Appendix</a></span><ul class=\"toc-item\"><li><span><a href=\"#Standard-Convolution\" data-toc-modified-id=\"Standard-Convolution-10.1\"><span class=\"toc-item-num\">10.1 </span>Standard Convolution</a></span><ul class=\"toc-item\"><li><span><a href=\"#디지털-이미지\" data-toc-modified-id=\"디지털-이미지-10.1.1\"><span class=\"toc-item-num\">10.1.1 </span>디지털 이미지</a></span></li><li><span><a href=\"#Convolution-Neural-Network\" data-toc-modified-id=\"Convolution-Neural-Network-10.1.2\"><span class=\"toc-item-num\">10.1.2 </span>Convolution Neural Network</a></span></li></ul></li><li><span><a href=\"#Process-of-Convolution\" data-toc-modified-id=\"Process-of-Convolution-10.2\"><span class=\"toc-item-num\">10.2 </span>Process of Convolution</a></span></li><li><span><a href=\"#ReLu6\" data-toc-modified-id=\"ReLu6-10.3\"><span class=\"toc-item-num\">10.3 </span>ReLu6</a></span></li></ul></li><li><span><a href=\"#참고\" data-toc-modified-id=\"참고-11\"><span class=\"toc-item-num\">11 </span>참고</a></span></li></ul></div>",
"_____no_output_____"
],
[
"---\n\n## 이미지 분류(Image Classification)\n\n이미지 분류(Image Classification)는 말 그대로 수많은 이미지들을 분류하는 작업입니다. 예를 들면, 수많은 개나 고양이와 같은 이미지를 학습시켜서 처음 보는 사진 속 동물이 개 혹은 고양이인지를 분류할 수 있습니다. 물론, 과거에도 이미지를 분류하려는 수많은 시도가 있었지만, 이미지 분류 문제는 딥러닝(Deep Learning)의 등장을 전후로 크게 발전했습니다.\n\n<img src='https://storage.googleapis.com/kaggle-organizations/768/thumbnail.png?r=587'>\n\n2012년 캐나다 토론토대학의 알렉스 크리제브스키(Alex Khrizevsky)가 이미지넷(ImageNet)이라 불리는 이미지 인식 경진 대회에서 GPU를 활용한 앞서 언급했던 딥러닝을 이용하여 정확도를 획기적으로 높인 논문을 발표하면서 또 한번의 전환점을 맞이합니다. 이 때 등장한 네트워크가 바로 알렉스넷(AlexNet)입니다. 알렉스넷은 추후 다시 간략하게 설명하겠습니다.\n\n",
"_____no_output_____"
],
[
"---\n\n## 딥러닝 프레임워크(Deep Learning Framework)\n\n<br>\n<img src='https://cdn.pixabay.com/photo/2017/09/08/19/07/a-2729794_960_720.png' width=50%>\n\n 이미지 분류를 하기 위해서 딥러닝을 이용하면 훨씬 좋은 퍼포먼스를 발휘할 수 있습니다. 그럼 여기서 우리는 의문점이 하나 들게 됩니다.\n > 그럼 어떻게 딥러닝을 시작하면 좋을까요?\n \n 여러분이 만약 수학적인 기초가 튼튼하게 월등한 코딩 실력이 갖춰져있다면, 직접 밑바닥부터 하나하나 직접 구현하셔도 괜찮습니다. 하지만 이렇게 번거로운 방법은 시간이 너무 낭비가 되버립니다. 이미 모든 내용을 다 알고 있는 딥러닝 실력자들일지라도 그러한 것들을 다시 구현한다면, 그들이 원래 하고자하는 일을 할 시간이 줄어들기 때문이죠. 예를 들어, 여러분은 호텔의 수석 요리사라고 합시다. 남부럽지 않는 요리 실력과 고급 요리에 대한 레시피도 다 알고 있다고 가정합시다. 이러한 상황에서 굳이 주방기구를 만들 필요가 있을까요? 칼이나 후라이팬과 같은 요리기구까지 직접 만들어서 요리를 해야하나요? 물론 절대 그럴 필요없죠.\n \n> 마찬가지로 `딥러닝 프레임워크(Deep Learning Framework)`는 작업자가 딥러닝을 하기 위한 도구를 모두 모아놓은 도구세트라고 생각하시면 됩니다.\n \n 그래서 이미 수많은 회사에서 혹은 연구실에서 딥러닝 프레임워크를 개발하고 있습니다. 우리는 그들의 도움을 받아서 딥러닝을 이용만 하면 됩니다. 앞으로 소개하는 몇가지 프레임워크는 가장 많이 쓰이는 프레임워크들입니다. \n \n ",
"_____no_output_____"
],
[
"### 텐서플로우(Tensorflow)\n\n<img src='https://www.tensorflow.org/images/tf_logo_social.png' width=80%>\n\n> 텐서플로우(TensorFlow™)는 데이터 플로우 그래프(Data flow graph)를 사용하여 수치 연산을 하는 오픈소스 소프트웨어 라이브러리입니다. 그래프의 노드(Node)는 수치 연산을 나타내고 엣지(edge)는 노드 사이를 이동하는 다차원 데이터 배열(텐서,tensor)를 나타냅니다. 유연한 아키텍처로 구성되어 있어 코드 수정없이 데스크탑, 서버 혹은 모바일 디바이스에서 CPU나 GPU를 사용하여 연산을 구동시킬 수 있습니다. 텐서플로우는 원래 머신러닝과 딥 뉴럴 네트워크 연구를 목적으로 구글의 인공지능 연구 조직인 구글 브레인 팀의 연구자와 엔지니어들에 의해 개발되었습니다. 하지만 이 시스템은 여러 다른 분야에도 충분히 적용될 수 있습니다.\n\n출처 : 텐서플로우 공식홈페이지",
"_____no_output_____"
],
[
"---\n\n### Keras : Tensorflow와 손을 잡다\n\n\n\n>케라스는 \n많은 이들이 딥러닝을 쉽게 접할 수 있도록, \n다양한 플랫폼 위에서 딥러닝 모델을 만들 수 있는 \nAPI이다.\n\n#### 왜 Keras일까요?\n\n오늘날 존재하는 수많은 딥러닝 프레임워크들 중에서, 왜 굳이 Keras일까요? 다른 대안들에 비해 Keras를 선호하는 이유는 다음과 같습니다.\n\n---\n\n##### Keras는 사용자 친화적입니다\n \n- Keras는 기계가 아닌 사람을 위한 도구입니다. Keras는 [사용자의 부담을 덜기 위해](https://blog.keras.io/user-experience-design-for-apis.html) 일관되고 간결한 API를 제공하며, 일반적인 유스케이스에 필요한 사용자의 조작을 최소화 하고, 오작동에 대한 명확하고 실용적인 피드백을 제공합니다.\n\n- Keras의 이런 개발 철학 덕분에 Keras는 배우기도, 사용하기에도 쉽습니다. Keras를 통해서 더 많은 아이디어를 빠르게 시도해 볼 수 있고, 이는 [머신러닝 대회에서 좋은 성적을 거둘 수 있도록 도와줍니다](https://www.quora.com/Why-has-Keras-been-so-successful-lately-at-Kaggle-competitions).\n\n- Keras는 쉬운 고수준의 API를 제공하면서도, TensorFlow와 같은 저수준의 API와도 호환이 잘 되어 어떠한 네트워크 구조도 만들 수 있게 합니다. 특히, `tf.keras`를 사용하면 TensorFlow 기반의 작업 흐름에도 매끄럽게 통합시킬 수 있습니다.\n\n---\n\n##### Keras는 업계와 학계 양쪽에서 모두 폭넓게 사용되고 있습니다\n\n<a href='https://towardsdatascience.com/deep-learning-framework-power-scores-2018-23607ddf297a'>\n <img style='width: 80%; margin-left: 10%;' src='https://s3.amazonaws.com/keras.io/img/dl_frameworks_power_scores.png'/>\n</a>\n<p style='font-style: italic; font-size: 10pt; text-align: center;'>\n 7개의 분류에 걸친 11개의 데이터 소스를 기반으로 계산된 딥러닝 프레임워크 순위, Jeff Hale.\n</i>\n\nKeras는 250,000명 이상의 개인 사용자(2018년 기준)를 기반으로 TensorFlow를 제외한 그 어떤 딥러닝 프레임워크보다 업계와 학계 모두에 깊게 배어있습니다. 또한 Keras API는 `tf.keras` 모듈을 통해 TensorFlow의 공식 프론트엔드로 사용되고 있습니다.\n\nKeras를 통해 개발된 기능들은 Netflix, Uber, Yelp, Instacart, Zocdoc, Square사 등의 서비스에서 쉽게 찾아볼 수 있습니다. 이는 특히 딥러닝을 서비스의 핵심으로 삼는 스타트업 기업들 사이에서 인기가 많습니다.\n\nKeras는 [arXiv.org](https://arxiv.org/archive/cs)에 업로드 된 과학 논문들 중에서 두 번째로 많이 언급 될 정도로 딥러닝 연구자들에게 사랑받고 있습니다. Keras는 또한 CERN과 NASA와 같은 대형 연구소에서도 채택된 도구입니다.\n\n---\n\n##### Keras는 모델의 제품화를 쉽게 해줍니다\n\nKeras는 다른 어떤 딥러닝 프레임워크보다도 다양한 방면의 플랫폼에 쉽게 배포할 수 있습니다. 이에 해당하는 플랫폼들은 다음과 같습니다.\n\n- iOS에서는 [Apple’s CoreML](https://developer.apple.com/documentation/coreml)을 통해서 가능합니다. 
Apple사는 공식적으로 Keras를 지원합니다 ([튜토리얼](https://www.pyimagesearch.com/2018/04/23/running-keras-models-on-ios-with-coreml/)). \n- Android에서는 TensorFlow Android 런타임을 통해서 가능합니다 (e.g. [Not Hotdog 앱](https://medium.com/@timanglade/how-hbos-silicon-valley-built-not-hotdog-with-mobile-tensorflow-keras-react-native-ef03260747f3)).\n- 웹 브라우저에서는 [Keras.js](https://transcranial.github.io/keras-js/#/)와 같은 GPU 가속된 JavaScript 런타임과 [WebDNN](https://mil-tokyo.github.io/webdnn/)을 통해서 가능합니다.\n- Google Cloud에서는 [TensorFlow-Serving](https://www.tensorflow.org/serving/)을 통해서 가능합니다.\n- [Flask 앱과 같은 Python 웹 백엔드](https://blog.keras.io/building-a-simple-keras-deep-learning-rest-api.html)에서도 가능합니다.\n- JVM에서는 [SkyMind가 제공하는 DL4J](https://deeplearning4j.org/model-import-keras)를 통해서 가능합니다.\n- Raspberry Pi에서도 가능합니다.\n\n---\n\n##### Keras는 여러 백엔드 엔진을 지원하여 하나의 생태계에 속박되지 않습니다\n\nKeras 모델은 여러 [딥러닝 백엔드](https://keras.io/backend/)를 지원합니다. 눈여겨볼 만한 점은, 내장 레이어로만 구성된 Keras 모델들은 지원하는 모든 백엔드들과 호환이 되어 학습에 사용되는 백엔드와 배포 등을 위한 로드에 사용되는 백엔드가 서로 달라도 된다는 것입니다. 
사용 가능한 백엔드들은 다음과 같습니다.\n\n- TensorFlow 백엔드 (Google사 제공)\n- CNTK 백엔드 (Microsoft사 제공)\n- Theano 백엔드\n\nAmazon사는 MXNet을 백엔드로 사용하는 [Keras의 분기 버전](https://github.com/awslabs/keras-apache-mxnet)을 제공합니다.\n\n결과적으로 Keras 모델들은 CPU뿐만이 아닌 다른 여러 하드웨어 플랫폼들에서도 학습이 가능합니다.\n\n- [NVIDIA GPUs](https://developer.nvidia.com/deep-learning)\n- [Google TPUs](https://cloud.google.com/tpu/) (TensorFlow 백엔드와 Google Cloud를 통해서)\n- AMD사의 OpenCL과 호환되는 GPU ([PlaidML Keras 백엔드](https://github.com/plaidml/plaidml)를 통해서)\n\n---\n\n##### Keras는 다중 GPU와 학습의 분산처리를 지원합니다\n\n- Keras는 [다중 GPU 데이터 병렬성에 대한 지원이 내장되어있습니다](/utils/#multi_gpu_model).\n- Uber사의 [Horovod](https://github.com/uber/horovod)는 케라스 모델을 일차적으로 지원합니다.\n- Keras 모델을 [TensorFlow 추정자로 변환](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/estimator/model_to_estimator)이 가능하며, [Google Cloud를 통한 GPU 클러스터](https://cloud.google.com/solutions/running-distributed-tensorflow-on-compute-engine)에서 학습시킬 수 있습니다.\n- [Dist-Keras](https://github.com/cerndb/dist-keras)와 [Elephas](https://github.com/maxpumperla/elephas)를 통해 Spark에서 Keras를 실행할 수 있습니다.\n\n---\n\n##### Keras의 개발은 딥러닝 생태계의 주요 기업들의 지원을 받습니다\n\nKeras는 Google사의 지원을 중심으로 개발되고 있으며, Keras API는 `tf.keras`로 TensorFlow의 패키지로 제공됩니다. CNTK Keras 백엔드의 유지보수 또한 Microsoft사의 책임하에 이루어집니다. Amazon AWS는 MXNet과 함께 Keras를 관리합니다. NVIDIA, Uber, CoreML을 포함한 Apple사 또한 Keras의 개발에 공헌하였습니다.\n\n<img src='https://keras.io/img/google-logo.png' style='width:200px; margin-right:15px;'/>\n<img src='https://keras.io/img/microsoft-logo.png' style='width:200px; margin-right:15px;'/>\n<img src='https://keras.io/img/nvidia-logo.png' style='width:200px; margin-right:15px;'/>\n<img src='https://keras.io/img/aws-logo.png' style='width:110px; margin-right:15px;'/>",
"_____no_output_____"
],
[
"### PyTorch\n\n\n\nPyTorch는 Python을 위한 오픈소스 머신 러닝 라이브러리이다. Torch를 기반으로 하며[1][2][3], 자연어 처리와 같은 애플리케이션을 위해 사용된다.[4] GPU사용이 가능하기 때문에 속도가 상당히 빠르다. 아직까지는 Tensorflow의 사용자가 많지만, 비직관적인 구조와 난이도 때문에, Pytorch의 사용자가 늘어나고 있는 추세이다. 이는 Facebook의 인공지능 연구팀이 개발했으며, Uber의 “Pyro”(확률론적 프로그래밍 언어)소프트웨어가 Pytorch를 기반으로 한다\n\nPytorch는 두 개의 높은 수준의 파이선 패키지 형태로 제공한다.[5]\n- 강력한 GPU가속화를 통한 Tensor계산 ex) NumPy\n- 테이프 기반 자동 삭제 시스템을 기반으로 구축된 심층 신경망\n\nFacebook은 PyTorch와 Convolutional Architecture for Fast Feature Embedding (Caffe2)을 모두 운영하고 있지만 비호환성으로 인해 PyTorch 정의 모델을 Caffe2로 변환하거나 그 반대로 변환하는 것이 어렵다. 개신경망 교환(ONNX, Open Neural Network Exchange) 프로젝트는 Facebook과 Microsoft가 프레임워크 간 모델 전환을 위해 2017년 9월 만든 프로젝트다. Caffe2는 2018년 3월 말에 PyTorch으로 합병되었다.",
"_____no_output_____"
],
[
"### Caffe\n\n<img src='https://miro.medium.com/max/1600/1*TKh1O_vDYwfsW3JeEYFYlg.jpeg' width=50%>",
"_____no_output_____"
],
[
"#### Caffe란?\nCaffe는 표현, 속도 및 모듈성을 염두에두고 만들어진 딥 러닝 프레임 워크입니다. BAIR (Berkeley AI Research )와 커뮤니티 기고자들이 개발했습니다. Yangqing Jia 는 UC Berkeley에서 박사 과정 중에 프로젝트를 만들었습니다. Caffe는 BSD 2-Clause 라이센스에 따라 배포 됩니다.\n\n#### 왜 Caffe일까요?\n`표현형 아키텍처(Expressive architecture)`는 애플리케이션과 혁신을 장려합니다. 모델 및 최적화는 하드 코딩없이 구성으로 정의됩니다. GPU 시스템에서 학습하도록 단일 플래그를 설정하여 CPU와 GPU 간을 전환 한 다음 상용 클러스터 또는 모바일 장치에 배포하십시오.\n\n`확장 가능한 코드(Extensible code)`는 적극적인 개발을 촉진합니다. Caffe의 첫 해에는 1,000 명 이상의 개발자가 포크했으며 많은 중요한 변화가있었습니다. 이러한 기여자 덕분에 프레임 워크는 코드와 모델 모두에서 최신 기술을 추적합니다.\n\nCaffe는 `속도(Speed)`를 통해 연구 실험 및 산업 배치에 적합합니다. Caffe는 단일 NVIDIA K40 GPU 로 매일 60M 이상의 이미지를 처리 할 수 있습니다 . 추론의 경우 1ms / 이미지이고 학습의 경우 4ms / 이미지이며 최신 라이브러리 버전 및 하드웨어는 여전히 더 빠릅니다. 우리는 Caffe가 가장 빠른 convnet 구현 중 하나라고 생각합니다.\n\n`커뮤니티(Community)` : Caffe는 이미 비전, 연설 및 멀티미디어 분야의 학술 연구 프로젝트, 스타트 업 프로토 타입 및 대규모 산업 응용 프로그램을 지원합니다. caffe-users 그룹 및 Github 의 브루어 커뮤니티에 참여하십시오 .",
"_____no_output_____"
],
[
"---\n\n## 수많은 레이어(Layer)",
"_____no_output_____"
],
[
"### 완전 연결 레이어(Fully Connected Layer)\n\n딥러닝과 신경망 구조를 공부하다보면 가장 먼저 접하게 되는 레이어입니다. 이전 레이어의 모든 노드가 다음 레이어의 모든 노드에 연결된 레이어를 완전 연결 레이어(Fully Connected Layer)라고 합니다. Keras에서는 특별히 이러한 층을 Dense라고도 부릅니다.\n\n<img src='https://www.oreilly.com/library/view/tensorflow-for-deep/9781491980446/assets/tfdl_0401.png' width=50%>",
"_____no_output_____"
],
[
"### 합성곱 레이어(Convolutional Layer)\n\n합성곱 레이어는 이미지에서 필터(Filter,Kernel)를 통해서 주요한 특징을 추출하는 레이어라고 생각하면 된다. 각 레이어마다 수많은 필터를 가지고 있으며, 해당 필터만 추출되는 특징은 그만큼 다양하다. 어떤 필터는 수직 엣지를 추출하는 필터도 있고, 어떤 필터는 수평 엣지를 추출하는 필터도 있다. 이러한 합성곱 레이어가 여러겹으로 쌓일수록 추출할 수 있는 특징 또한 고차원화된다. 쉽게 말하면 합성곱 레이어를 적게 통과한 이미지에서 뽑힌 특징은 저차원 특징을 추출하지만, 합성곱 레이어를 많이 통과한 이미지는 더욱 고차원의 특징을 추출한다.\n\n\n\n",
"_____no_output_____"
],
[
"### 최대 풀링 레이어(Max Pooling Layer)\n\n위에 합성곱 레이어를 통과할 때 각 영역별로 수많은 값을 가진 특징맵이 생겨나는데, 그 중에서도 가장 두드러지는 특징을 고르는게 바로 최대 풀링 레이어의 역할이다. 최대 풀링 레이어를 거치면 또한 고려해야하는 특징의 수를 줄여주는 역할도 하므로써 연산량을 줄여주는 효과도 생긴다.\n\n",
"_____no_output_____"
],
[
"### 활성화 함수(Activation Function)\n\n- ",
"_____no_output_____"
],
[
"---\n\n## 다양한 신경망 네트워크(Neural Network)",
"_____no_output_____"
],
[
"### 합성곱 신경망(CNN, Convolutional Neural Network)\n\n\n\n 앞서 언급한 3개의 레이어를 통해서, 우리는 드디어 이미지를 분류할 수 있는 모델을 만들 준비가 되었습니다. 레이어가 레고 블럭이라면 네트워크는 레고 블럭으로 이루어진 집 정도가 된다고 생각하시면 좋습니다. 합성곱 신경망은 일단 합성곱 레이어를 통해서 이미지의 특징을 뽑아냅니다. 그리고 최대 풀링 레이어를 통해서 가장 두드러진 특징을 찾아냅니다. 합성곱 레이어가 여러겹으로 쌓일수록 이미지에서 뽑을 수 있는 특징은 점점 저차원에서 고차원의 특징까지 뽑을 수 있게 됩니다. 이렇게 합성곱 레이어와 최대 풀링 레이어의 반복을 여러번 반복한 후에는 마지막으로 완전 연결 계층을 통해서 해당 이미지의 클래스를 분류하는 작업을 합니다.\n\n",
"_____no_output_____"
],
[
"---\n\n## 더 깊은 딥러닝 모델(Deep Learning Model)",
"_____no_output_____"
],
[
"### AlexNet\n\n\n\nRecurrent Neural Network와 더불어 딥러닝 모델의 양대 산맥으로 주목받고 있는 CNN은 기본적으로 얀 르쿤이 1989년 제안한 구조를 토대로 하고 있습니다. 컴퓨터 비전 분야의 ‘올림픽’이라 할 수 있는 ILSVRC(ImageNet Large-Scale Visual Recognition Challenge)의 2012년 대회에서 제프리 힌튼 교수팀의 AlexNet이 top 5 test error 기준 15.4%를 기록해 2위(26.2%)를 큰 폭으로 따돌리고 1위를 차지했습니다.\n\n여기서 top 5 test error란 모델이 예측한 최상위 5개 범주 가운데 정답이 없는 경우의 오류율을 나타냅니다. 당시 ILSVRC 데이터셋(Image은 1000개 범주 예측 문제였습니다. 어쨌든 AlexNet 덕분에 딥러닝, 특히 CNN이 세간의 주목을 받게 됐습니다. AlexNet 아키텍처의 주요 특징은 다음과 같습니다.\n\nAlexNet이 중요한 이유는 의미있는 성능을 낸 첫번째 CNN 아키텍처이자, AlexNet에 쓰인 드롭아웃 등 기법은 이 분야 표준으로 자리잡을 정도로 선도적인 역할을 했기 때문입니다.\n\n출처 : [ratsgo's blog](https://ratsgo.github.io/deep%20learning/2017/10/09/CNNs/)",
"_____no_output_____"
],
[
"### Inception V3\n\n\n\nAlexNet 이후 층을 더 깊게 쌓아 성능을 높이려는 시도들이 계속되었습니다. VGGNet(2014), GoogleNet(2015) 등이 바로 그것입니다. GoogleNet은 VGGNet보다 구조가 복잡해 널리 쓰이진 않았지만 아키텍처 면에서 주목을 받았습니다. 보통 하나의 conv layer에는 한 가지의 conv filter가 사용됩니다.\n\nGoogleNet 연구진들은 한 가지의 conv filter를 적용한 conv layer를 단순히 깊게 쌓는 방법도 있지만, 하나의 layer에서도 다양한 종류의 filter나 pooling을 도입함으로써 개별 layer를 두텁게 확장시킬 수 있다는 창조적인 아이디어로 후배 연구자들에게 많은 영감을 주었습니다. 이들이 제안한 구조가 바로 Inception module입니다. (그림 출처)\n\n<img src='https://i.imgur.com/VY3BkBR.png' width=50%>\n\nInception module에서 특히 주목받은 것이 바로 1×1 conv filter입니다. 가령 현재 층 입력데이터 이미지의 차원수가 100×100×60이고, 1×1 conv filter를 20개 사용한다면 데이터의 차원 수는 100×100×20으로 줄어듭니다. 60개 채널(차원)에 달하는 하나의 픽셀이 20차원의 feature space로 선형변환, 차원축소된 것이라고도 볼 수 있겠습니다.\n\n출처 : [ratsgo's blog](https://ratsgo.github.io/deep%20learning/2017/10/09/CNNs/)",
"_____no_output_____"
],
[
"### VGG-16\n\n\nVGGNet의 특징은 작은 필터 크기의 convolution 연산이다. AlexNet의 경우, 첫번째 convolution layer의 필터 크기는 11x11, GoogLeNet의 경우는 7x7 이었다. 반면, VGG는 처음부터 끝까지 3x3의 필터 크기를 사용하여 좋은 성과를 거두었다. 3x3 convolution 연산을 쌓는 것이 어떤 의미가 있는 것일까?\n\n3x3 convolution을 두 번 쌓는 것은 5x5 convolution과 동일한 receptive field의 정보를 처리하고, 세 번 쌓는 것은 7x7의 receptive field의 정보를 처리한다. 하지만, 3x3을 여러번 쌓는 것이 더 좋은 성능을 낸다. 그 이유는 두 가지이다. 첫째로 3x3 convolution 연산을 여러번 하는 것은 여러번의 비선형 처리를 해주는 것이므로, 큰 필터로 한번 연산 했을 때보다 더 많은 비선형성을 가질 수 있다. 두번째로는 그럼에도 불구하고, 파라미터 수는 3x3 convolution을 여러번 했을 때 더 적다. 예를 들어, 채널의 갯수가 𝐶 라고 할 때, 7x7의 경우는 72×𝐶2 의 파라미터를 가지지만, 3x3을 세 번 쌓은 경우에는, 3×(32×𝐶2) 의 파라미터를 가진다.\n\n다음에 나오는 VGG16의 summary 결과를 보면, 어마하게 많은 파라미터가 존재한다는 것을 알 수 있다. VGGNet은 간단한 구조를 가졌지만, fully connected layer가 3개가 있고, 풀링(pooling)을 거친 뒤에는 피쳐맵의 갯수가 2배로 커지면서 필요한 파라미터가 과도하게 많아졌다. 파라미터가 많다는 것은 딥러닝의 고질적인 문제인, gradient vanishing, 과적합등의 문제가 발생할 가능성이 크다는 의미이다.\n\n실제로 VGG16과 VGG19는 학습에 어려움이 있었다. 논문 저자들은 이 문제를 해결하기 위해, 표 18.4.2 의 \"A\" 모델로 학습한 fully connected layer의 가중치를 초기값으로 주어 16, 19개의 layer의 모델들을 학습시켰다.\n\n출처 : [데이터 사이언스 스쿨](https://datascienceschool.net/view-notebook/47c57f9446224af08f13e1f3b00a774e/)",
"_____no_output_____"
],
[
"### ResNet-50\n\n\n\nResNet(2015)은 2015년 ILSVRC에서 오류율 3.6%로 1등을 차지했습니다. 인간의 분류 오차가 5~10% 정도라는 걸 감안하면 놀라운 성적표입니다.\n\n사실 AlexNet이 처음 제안된 이후로 CNN 아키텍처의 층은 점점 더 깊어졌습니다. AlexNet이 불과 5개 층에 불과한 반면 VGGNet은 19개 층, GoogleNet은 22개 층에 달합니다. 하지만 층이 깊어질 수록 역전파되는 그래디언트가 중간에 죽어서 학습이 잘 되지 않는 문제(gradient vanishing)가 발생했습니다. ResNet 저자들이 제시한 아래 학습그래프를 보면 이같은 문제가 뚜렷이 나타납니다.\n\n\n\nResNet 저자들의 핵심 아이디어는 다음 그림과 같은 residual block입니다. 그래디언트가 잘 흐를 수 있도록 일종의 지름길(shortcut, skip connection)을 만들어 주자는 생각입니다. 이는 forget gate 등을 도입해 이전 스텝의 그래디언트(정보)를 좀 더 잘 흐르게 만드려는 Long Term Short Memory(LSTM)의 철학과 본질적으로 유사합니다.\n\n\n\nResNet의 성능이 좋은 이유는 그래디언트 문제 말고 또 있습니다. Veit et al. (2016)은 residual block이 앙상블(ensemble) 모델을 구축한 것과 비슷한 효과를 낸다고 주장했습니다. residual block의 skip connection 덕분에 입력데이터와 그래디언트가 오갈 수 있는 통로가 크게 늘어나기 때문입니다. (n개 skip connection이 있다면 2n개의 서로 다른 통로 존재) 이를 직관적으로 나타낸 그림은 아래 그림과 같습니다.\n\n\n\n출처 : [ratsgo's blog](https://ratsgo.github.io/deep%20learning/2017/10/09/CNNs/)",
"_____no_output_____"
],
[
"---\n\n## MobileNet\n\n### 딥러닝 경량화 기술의 동향\n\n 최근 좋은 성과를 내는 딥러닝을 모바일 디바이스, 산업용 게이트웨이, IoT 센서와 같은 온-디바이스에서 작동시키기 위하여 경량화 연구가 활발히 진행중이다. 경량 딥러닝 기술이란, 알고리즘 자체를 적은 연산량을 가지게게 효율적인 구조로 설계하는 기술이다. 메모리 크기가 작은 디바이스에서 기존의 학습된 모델의 정확도를 유지시키는 것을 골자로 가장 일반화된 합성곱 신경망(CNN: Convolutional Neural Network)을 통해 다양한 연구가 진행중이다.\n\n첫번째로는 다양한 신규 계층 구조를 설계하여 신경망 구조를 제공함으로써 우수한 추론 성능을 보이는 연구가 소개되고 있다. 이는 기본 단일 층별 연산에 그치지 않고 연산량 과 파라미터 수를 줄이기 위한 잔여 블록(Residual Block) 또는 병목 블록(Bottleneck Block)과 같은 형태를 반복적으로 쌓아 신경망을 구성하는 방법이다. \n\n두번째로는 CNN 계열의 모델에서 학습 시 가장 큰 연산량을 요구하는 합성곱 연산을 줄이기 위한 효율적인 합성곱 필터 기술을 연구하는 것이다.\n\n마지막으로는 기존 신경망의 모델 구조를 인간에게 의존적으로 수행하지 않고 모델 구조를 자동 탐색함 으로써 모델을 자동화하거나 연산량 대비 모델 압축비율을 조정하는 등 다양한 자동탐색 기술이 존재한다. 이는 모바일 딥러닝과 같은 다양한 기기의 성능대비 추론속도가 중요한 응용을 위해 정확도, 지연시간, 에너지 소모량들을 사용하여 강화학습(Reinforcement Learning)을 활용하여 경량 모델을 탐색하는 기술이다.\n\n\n<img src='./img/theory/dl-trend.png' width=70%>\n\n[출처:https://ettrends.etri.re.kr/]",
"_____no_output_____"
],
[
"### MobileNet에 대해서\n\n- MobileNet V1 : https://arxiv.org/pdf/1704.04861.pdf\n - 관련 Slideshare : https://www.slideshare.net/JinwonLee9/mobilenet-pr044\n- MobileNet V2 : https://arxiv.org/pdf/1801.04381.pdf\n - 관련 Slideshare : https://www.slideshare.net/JinwonLee9/pr108-mobilenetv2-inverted-residuals-and-linear-bottlenecks\n\n\n\nMobilenet에서는 컴퓨터 성능이 제한된 모바일, 디바이스 등에서 사용될 목적으로 설계된 CNN 구조입니다. Xception에서 배웠던 **`Depthwise separable convolution`** 이 사용하여 같은 레이어 수의 다른 CNN 구조에 비해 파라미터 수를 상당히 낮추었습니다. 기존의 합성곱 필터를 채널(Channel) 단위로 먼저 합성곱(depthwise convolution)을 하고, 그 결과를 하나의 픽셀(point) 단위로 진행 하는 합성곱(pointwise convolution)으로 분리(factorized)하여 연산량을 줄이는 방법을 제안하였습니다. 그로 인해 정확도는 어느정도 유지하면서 연산량을 8~9배로 줄이는 모델입니다.",
"_____no_output_____"
],
[
"### Pipeline\n\nMobileNet이 작동하는 전체적인 파이프라인은 아래와 같습니다.\n\n<img src='./img/theory/mobilnetpipeline.png' width=70%>\n\n- Depthwise convolution: 깊이 별로 $3\\times3$ kernel과 합성곱을 수행합니다.\n- Pointwise convolution: 깊이 별로 합성한 결과를 $1x1$ convolution 을 사용하여 결합을 수행합니다.",
"_____no_output_____"
],
[
"### Standard Convolution VS Depthwise Separable Convolution\n\n#### Standard Convolution \n\n그럼 모바일넷이 기존의 합성곱 연산과 어떻게 다른지 알아보기 위해서, 다시 기존의 합성곱 연산에 대해서 살펴보자\n\n <img src='./img/theory/standard_conv.png' width=70%>\n \n - ${D_F}{\\times}{D_F}\\times{M}$(폭x높이x채널)의 크기를 같는 입력 값(Feature map)에 ${D_k}\\times{D_k}\\times{M}\\times{N}$(폭x높이x채널x갯수)의 크기를 같는 필터(kernel)을 합성곱(convolution) 시킨다면 ${D_G}\\times{D_G}\\times{N}$(폭x높이x채널)의 크기를 같는 결과값(output)이 나옵니다. <P>\n \n - 계산 비용은 ${D_k}\\times{D_k}\\times{M}\\times{N}\\times{D_F}\\times{D_F}$을 가지게 됩니다. \n\n#### Depthwise Separable Convolution\nMobileNet의 Depthwise separable convolution연산은 Xception과 대부분이 동일하고, depthwise convolution 연산과 pointwise convolution 연산 사이에도 batch normalization과 ReLU 활성화 함수 단계가 추가된 점만 다르다.\n Depthwise separable convolution은 채널(Channel) 단위로먼저 합성곱(depthwise convolution)을 하는 단계와 하나의 픽셀(point) 단위로 진행 하는 합성곱(pointwise convolution)을 하는 두 단계로 나눌 수 있습니다.\n \n> `Depthwise Separable Convolution` = `Depthwise Convolution` + `Pointwise Convolution`",
"_____no_output_____"
],
[
"##### Depthwise Convolution \n\n <img src='./img/theory/STEP1.png' width=80%>\n \n - ${D_F}{\\times}{D_F}\\times{M}$(폭x높이x채널)의 크기를 같는 입력 값(Feature map)에 ${D_k}\\times{D_k}\\times{M}\\times{1}$(폭x높이x채널x갯수)의 크기를 같는 필터_1(kernel)을 합성곱(convolution) 시킨다면 ${D_F}\\times{D_F}\\times{M}$(폭x높이x채널)의 크기를 같는 결과값(output_1)이 나옵니다.<p>\n \n - 깊이(depth)별로 하므로 최종 결과값의 크기가 같은 것을 확인 할 수 있습니다. <p>\n \n - STEP1의 계산비용은 ${D_k}\\times{D_k}\\times{M}\\times{D_F}\\times{D_F}$ 이 됩니다. <p>\n \n \n##### Pointwise Convolution\n\n<img src='./img/theory/STEP2.png' width=80%>\n\n - ${D_F}{\\times}{D_F}\\times{M}$(폭x높이x채널)의 크기를 같는 입력 값(Output_1)에 ${1}\\times{1}\\times{M}\\times{N}$(폭x높이x채널x갯수)의 크기를 같는 필터_2(kernel)을 합성곱(convolution) 시킨다면 ${D_G}\\times{D_G}\\times{N}$(폭x높이x채널)의 크기를 같는 결과값(Output_2)이 나옵니다.<p>\n \n - ${D_F}X{D_F}X{M}$ X $1X1XN$( 1X1 conv. M번 사용하여 필터갯수 N 번 만큼 하였다. <p>\n \n - STEP2의 계산비용은 ${M}\\times{N}\\times{D_F}\\times{D_F}$ 이 됩니다. <p>\n ",
"_____no_output_____"
],
[
"### 효율성\n 일반적인 합성곱을 두 단계로 나누어 계산하는 것은 계산량이 더 높아지는 것이라고 생각되기 쉽습니다. 하지만, 일반적인 합성곱 과 Depthwise separable convolution을 수식적으로 비교하여 본다면 필터(kernel)의 크기가 3으로 고정될때, 8-9배가 빨라짐을 확인 할 수 있습니다. \n\n<img src='./img/theory/efficiency.png' width=80%>",
"_____no_output_____"
],
[
"## MobileNetV2\n\nLinear bottleneck, Inverted residual block 을 사용하여 Mobilenet v1을 개선 시킨 Mobilenet v2 모델을 제안하였다. \n \n\n### Pipeline\n <img src='./img/theory/mobilenetv2.png'>\n 저 차원(low-dimention) 입력값을 받아 $1\\times1$ convolution 을 사용하여 채널을 늘린다. 그 후 Mobilenet v1 과 같은 깊이별 합성곱(depthwise convolution), 픽셀별 합성곱(pointwise convolution) 프로세스를 거치게 된다. (단, t는 채널을 늘리는 expansion factor )",
"_____no_output_____"
],
[
"### Linear Bottlenecks \n\n 뉴럴네트워크에서 입력 값의 집합(= input set, manifold of interest)이 저차원(low-dimension)으로 임베디드(embedded) 될 수 있다는 가정은 오래된 가정이다. Mobilenet v2에서도 이와 같은 두 가지 가정이 만족된다면, Linear Bottlenecks layer 을 가지고 Manifold of interest 부분을 포착 할 수 있다고 주장한다.\n \n#### Two assumptions (non-linear function 을 똑같이 사용 할 수 있다 )\n - Manifold of interest 부분이 ReLU를 거친 후에도 non-zero volume을 가지고 있다면 그것은 linear transformation에 해당된다.\n - Input manifold가 input의 low-dimensional subspace에 놓여 있다면, ReLU가 input manifold에 대해서 완전한 정보를 preserving할 수 있다.\n\n#### In experiments\n \n 본 논문에서는 linear layer를 사용하여 non-linearity의 정보손실(information loss)을 보존하였다. 실제 실험을 통해서 bottleneck 구조에서 non-linear layer가 성능저하를 불러왔다.",
"_____no_output_____"
],
[
"### Inverted Residuals \n\n#### Residual block VS Inverted residual block \n <img src='./img/theory/residual_inverted.png' width=90%>\n\n - Residual block\n > bottleneck이 후 expansion 이 이루어 진다. <p>\n > Residual block은 channel이 큰 block 끼리 연결되어있다.\n \n - Inverted residual block\n > expansion 후 bottleneck 이 이루어 진다.<p>\n > Inverted residual block은 bottleneck끼리 연결되어 있다. <p>\n (* 이를 Short cut-connection이라 하며 사용하는 이유는 ResNet처럼 Gradient back-propagation이 잘 되기 때문이다.) \n \n#### 확장(Expansion)이 필요한 이유는?\n 활성함수(Activation function) 으로 ReLu를 사용 할 시 정보손실(information loss) 가 발생하는 점을 간과 할 수 없기 때문이다. 그리하여 보통의 채널(Channel)을 감소(reduction)하는 방식이 아닌 먼저 채널을 늘리는(expanding) 방식을 사용한다. 그 결과, 채널(Channel)이 깊어졌기 때문에 저차원(low-dimensional subspace)으로 맵핑 한다하여도 정보손실을 크게 유발하지 않을 수 있으며 정보를 보존 할 수 있다. \n\n#### Structure of inverted residual block\n <img src='./img/theory/structure of inverted residual block.png' width= 70%>\n \n - STEP1; Expansion Convolution \n \n 높이가 h, 폭 w 이고 채널수 k 일때, 입력 값(Input) $h\\times w\\times k$은 convolution을 거쳐 $h\\times w\\times tk$이라는 결과값을 가지게 된다. 채널이 t배만큼 확장된 결과값을 가지며 $5 \\le t \\le 10 $ 이여야되며, defualt 값으로 6을 사용하였다. <p> \n \n - STEP2; Depthwise Convolution\n \n $3\\times3 $ kernel 을 가지고 깊이별(depthwise) 합성곱(convolution)을 실시한다. stride는 s 로 결과값으로 $\\frac{h}{s} \\times \\frac{w}{s} \\times tk $을 가지게 된다. <p>\n \n - STEP3; Pointwise Convolution \n \n 픽셀별(pointwise) 합성곱으로 인해 STEP2의 결과값을 합쳐 주게 된다. \n \n ",
"_____no_output_____"
],
[
"### Memory Efficient Inference\n\n#### Bottleneck Residual Block\n <img src='./img/theory/Bottleneck Residual Block.png' width=100%>\n Bottleneck 함수를 $F(x) = \\Sigma{B \\circ N \\circ A}$ 로정의 할 수 있다. <p> - $A$ 는 linear transformation $A: R^{s \\times s \\times k} \\rightarrow R^{s \\times s \\times n}$ 로써 expansion conv.에 해당된다. <p> - $N$ 는 non-linear transformation $N: R^{s \\times s \\times n} \\rightarrow R^{s' \\times s' \\times n}$ 로써 $N = $ReLu6$\\circ$ dwsie$\\circ$ RELu6 이며 per-channel transormation 이다. <p> - $B$ 는 linear transformation $B: R^{s' \\times s' \\times n} \\rightarrow R^{s' \\times s' \\times k'}$ pointwise conv. (= compression conv.) 이다.\n \n#### Compute graph G \n \n 병렬구조인 그래프로 메모리를 추론해본다면 $\\mbox{max}_{op \\in G}[\\sum_{A \\in op} \\left\\vert A \\right\\vert + \\sum_{B \\in op} \\left\\vert B \\right\\vert + \\left\\vert op \\right\\vert]$ 이다.\n \n 만약 우리가 Bottleneck Residual Block 을 하나의 오퍼레이터로 본다면 전체 메모리는 메모리는 bottleneck tensor 사이즈에 영향을 받는 것을 뜻한다.\n \n 즉, 입력값과 아웃풋의 사이즈가 작을수록 메모리를 적게 사용한다.\n \n#### t-way split \n \n 전체 inner tensor $I$는 $t$개의 tensor 들의 합으로 나타낼수 있다. 따라서 우리의 함수를 다음과 같이 표시 할 수 있다. $$F(x) = \\Sigma{B_i \\circ N \\circ A_i}$$ \n 그리하여 $ F(x)$ 를 $\\frac{n}{t}$ 으로 쪼개어 학습시킬수 있지만, 너무 세분하게 쪼갤 경우 cash 메모리 부족으로 오히려 런타임이 증가할수 있기에 $ 2 \\le t \\le 5$ 를 권장한다. ",
"_____no_output_____"
],
[
"#### 성능\n\n- ",
"_____no_output_____"
],
[
"---\n\n## 여러 CNN 모델 파라미터 및 성능 비교\n\n",
"_____no_output_____"
],
[
"---\n\n## Keras에서 제공하는 모델들\n\n직접 모델을 구성하여 컴파일해도 되지만, Keras에서 제공하는 어플리케이션 모듈에는 아래 모델을 미리 제공되기 때문에 이걸 활용해도 된다.\n\n### ImageNet으로 학습한 가중치를 이용해 이미지 분류를 수행하는 모델:\n- Xception\n- VGG16\n- VGG19\n- ResNet, ResNetV2, ResNeXt\n- InceptionV3\n- InceptionResNetV2\n- `MobileNet`\n- `MobileNetV2`\n- DenseNet\n- NASNet\n\n### 예시\n```python\nfrom keras.applications.mobilenet import MobileNet\nfrom keras.preprocessing import image\nfrom keras.applications.mobilenet import preprocess_input, decode_predictions\nimport numpy as np\n\nmodel = MobileNet(weights='imagenet')\n\nimg_path = 'elephant.jpg'\nimg = image.load_img(img_path, target_size=(224, 224))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = preprocess_input(x)\n\npreds = model.predict(x)\n# 결과를 튜플의 리스트(클래스, 설명, 확률)로 디코딩합니다\n# (배치 내 각 샘플 당 하나의 리스트)\nprint('Predicted:', decode_predictions(preds, top=3)[0])\n# 예측결과: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]\n```\n\n출처 : [Keras 공식 홈페이지](https://keras.io/ko/applications/#_2)",
"_____no_output_____"
],
[
"## Appendix\n\n### Standard Convolution \n\n#### 디지털 이미지\n <img src='./img/theory/rgb_image.png' width=100%>\n 위 그림과 같이 컬러 디지털 이미지는 픽셀들의 결합으로 만들어집니다. 이러한 픽셀들은 채널들의 조합으로 표현되어지며, RGB, YUV, Ycbcr 등 채널을 나누는 다양한 방법이있습다. \n\n#### Convolution Neural Network\n\n <img src='https://kr.mathworks.com/content/mathworks/kr/ko/solutions/deep-learning/convolutional-neural-network/jcr:content/mainParsys/band_copy_copy_14735/mainParsys/columns_1606542234_c/2/image.adapt.full.medium.jpg/1556019841908.jpg' width = 70%>\n \n Convolution Neural Network(CNN)에서는 이미지분류, 텍스트 또는 사운드를 분류하는 딥러닝에서 가장 많이 사용되는 알고리즘입니다. 특히, 패턴을 찾는데 유용하며 CNN을 학습하면 이미지를 분류하고 특징을 수동으로 추출할 필요가 없으며 높은 정확도를 보인다는 장점 이있습니다. 이러한 장점들로 인하여 최근 CNN의 사용하는 사례가 급증하게 되었습니다.<p> \n\n CNN은 이미지의 픽셀값을 입력값으로 받아 합성곱$ \\rightarrow$ ReLu $\\rightarrow$ Pooling을 반복 적으로 수행하여 특징을 추출하게 됩니다. \n\n<img src='https://kr.mathworks.com/content/mathworks/kr/ko/solutions/deep-learning/convolutional-neural-network/jcr:content/mainParsys/band_copy_copy_14735_1026954091/mainParsys/columns_1606542234_c/2/image.adapt.full.medium.jpg/1556019842434.jpg' with= 70%>\n\n - 합성곱: 가장 중요한 레이어인 합성곱(Convolution) 층(layer)에서는 3차원의 공간정보를 학습하게 됩니다. <p>\n \n - ReLu(Rectified Linear Unit): 다양한 활성함수(Actication Function)이 있지만 주로 ReLu를 사용하며 비선형적인 특징때문에 층을 깊게 쌓을수 있도록 해줍니다. <p>\n \n - Pooling: 네트워크에서 학습해야하는 매개변수 수를 줄여서 출력을 간소화 시켜주는 역할을 합니다.<p>\n\n\n### Process of Convolution \n\n<img src='https://camo.githubusercontent.com/bd17725182187a746c1d7a49ed1c3e3722f2b832/68747470733a2f2f7777772e636e746b2e61692f6a75702f636e746b313033645f636f6e7632645f66696e616c2e676966' width=30%>\n\n### ReLu6",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef relu6(x):\n return np.minimum(np.maximum(x,0),6)\n\nx = np.arange(-10,20)\ny = relu6(np.arange(-10,20))\nplt.plot(x,y)\nplt.grid()\nplt.show()",
"_____no_output_____"
]
],
[
[
"---\n\n## 참고",
"_____no_output_____"
],
[
"- Intel\n - https://www.intel.co.kr/\n- Intel OpenVINO\n - https://software.intel.com/en-us/openvino-toolkit\n- MNIST\n - http://yann.lecun.com/exdb/mnist/\n- CIFAR10\n - https://www.cs.toronto.edu/~kriz/cifar.html\n- ImageNet\n - http://www.image-net.org\n- Tensorflow\n - https://www.tensorflow.org/?hl=ko\n- Keras\n - https://keras.io/\n - https://tensorflow.blog/2019/03/06/tensorflow-2-0-keras-api-overview/\n - https://tykimos.github.io/2017/02/22/Integrating_Keras_and_TensorFlow/\n - https://tykimos.github.io/2017/03/08/CNN_Getting_Started/\n - https://raw.githubusercontent.com/keras-team/keras-docs-ko/master/sources/why-use-keras.md\n- Keras to Caffe\n - https://github.com/uhfband/keras2caffe\n - http://www.deepvisionconsulting.com/from-keras-to-caffe/\n- Fully Connected Layer\n - https://sonofgodcom.wordpress.com/2018/12/31/cnn%EC%9D%84-%EC%9D%B4%ED%95%B4%ED%95%B4%EB%B3%B4%EC%9E%90-fully-connected-layer%EB%8A%94-%EB%AD%94%EA%B0%80/\n- Convultional Nueral Network\n - http://aikorea.org/cs231n/convolutional-networks/\n - http://cs231n.stanford.edu/\n- CNN Models\n - https://ratsgo.github.io/deep%20learning/2017/10/09/CNNs/\n\n- VOC2012\n - https://blog.godatadriven.com/rod-keras-multi-label\n - https://gist.github.com/rragundez/ae3a17428bfec631d1b35dcdc6296a85#file-multi-label_classification_with_keras_imagedatagenerator-ipynbhttps://fairyonice.github.io/Part_5_Object_Detection_with_Yolo_using_VOC_2012_data_training.html\n - http://research.sualab.com/introduction/2017/11/29/image-recognition-overview-1.html\n ",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
ec6553fa59336d21a9ee7c0fd08ab4ec819ac790 | 1,046,805 | ipynb | Jupyter Notebook | jupyter/extras/Python 2.7 Crash Course v0.5.ipynb | mullenkamp/EcanPythonCourse2019 | 23122e8da7aac01796eb99ecdd2f7d0550ee63d1 | [
"Apache-2.0"
]
| null | null | null | jupyter/extras/Python 2.7 Crash Course v0.5.ipynb | mullenkamp/EcanPythonCourse2019 | 23122e8da7aac01796eb99ecdd2f7d0550ee63d1 | [
"Apache-2.0"
]
| null | null | null | jupyter/extras/Python 2.7 Crash Course v0.5.ipynb | mullenkamp/EcanPythonCourse2019 | 23122e8da7aac01796eb99ecdd2f7d0550ee63d1 | [
"Apache-2.0"
]
| 1 | 2020-11-01T23:07:44.000Z | 2020-11-01T23:07:44.000Z | 150.316628 | 89,510 | 0.877755 | [
[
[
"# A Crash Course in Python for Scientists\n[Rick Muller](http://www.cs.sandia.gov/~rmuller/), Sandia National Laboratories\n\nversion 0.6\n\nThis work is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US).",
"_____no_output_____"
],
[
"## Why Python?\nPython is the programming language of choice for many scientists to a large degree because it offers a great deal of power to analyze and model scientific data with relatively little overhead in terms of learning, installation or development time. It is a language you can pick up in a weekend, and use for the rest of one's life.\n\nThe [Python Tutorial](http://docs.python.org/2/tutorial/) is a great place to start getting a feel for the language. To complement this material, I taught a [Python Short Course](http://www.wag.caltech.edu/home/rpm/python_course/) years ago to a group of computational chemists during a time that I was worried the field was moving too much in the direction of using canned software rather than developing one's own methods. I wanted to focus on what working scientists needed to be more productive: parsing output of other programs, building simple models, experimenting with object oriented programming, extending the language with C, and simple GUIs. \n\nI'm trying to do something very similar here, to cut to the chase and focus on what scientists need. In the last year or so, the [IPython Project](http://ipython.org) has put together a notebook interface that I have found incredibly valuable. A large number of people have released very good IPython Notebooks that I have taken a huge amount of pleasure reading through. 
Some ones that I particularly like include:\n\n* Rob Johansson's [excellent notebooks](http://jrjohansson.github.io/), including [Scientific Computing with Python](https://github.com/jrjohansson/scientific-python-lectures) and [Computational Quantum Physics with QuTiP](https://github.com/jrjohansson/qutip-lectures) lectures;\n* [XKCD style graphs in matplotlib](http://nbviewer.ipython.org/url/jakevdp.github.com/downloads/notebooks/XKCD_plots.ipynb);\n* [A collection of Notebooks for using IPython effectively](https://github.com/ipython/ipython/tree/master/examples/notebooks#a-collection-of-notebooks-for-using-ipython-effectively)\n* [A gallery of interesting IPython Notebooks](https://github.com/ipython/ipython/wiki/A-gallery-of-interesting-IPython-Notebooks)\n\nI find IPython notebooks an easy way both to get important work done in my everyday job, as well as to communicate what I've done, how I've done it, and why it matters to my coworkers. I find myself endlessly sweeping the [IPython subreddit](http://ipython.reddit.com) hoping someone will post a new notebook. In the interest of putting more notebooks out into the wild for other people to use and enjoy, I thought I would try to recreate some of what I was trying to get across in the original Python Short Course, updated by 15 years of Python, Numpy, Scipy, Matplotlib, and IPython development, as well as my own experience in using Python almost every day of this time.",
"_____no_output_____"
],
[
"## What You Need to Install\n\nThere are two branches of current releases in Python: the older-syntax Python 2, and the newer-syntax Python 3. This schizophrenia is largely intentional: when it became clear that some non-backwards-compatible changes to the language were necessary, the Python dev-team decided to go through a five-year (or so) transition, during which the new language features would be introduced and the old language was still actively maintained, to make such a transition as easy as possible. We're now (2013) past the halfway point, and, IMHO, at the first time when I'm considering making the change to Python 3.\n\nNonetheless, I'm going to write these notes with Python 2 in mind, since this is the version of the language that I use in my day-to-day job, and am most comfortable with. If these notes are important and are valuable to people, I'll be happy to rewrite the notes using Python 3.\n\nWith this in mind, these notes assume you have a Python distribution that includes:\n\n* [Python](http://www.python.org) version 2.7;\n* [Numpy](http://www.numpy.org), the core numerical extensions for linear algebra and multidimensional arrays;\n* [Scipy](http://www.scipy.org), additional libraries for scientific programming;\n* [Matplotlib](http://matplotlib.sf.net), excellent plotting and graphing libraries;\n* [IPython](http://ipython.org), with the additional libraries required for the notebook interface.\n\nA good, easy to install option that supports Mac, Windows, and Linux, and that has all of these packages (and much more) is the [Entought Python Distribution](https://www.enthought.com/products/epd), also known as EPD, which appears to be changing its name to Enthought Canopy. Enthought is a commercial company that supports a lot of very good work in scientific Python development and application. 
You can either purchase a license to use EPD, or there is also a [free version](https://www.enthought.com/products/epd/free/) that you can download and install.\n\nHere are some other alternatives, should you not want to use EPD:\n\n**Linux** Most distributions have an installation manager. Redhat has yum, Ubuntu has apt-get. To my knowledge, all of these packages should be available through those installers.\n\n**Mac** I use [Macports](http://www.macports.org/), which has up-to-date versions of all of these packages.\n\n**Windows** The [PythonXY](https://code.google.com/p/pythonxy/) package has everything you need: install the package, then go to Start > PythonXY > Command Prompts > IPython notebook server.\n\n**Cloud** This notebook is currently not running on the [IPython notebook viewer](http://nbviewer.ipython.org/), but will be shortly, which will allow the notebook to be viewed but not interactively. I'm keeping an eye on [Wakari](http://www.wakari.io), from [Continuum Analytics](http://continuum.io/), which is a cloud-based IPython notebook. Wakari appears to support free accounts as well. Continuum is a company started by some of the core Enthought Numpy/Scipy people focusing on big data. \n\nContinuum also supports a bundled, multiplatform Python package called [Anaconda](https://store.continuum.io/) that I'll also keep an eye on.",
"_____no_output_____"
],
[
"# I. Python Overview\nThis is a quick introduction to Python. There are lots of other places to learn the language more thoroughly. I have collected a list of useful links, including ones to other learning resources, at the end of this notebook. If you want a little more depth, [Python Tutorial](http://docs.python.org/2/tutorial/) is a great place to start, as is Zed Shaw's [Learn Python the Hard Way](http://learnpythonthehardway.org/book/).\n\nThe lessons that follow make use of the IPython notebooks. There's a good introduction to notebooks [in the IPython notebook documentation](http://ipython.org/notebook.html) that even has a [nice video](http://www.youtube.com/watch?v=H6dLGQw9yFQ#!) on how to use the notebooks. You should probably also flip through the [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html) in your copious free time.\n\nBriefly, notebooks have code cells (that are generally followed by result cells) and text cells. The text cells are the stuff that you're reading now. The code cells start with \"In []:\" with some number generally in the brackets. If you put your cursor in the code cell and hit Shift-Enter, the code will run in the Python interpreter and the result will print out in the output cell. You can then change things around and see whether you understand what's going on. If you need to know more, see the [IPython notebook documentation](http://ipython.org/notebook.html) or the [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html).",
"_____no_output_____"
],
[
"## Using Python as a Calculator",
"_____no_output_____"
],
[
"Many of the things I used to use a calculator for, I now use Python for:",
"_____no_output_____"
]
],
[
[
"2+2",
"_____no_output_____"
],
[
"(50-5*6)/4",
"_____no_output_____"
]
],
[
[
"(If you're typing this into an IPython notebook, or otherwise using notebook file, you hit shift-Enter to evaluate a cell.)",
"_____no_output_____"
],
[
"There are some gotchas compared to using a normal calculator.",
"_____no_output_____"
]
],
[
[
"7/3",
"_____no_output_____"
]
],
[
[
"Python integer division, like C or Fortran integer division, truncates the remainder and returns an integer. At least it does in version 2. In version 3, Python returns a floating point number. You can get a sneak preview of this feature in Python 2 by importing the module from the future features:\n\n from __future__ import division",
"_____no_output_____"
],
[
"Alternatively, you can convert one of the integers to a floating point number, in which case the division function returns another floating point number.",
"_____no_output_____"
]
],
[
[
"7/3.",
"_____no_output_____"
],
[
"7/float(3)",
"_____no_output_____"
]
],
[
[
"In the last few lines, we have sped by a lot of things that we should stop for a moment and explore a little more fully. We've seen, however briefly, two different data types: **integers**, also known as *whole numbers* to the non-programming world, and **floating point numbers**, also known (incorrectly) as *decimal numbers* to the rest of the world.\n\nWe've also seen the first instance of an **import** statement. Python has a huge number of libraries included with the distribution. To keep things simple, most of these variables and functions are not accessible from a normal Python interactive session. Instead, you have to import the name. For example, there is a **math** module containing many useful functions. To access, say, the square root function, you can either first\n\n from math import sqrt\n\nand then",
"_____no_output_____"
]
],
[
[
"sqrt(81)",
"_____no_output_____"
]
],
[
[
"or you can simply import the math library itself",
"_____no_output_____"
]
],
[
[
"import math\nmath.sqrt(81)",
"_____no_output_____"
]
],
[
[
"You can define variables using the equals (=) sign:",
"_____no_output_____"
]
],
[
[
"width = 20\nlength = 30\narea = length*width\narea",
"_____no_output_____"
]
],
[
[
"If you try to access a variable that you haven't yet defined, you get an error:",
"_____no_output_____"
]
],
[
[
"volume",
"_____no_output_____"
]
],
[
[
"and you need to define it:",
"_____no_output_____"
]
],
[
[
"depth = 10\nvolume = area*depth\nvolume",
"_____no_output_____"
]
],
[
[
"You can name a variable *almost* anything you want. It needs to start with an alphabetical character or \"\\_\", can contain alphanumeric charcters plus underscores (\"\\_\"). Certain words, however, are reserved for the language:\n\n and, as, assert, break, class, continue, def, del, elif, else, except, \n exec, finally, for, from, global, if, import, in, is, lambda, not, or,\n pass, print, raise, return, try, while, with, yield\n\nTrying to define a variable using one of these will result in a syntax error:",
"_____no_output_____"
]
],
[
[
"return = 0",
"_____no_output_____"
]
],
[
[
"The [Python Tutorial](http://docs.python.org/2/tutorial/introduction.html#using-python-as-a-calculator) has more on using Python as an interactive shell. The [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html) makes a nice complement to this, since IPython has a much more sophisticated iteractive shell.",
"_____no_output_____"
],
[
"## Strings\nStrings are lists of printable characters, and can be defined using either single quotes",
"_____no_output_____"
]
],
[
[
"'Hello, World!'",
"_____no_output_____"
]
],
[
[
"or double quotes",
"_____no_output_____"
]
],
[
[
"\"Hello, World!\"",
"_____no_output_____"
]
],
[
[
"But not both at the same time, unless you want one of the symbols to be part of the string.",
"_____no_output_____"
]
],
[
[
"\"He's a Rebel\"",
"_____no_output_____"
],
[
"'She asked, \"How are you today?\"'",
"_____no_output_____"
]
],
[
[
"Just like the other two data objects we're familiar with (ints and floats), you can assign a string to a variable",
"_____no_output_____"
]
],
[
[
"greeting = \"Hello, World!\"",
"_____no_output_____"
]
],
[
[
"The **print** statement is often used for printing character strings:",
"_____no_output_____"
]
],
[
[
"print greeting",
"Hello, World!\n"
]
],
[
[
"But it can also print data types other than strings:",
"_____no_output_____"
]
],
[
[
"print \"The area is \",area",
"The area is 600\n"
]
],
[
[
"In the above snipped, the number 600 (stored in the variable \"area\") is converted into a string before being printed out.",
"_____no_output_____"
],
[
"You can use the + operator to concatenate strings together:",
"_____no_output_____"
]
],
[
[
"statement = \"Hello,\" + \"World!\"\nprint statement",
"Hello,World!\n"
]
],
[
[
"Don't forget the space between the strings, if you want one there. ",
"_____no_output_____"
]
],
[
[
"statement = \"Hello, \" + \"World!\"\nprint statement",
"Hello, World!\n"
]
],
[
[
"You can use + to concatenate multiple strings in a single statement:",
"_____no_output_____"
]
],
[
[
"print \"This \" + \"is \" + \"a \" + \"longer \" + \"statement.\"",
"This is a longer statement.\n"
]
],
[
[
"If you have a lot of words to concatenate together, there are other, more efficient ways to do this. But this is fine for linking a few strings together.",
"_____no_output_____"
],
[
"## Lists\nVery often in a programming language, one wants to keep a group of similar items together. Python does this using a data type called **lists**.",
"_____no_output_____"
]
],
[
[
"days_of_the_week = [\"Sunday\",\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]",
"_____no_output_____"
]
],
[
[
"You can access members of the list using the **index** of that item:",
"_____no_output_____"
]
],
[
[
"days_of_the_week[2]",
"_____no_output_____"
]
],
[
[
"Python lists, like C, but unlike Fortran, use 0 as the index of the first element of a list. Thus, in this example, the 0 element is \"Sunday\", 1 is \"Monday\", and so on. If you need to access the *n*th element from the end of the list, you can use a negative index. For example, the -1 element of a list is the last element:",
"_____no_output_____"
]
],
[
[
"days_of_the_week[-1]",
"_____no_output_____"
]
],
[
[
"You can add additional items to the list using the .append() command:",
"_____no_output_____"
]
],
[
[
"languages = [\"Fortran\",\"C\",\"C++\"]\nlanguages.append(\"Python\")\nprint languages",
"['Fortran', 'C', 'C++', 'Python']\n"
]
],
[
[
"The **range()** command is a convenient way to make sequential lists of numbers:",
"_____no_output_____"
]
],
[
[
"range(10)",
"_____no_output_____"
]
],
[
[
"Note that range(n) starts at 0 and gives the sequential list of integers less than n. If you want to start at a different number, use range(start,stop)",
"_____no_output_____"
]
],
[
[
"range(2,8)",
"_____no_output_____"
]
],
[
[
"The lists created above with range have a *step* of 1 between elements. You can also give a fixed step size via a third command:",
"_____no_output_____"
]
],
[
[
"evens = range(0,20,2)\nevens",
"_____no_output_____"
],
[
"evens[3]",
"_____no_output_____"
]
],
[
[
"Lists do not have to hold the same data type. For example,",
"_____no_output_____"
]
],
[
[
"[\"Today\",7,99.3,\"\"]",
"_____no_output_____"
]
],
[
[
"However, it's good (but not essential) to use lists for similar objects that are somehow logically connected. If you want to group different data types together into a composite data object, it's best to use **tuples**, which we will learn about below.\n\nYou can find out how long a list is using the **len()** command:",
"_____no_output_____"
]
],
[
[
"help(len)",
"Help on built-in function len in module __builtin__:\n\nlen(...)\n len(object) -> integer\n \n Return the number of items of a sequence or collection.\n\n"
],
[
"len(evens)",
"_____no_output_____"
]
],
[
[
"## Iteration, Indentation, and Blocks\nOne of the most useful things you can do with lists is to *iterate* through them, i.e. to go through each element one at a time. To do this in Python, we use the **for** statement:",
"_____no_output_____"
]
],
[
[
"for day in days_of_the_week:\n print day",
"Sunday\nMonday\nTuesday\nWednesday\nThursday\nFriday\nSaturday\n"
]
],
[
[
      "This code snippet goes through each element of the list called **days_of_the_week** and assigns it to the variable **day**. It then executes everything in the indented block (in this case only one line of code, the print statement) using those variable assignments. When the program has gone through every element of the list, it exits the block.\n\n(Almost) every programming language defines blocks of code in some way. In Fortran, one uses END statements (ENDDO, ENDIF, etc.) to define code blocks. In C, C++, and Perl, one uses curly braces {} to define these blocks.\n\nPython uses a colon (\":\"), followed by indentation level to define code blocks. Everything at a higher level of indentation is taken to be in the same block. In the above example the block was only a single line, but we could have had longer blocks as well:",
"_____no_output_____"
]
],
[
[
"for day in days_of_the_week:\n statement = \"Today is \" + day\n print statement",
"Today is Sunday\nToday is Monday\nToday is Tuesday\nToday is Wednesday\nToday is Thursday\nToday is Friday\nToday is Saturday\n"
]
],
[
[
"The **range()** command is particularly useful with the **for** statement to execute loops of a specified length:",
"_____no_output_____"
]
],
[
[
"for i in range(20):\n print \"The square of \",i,\" is \",i*i",
"The square of 0 is 0\nThe square of 1 is 1\nThe square of 2 is 4\nThe square of 3 is 9\nThe square of 4 is 16\nThe square of 5 is 25\nThe square of 6 is 36\nThe square of 7 is 49\nThe square of 8 is 64\nThe square of 9 is 81\nThe square of 10 is 100\nThe square of 11 is 121\nThe square of 12 is 144\nThe square of 13 is 169\nThe square of 14 is 196\nThe square of 15 is 225\nThe square of 16 is 256\nThe square of 17 is 289\nThe square of 18 is 324\nThe square of 19 is 361\n"
]
],
[
[
"## Slicing\nLists and strings have something in common that you might not suspect: they can both be treated as sequences. You already know that you can iterate through the elements of a list. You can also iterate through the letters in a string:",
"_____no_output_____"
]
],
[
[
"for letter in \"Sunday\":\n print letter",
"S\nu\nn\nd\na\ny\n"
]
],
[
[
"This is only occasionally useful. Slightly more useful is the *slicing* operation, which you can also use on any sequence. We already know that we can use *indexing* to get the first element of a list:",
"_____no_output_____"
]
],
[
[
"days_of_the_week[0]",
"_____no_output_____"
]
],
[
[
"If we want the list containing the first two elements of a list, we can do this via",
"_____no_output_____"
]
],
[
[
"days_of_the_week[0:2]",
"_____no_output_____"
]
],
[
[
"or simply",
"_____no_output_____"
]
],
[
[
"days_of_the_week[:2]",
"_____no_output_____"
]
],
[
[
"If we want the last items of the list, we can do this with negative slicing:",
"_____no_output_____"
]
],
[
[
"days_of_the_week[-2:]",
"_____no_output_____"
]
],
[
[
"which is somewhat logically consistent with negative indices accessing the last elements of the list.\n\nYou can do:",
"_____no_output_____"
]
],
[
[
"workdays = days_of_the_week[1:6]\nprint workdays",
"['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']\n"
]
],
[
[
"Since strings are sequences, you can also do this to them:",
"_____no_output_____"
]
],
[
[
"day = \"Sunday\"\nabbreviation = day[:3]\nprint abbreviation",
"Sun\n"
]
],
[
[
"If we really want to get fancy, we can pass a third element into the slice, which specifies a step length (just like a third argument to the **range()** function specifies the step):",
"_____no_output_____"
]
],
[
[
"numbers = range(0,40)\nevens = numbers[2::2]\nevens",
"_____no_output_____"
]
],
[
[
      "Note that in this example I was even able to omit the second argument, so that the slice started at 2, went to the end of the list, and took every second element, to generate the list of even numbers less than 40.",
"_____no_output_____"
],
[
"## Booleans and Truth Testing\nWe have now learned a few data types. We have integers and floating point numbers, strings, and lists to contain them. We have also learned about lists, a container that can hold any data type. We have learned to print things out, and to iterate over items in lists. We will now learn about **boolean** variables that can be either True or False.\n\nWe invariably need some concept of *conditions* in programming to control branching behavior, to allow a program to react differently to different situations. If it's Monday, I'll go to work, but if it's Sunday, I'll sleep in. To do this in Python, we use a combination of **boolean** variables, which evaluate to either True or False, and **if** statements, that control branching based on boolean values.",
"_____no_output_____"
],
[
"For example:",
"_____no_output_____"
]
],
[
[
"if day == \"Sunday\":\n print \"Sleep in\"\nelse:\n print \"Go to work\"",
"Sleep in\n"
]
],
[
[
      "(Quick quiz: why did the snippet print \"Sleep in\" here? What is the variable \"day\" set to?)\n\nLet's take the snippet apart to see what happened. First, note the statement",
"_____no_output_____"
]
],
[
[
"day == \"Sunday\"",
"_____no_output_____"
]
],
[
[
      "If we evaluate it by itself, as we just did, we see that it returns a boolean value, True. The \"==\" operator performs *equality testing*. If the two items are equal, it returns True, otherwise it returns False. In this case, it is comparing two variables, the string \"Sunday\", and whatever is stored in the variable \"day\", which, in this case, is also the string \"Sunday\". Since the two strings are equal to each other, the truth test has the true value.",
"_____no_output_____"
],
[
      "The if statement that contains the truth test is followed by a code block (a colon followed by an indented block of code). If the boolean is true, it executes the code in that block. Since it is true in the above example, that code is executed, which is why we see \"Sleep in\".\n\nThe first block of code is followed by an **else** statement, which is executed if nothing else in the above if statement is true. Since the value was true, the else block is skipped here.\n\nYou can compare any data types in Python:",
"_____no_output_____"
]
],
[
[
"1 == 2",
"_____no_output_____"
],
[
"50 == 2*25",
"_____no_output_____"
],
[
"3 < 3.14159",
"_____no_output_____"
],
[
"1 == 1.0",
"_____no_output_____"
],
[
"1 != 0",
"_____no_output_____"
],
[
"1 <= 2",
"_____no_output_____"
],
[
"1 >= 1",
"_____no_output_____"
]
],
[
[
      "We see a few other boolean operators here, all of which should be self-explanatory. Less than, equality, non-equality, and so on.\n\nParticularly interesting is the 1 == 1.0 test, which is true, since even though the two objects are different data types (integer and floating point number), they have the same *value*. There is another boolean operator **is**, that tests whether two objects are the same object:",
"_____no_output_____"
]
],
[
[
"1 is 1.0",
"_____no_output_____"
]
],
[
[
"We can do boolean tests on lists as well:",
"_____no_output_____"
]
],
[
[
"[1,2,3] == [1,2,4]",
"_____no_output_____"
],
[
"[1,2,3] < [1,2,4]",
"_____no_output_____"
]
],
[
[
"Finally, note that you can also string multiple comparisons together, which can result in very intuitive tests:",
"_____no_output_____"
]
],
[
[
"hours = 5\n0 < hours < 24",
"_____no_output_____"
]
],
[
[
"If statements can have **elif** parts (\"else if\"), in addition to if/else parts. For example:",
"_____no_output_____"
]
],
[
[
"if day == \"Sunday\":\n print \"Sleep in\"\nelif day == \"Saturday\":\n print \"Do chores\"\nelse:\n print \"Go to work\"",
"Sleep in\n"
]
],
[
[
"Of course we can combine if statements with for loops, to make a snippet that is almost interesting:",
"_____no_output_____"
]
],
[
[
"for day in days_of_the_week:\n statement = \"Today is \" + day\n print statement\n if day == \"Sunday\":\n print \" Sleep in\"\n elif day == \"Saturday\":\n print \" Do chores\"\n else:\n print \" Go to work\"",
"Today is Sunday\n Sleep in\nToday is Monday\n Go to work\nToday is Tuesday\n Go to work\nToday is Wednesday\n Go to work\nToday is Thursday\n Go to work\nToday is Friday\n Go to work\nToday is Saturday\n Do chores\n"
]
],
[
[
"This is something of an advanced topic, but ordinary data types have boolean values associated with them, and, indeed, in early versions of Python there was not a separate boolean object. Essentially, anything that was a 0 value (the integer or floating point 0, an empty string \"\", or an empty list []) was False, and everything else was true. You can see the boolean value of any data object using the **bool()** function.",
"_____no_output_____"
]
],
[
[
"bool(1)",
"_____no_output_____"
],
[
"bool(0)",
"_____no_output_____"
],
[
"bool([\"This \",\" is \",\" a \",\" list\"])",
"_____no_output_____"
]
],
[
[
"## Code Example: The Fibonacci Sequence\nThe [Fibonacci sequence](http://en.wikipedia.org/wiki/Fibonacci_number) is a sequence in math that starts with 0 and 1, and then each successive entry is the sum of the previous two. Thus, the sequence goes 0,1,1,2,3,5,8,13,21,34,55,89,...\n\nA very common exercise in programming books is to compute the Fibonacci sequence up to some number **n**. First I'll show the code, then I'll discuss what it is doing.",
"_____no_output_____"
]
],
[
[
"n = 10\nsequence = [0,1]\nfor i in range(2,n): # This is going to be a problem if we ever set n <= 2!\n sequence.append(sequence[i-1]+sequence[i-2])\nprint sequence",
"[0, 1, 1, 2, 3, 5, 8, 13, 21, 34]\n"
]
],
[
[
      "Let's go through this line by line. First, we define the variable **n**, and set it to the integer 10. **n** is the length of the sequence we're going to form, and should probably have a better variable name. We then create a variable called **sequence**, and initialize it to the list with the integers 0 and 1 in it, the first two elements of the Fibonacci sequence. We have to create these elements \"by hand\", since the iterative part of the sequence requires two previous elements.\n\nWe then have a for loop over the list of integers from 2 (the next element of the list) to **n** (the length of the sequence). After the colon, we see a hash tag \"#\", and then a **comment** that if we had set **n** to some number less than 2 we would have a problem. Comments in Python start with #, and are good ways to make notes to yourself or to a user of your code explaining why you did what you did. Better than the comment here would be to test to make sure the value of **n** is valid, and to complain if it isn't; we'll try this later.\n\nIn the body of the loop, we append to the list an integer equal to the sum of the two previous elements of the list.\n\nAfter exiting the loop (ending the indentation) we then print out the whole list. That's it!",
"_____no_output_____"
],
[
      "## Functions\nWe might want to use the Fibonacci snippet with different sequence lengths. We could cut and paste the code into another cell, changing the value of **n**, but it's easier and more useful to make a function out of the code. We do this with the **def** statement in Python:",
"_____no_output_____"
]
],
[
[
"def fibonacci(sequence_length):\n \"Return the Fibonacci sequence of length *sequence_length*\"\n sequence = [0,1]\n if sequence_length < 1:\n print \"Fibonacci sequence only defined for length 1 or greater\"\n return\n if 0 < sequence_length < 3:\n return sequence[:sequence_length]\n for i in range(2,sequence_length): \n sequence.append(sequence[i-1]+sequence[i-2])\n return sequence",
"_____no_output_____"
]
],
[
[
"We can now call **fibonacci()** for different sequence_lengths:",
"_____no_output_____"
]
],
[
[
"fibonacci(2)",
"_____no_output_____"
],
[
"fibonacci(12)",
"_____no_output_____"
]
],
[
[
"We've introduced a several new features here. First, note that the function itself is defined as a code block (a colon followed by an indented block). This is the standard way that Python delimits things. Next, note that the first line of the function is a single string. This is called a **docstring**, and is a special kind of comment that is often available to people using the function through the python command line:",
"_____no_output_____"
]
],
[
[
"help(fibonacci)",
"Help on function fibonacci in module __main__:\n\nfibonacci(sequence_length)\n Return the Fibonacci sequence of length *sequence_length*\n\n"
]
],
[
[
"If you define a docstring for all of your functions, it makes it easier for other people to use them, since they can get help on the arguments and return values of the function.\n\nNext, note that rather than putting a comment in about what input values lead to errors, we have some testing of these values, followed by a warning if the value is invalid, and some conditional code to handle special cases.",
"_____no_output_____"
],
[
"## Recursion and Factorials\nFunctions can also call themselves, something that is often called *recursion*. We're going to experiment with recursion by computing the factorial function. The factorial is defined for a positive integer **n** as\n \n$$ n! = n(n-1)(n-2)\\cdots 1 $$\n\nFirst, note that we don't need to write a function at all, since this is a function built into the standard math library. Let's use the help function to find out about it:",
"_____no_output_____"
]
],
[
[
"from math import factorial\nhelp(factorial)",
"Help on built-in function factorial in module math:\n\nfactorial(...)\n factorial(x) -> Integral\n \n Find x!. Raise a ValueError if x is negative or non-integral.\n\n"
]
],
[
[
"This is clearly what we want.",
"_____no_output_____"
]
],
[
[
"factorial(20)",
"_____no_output_____"
]
],
[
[
"However, if we did want to write a function ourselves, we could do recursively by noting that\n\n$$ n! = n(n-1)!$$\n\nThe program then looks something like:",
"_____no_output_____"
]
],
[
[
"def fact(n):\n if n <= 0:\n return 1\n return n*fact(n-1)",
"_____no_output_____"
],
[
"fact(20)",
"_____no_output_____"
]
],
[
[
"Recursion can be very elegant, and can lead to very simple programs.",
"_____no_output_____"
],
[
"## Two More Data Structures: Tuples and Dictionaries\nBefore we end the Python overview, I wanted to touch on two more data structures that are very useful (and thus very common) in Python programs.\n\nA **tuple** is a sequence object like a list or a string. It's constructed by grouping a sequence of objects together with commas, either without brackets, or with parentheses:",
"_____no_output_____"
]
],
[
[
"t = (1,2,'hi',9.0)\nt",
"_____no_output_____"
]
],
[
[
"Tuples are like lists, in that you can access the elements using indices:",
"_____no_output_____"
]
],
[
[
"t[1]",
"_____no_output_____"
]
],
[
[
"However, tuples are *immutable*, you can't append to them or change the elements of them:",
"_____no_output_____"
]
],
[
[
"t.append(7)",
"_____no_output_____"
],
[
"t[1]=77",
"_____no_output_____"
]
],
[
[
"Tuples are useful anytime you want to group different pieces of data together in an object, but don't want to create a full-fledged class (see below) for them. For example, let's say you want the Cartesian coordinates of some objects in your program. Tuples are a good way to do this:",
"_____no_output_____"
]
],
[
[
"('Bob',0.0,21.0)",
"_____no_output_____"
]
],
[
[
"Again, it's not a necessary distinction, but one way to distinguish tuples and lists is that tuples are a collection of different things, here a name, and x and y coordinates, whereas a list is a collection of similar things, like if we wanted a list of those coordinates:",
"_____no_output_____"
]
],
[
[
"positions = [\n ('Bob',0.0,21.0),\n ('Cat',2.5,13.1),\n ('Dog',33.0,1.2)\n ]",
"_____no_output_____"
]
],
[
[
"Tuples can be used when functions return more than one value. Say we wanted to compute the smallest x- and y-coordinates of the above list of objects. We could write:",
"_____no_output_____"
]
],
[
[
"def minmax(objects):\n minx = 1e20 # These are set to really big numbers\n miny = 1e20\n for obj in objects:\n name,x,y = obj\n if x < minx: \n minx = x\n if y < miny:\n miny = y\n return minx,miny\n\nx,y = minmax(positions)\nprint x,y",
"0.0 1.2\n"
]
],
[
[
"Here we did two things with tuples you haven't seen before. First, we unpacked an object into a set of named variables using *tuple assignment*:\n\n >>> name,x,y = obj\n\nWe also returned multiple values (minx,miny), which were then assigned to two other variables (x,y), again by tuple assignment. This makes what would have been complicated code in C++ rather simple.\n\nTuple assignment is also a convenient way to swap variables:",
"_____no_output_____"
]
],
[
[
"x,y = 1,2\ny,x = x,y\nx,y",
"_____no_output_____"
]
],
[
[
"**Dictionaries** are an object called \"mappings\" or \"associative arrays\" in other languages. Whereas a list associates an integer index with a set of objects:",
"_____no_output_____"
]
],
[
[
"mylist = [1,2,9,21]",
"_____no_output_____"
]
],
[
[
"The index in a dictionary is called the *key*, and the corresponding dictionary entry is the *value*. A dictionary can use (almost) anything as the key. Whereas lists are formed with square brackets [], dictionaries use curly brackets {}:",
"_____no_output_____"
]
],
[
[
"ages = {\"Rick\": 46, \"Bob\": 86, \"Fred\": 21}\nprint \"Rick's age is \",ages[\"Rick\"]",
"Rick's age is 46\n"
]
],
[
[
"There's also a convenient way to create dictionaries without having to quote the keys.",
"_____no_output_____"
]
],
[
[
"dict(Rick=46,Bob=86,Fred=20)",
"_____no_output_____"
]
],
[
[
"The **len()** command works on both tuples and dictionaries:",
"_____no_output_____"
]
],
[
[
"len(t)",
"_____no_output_____"
],
[
"len(ages)",
"_____no_output_____"
]
],
[
[
      "## Plotting with Matplotlib\nWe can generally understand trends in data by using a plotting program to chart it. Python has a wonderful plotting library called [Matplotlib](http://matplotlib.sf.net). The IPython notebook interface we are using for these notes has that functionality built in.\n\nAs an example, we have looked at two different functions, the Fibonacci function, and the factorial function, both of which grow faster than polynomially. Which one grows the fastest? Let's plot them. First, let's generate the Fibonacci sequence of length 10:",
"_____no_output_____"
]
],
[
[
"fibs = fibonacci(10)",
"_____no_output_____"
]
],
[
[
"Next lets generate the factorials.",
"_____no_output_____"
]
],
[
[
"facts = []\nfor i in range(10):\n facts.append(factorial(i))",
"_____no_output_____"
]
],
[
[
"Now we use the Matplotlib function **plot** to compare the two.",
"_____no_output_____"
]
],
[
[
"figsize(8,6)\nplot(facts,label=\"factorial\")\nplot(fibs,label=\"Fibonacci\")\nxlabel(\"n\")\nlegend()",
"_____no_output_____"
]
],
[
[
"The factorial function grows much faster. In fact, you can't even see the Fibonacci sequence. It's not entirely surprising: a function where we multiply by n each iteration is bound to grow faster than one where we add (roughly) n each iteration.\n\nLet's plot these on a semilog plot so we can see them both a little more clearly:",
"_____no_output_____"
]
],
[
[
"semilogy(facts,label=\"factorial\")\nsemilogy(fibs,label=\"Fibonacci\")\nxlabel(\"n\")\nlegend()",
"_____no_output_____"
]
],
[
[
"There are many more things you can do with Matplotlib. We'll be looking at some of them in the sections to come. In the meantime, if you want an idea of the different things you can do, look at the Matplotlib [Gallery](http://matplotlib.org/gallery.html). Rob Johansson's IPython notebook [Introduction to Matplotlib](http://nbviewer.ipython.org/urls/raw.github.com/jrjohansson/scientific-python-lectures/master/Lecture-4-Matplotlib.ipynb) is also particularly good.",
"_____no_output_____"
],
[
"## Conclusion of the Python Overview\nThere is, of course, much more to the language than I've covered here. I've tried to keep this brief enough so that you can jump in and start using Python to simplify your life and work. My own experience in learning new things is that the information doesn't \"stick\" unless you try and use it for something in real life.\n\nYou will no doubt need to learn more as you go. I've listed several other good references, including the [Python Tutorial](http://docs.python.org/2/tutorial/) and [Learn Python the Hard Way](http://learnpythonthehardway.org/book/). Additionally, now is a good time to start familiarizing yourself with the [Python Documentation](http://docs.python.org/2.7/), and, in particular, the [Python Language Reference](http://docs.python.org/2.7/reference/index.html).\n\nTim Peters, one of the earliest and most prolific Python contributors, wrote the \"Zen of Python\", which can be accessed via the \"import this\" command:",
"_____no_output_____"
]
],
[
[
"import this",
"The Zen of Python, by Tim Peters\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n"
]
],
[
[
"No matter how experienced a programmer you are, these are words to meditate on.",
"_____no_output_____"
],
[
      "# II. Numpy and Scipy\n\n[Numpy](http://numpy.org) contains core routines for doing fast vector, matrix, and linear algebra-type operations in Python. [Scipy](http://scipy.org) contains additional routines for optimization, special functions, and so on. Both contain modules written in C and Fortran so that they're as fast as possible. Together, they give Python roughly the same capability that the [Matlab](http://www.mathworks.com/products/matlab/) program offers. (In fact, if you're an experienced Matlab user, there is a [guide to Numpy for Matlab users](http://www.scipy.org/NumPy_for_Matlab_Users) just for you.)\n\n## Making vectors and matrices\nFundamental to both Numpy and Scipy is the ability to work with vectors and matrices. You can create vectors from lists using the **array** command:",
"_____no_output_____"
]
],
[
[
"array([1,2,3,4,5,6])",
"_____no_output_____"
]
],
[
[
"You can pass in a second argument to **array** that gives the numeric type. There are a number of types [listed here](http://docs.scipy.org/doc/numpy/user/basics.types.html) that your matrix can be. Some of these are aliased to single character codes. The most common ones are 'd' (double precision floating point number), 'D' (double precision complex number), and 'i' (int32). Thus,",
"_____no_output_____"
]
],
[
[
"array([1,2,3,4,5,6],'d')",
"_____no_output_____"
],
[
"array([1,2,3,4,5,6],'D')",
"_____no_output_____"
],
[
"array([1,2,3,4,5,6],'i')",
"_____no_output_____"
]
],
[
[
"To build matrices, you can either use the array command with lists of lists:",
"_____no_output_____"
]
],
[
[
"array([[0,1],[1,0]],'d')",
"_____no_output_____"
]
],
[
[
"You can also form empty (zero) matrices of arbitrary shape (including vectors, which Numpy treats as vectors with one row), using the **zeros** command:",
"_____no_output_____"
]
],
[
[
"zeros((3,3),'d')",
"_____no_output_____"
]
],
[
[
"The first argument is a tuple containing the shape of the matrix, and the second is the data type argument, which follows the same conventions as in the array command. Thus, you can make row vectors:",
"_____no_output_____"
]
],
[
[
"zeros(3,'d')",
"_____no_output_____"
],
[
"zeros((1,3),'d')",
"_____no_output_____"
]
],
[
[
"or column vectors:",
"_____no_output_____"
]
],
[
[
"zeros((3,1),'d')",
"_____no_output_____"
]
],
[
[
"There's also an **identity** command that behaves as you'd expect:",
"_____no_output_____"
]
],
[
[
"identity(4,'d')",
"_____no_output_____"
]
],
[
[
"as well as a **ones** command.",
"_____no_output_____"
],
[
"## Linspace, matrix functions, and plotting\nThe **linspace** command makes a linear array of points from a starting to an ending value.",
"_____no_output_____"
]
],
[
[
"linspace(0,1)",
"_____no_output_____"
]
],
[
[
"If you provide a third argument, it takes that as the number of points in the space. If you don't provide the argument, it gives a length 50 linear space.",
"_____no_output_____"
]
],
[
[
"linspace(0,1,11)",
"_____no_output_____"
]
],
[
[
"**linspace** is an easy way to make coordinates for plotting. Functions in the numpy library (all of which are imported into IPython notebook) can act on an entire vector (or even a matrix) of points at once. Thus,",
"_____no_output_____"
]
],
[
[
"x = linspace(0,2*pi)\nsin(x)",
"_____no_output_____"
]
],
[
[
"In conjunction with **matplotlib**, this is a nice way to plot things:",
"_____no_output_____"
]
],
[
[
"plot(x,sin(x))",
"_____no_output_____"
]
],
[
[
"## Matrix operations\nMatrix objects act sensibly when multiplied by scalars:",
"_____no_output_____"
]
],
[
[
"0.125*identity(3,'d')",
"_____no_output_____"
]
],
[
[
"as well as when you add two matrices together. (However, the matrices have to be the same shape.)",
"_____no_output_____"
]
],
[
[
"identity(2,'d') + array([[1,1],[1,2]])",
"_____no_output_____"
]
],
[
[
      "Something that confuses Matlab users is that the times (*) operator gives element-wise multiplication rather than matrix multiplication:",
"_____no_output_____"
]
],
[
[
"identity(2)*ones((2,2))",
"_____no_output_____"
]
],
[
[
"To get matrix multiplication, you need the **dot** command:",
"_____no_output_____"
]
],
[
[
"dot(identity(2),ones((2,2)))",
"_____no_output_____"
]
],
[
[
"**dot** can also do dot products (duh!):",
"_____no_output_____"
]
],
[
[
"v = array([3,4],'d')\nsqrt(dot(v,v))",
"_____no_output_____"
]
],
[
[
"as well as matrix-vector products.",
"_____no_output_____"
],
[
"There are **determinant**, **inverse**, and **transpose** functions that act as you would suppose. Transpose can be abbreviated with \".T\" at the end of a matrix object:",
"_____no_output_____"
]
],
[
[
"m = array([[1,2],[3,4]])\nm.T",
"_____no_output_____"
]
],
[
[
"There's also a **diag()** function that takes a list or a vector and puts it along the diagonal of a square matrix. ",
"_____no_output_____"
]
],
[
[
"diag([1,2,3,4,5])",
"_____no_output_____"
]
],
[
[
"We'll find this useful later on.",
"_____no_output_____"
],
[
"## Matrix Solvers\nYou can solve systems of linear equations using the **solve** command:",
"_____no_output_____"
]
],
[
[
"A = array([[1,1,1],[0,2,5],[2,5,-1]])\nb = array([6,-4,27])\nsolve(A,b)",
"_____no_output_____"
]
],
[
[
"There are a number of routines to compute eigenvalues and eigenvectors\n\n* **eigvals** returns the eigenvalues of a matrix\n* **eigvalsh** returns the eigenvalues of a Hermitian matrix\n* **eig** returns the eigenvalues and eigenvectors of a matrix\n* **eigh** returns the eigenvalues and eigenvectors of a Hermitian matrix.",
"_____no_output_____"
]
],
[
[
"A = array([[13,-4],[-4,7]],'d')\neigvalsh(A)",
"_____no_output_____"
],
[
"eigh(A)",
"_____no_output_____"
]
],
[
[
"## Example: Finite Differences\nNow that we have these tools in our toolbox, we can start to do some cool stuff with it. Many of the equations we want to solve in Physics involve differential equations. We want to be able to compute the derivative of functions:\n\n$$ y' = \\frac{y(x+h)-y(x)}{h} $$\n\nby *discretizing* the function $y(x)$ on an evenly spaced set of points $x_0, x_1, \\dots, x_n$, yielding $y_0, y_1, \\dots, y_n$. Using the discretization, we can approximate the derivative by\n\n$$ y_i' \\approx \\frac{y_{i+1}-y_{i-1}}{x_{i+1}-x_{i-1}} $$\n\nWe can write a derivative function in Python via",
"_____no_output_____"
]
],
[
[
"def nderiv(y,x):\n \"Finite difference derivative of the function f\"\n n = len(y)\n d = zeros(n,'d') # assume double\n # Use centered differences for the interior points, one-sided differences for the ends\n for i in range(1,n-1):\n d[i] = (y[i+1]-y[i-1])/(x[i+1]-x[i-1])\n d[0] = (y[1]-y[0])/(x[1]-x[0])\n d[n-1] = (y[n-1]-y[n-2])/(x[n-1]-x[n-2])\n return d",
"_____no_output_____"
]
],
[
[
"Let's see whether this works for our sin example from above:",
"_____no_output_____"
]
],
[
[
"x = linspace(0,2*pi)\ndsin = nderiv(sin(x),x)\nplot(x,dsin,label='numerical')\nplot(x,cos(x),label='analytical')\ntitle(\"Comparison of numerical and analytical derivatives of sin(x)\")\nlegend()",
"_____no_output_____"
]
],
[
[
"Pretty close!",
"_____no_output_____"
],
[
      "## One-Dimensional Harmonic Oscillator using Finite Difference\nNow that we've convinced ourselves that finite differences aren't a terrible approximation, let's see if we can use this to solve the one-dimensional harmonic oscillator.\n\nWe want to solve the time-independent Schrodinger equation\n\n$$ -\\frac{\\hbar^2}{2m}\\frac{\\partial^2\\psi(x)}{\\partial x^2} + V(x)\\psi(x) = E\\psi(x)$$\n\nfor $\\psi(x)$ when $V(x)=\\frac{1}{2}m\\omega^2x^2$ is the harmonic oscillator potential. We're going to use the standard trick to transform the differential equation into a matrix equation by multiplying both sides by $\\psi^*(x)$ and integrating over $x$. This yields\n\n$$ -\\frac{\\hbar^2}{2m}\\int\\psi(x)\\frac{\\partial^2}{\\partial x^2}\\psi(x)dx + \\int\\psi(x)V(x)\\psi(x)dx = E$$\n\nWe will again use the finite difference approximation. The finite difference formula for the second derivative is\n\n$$ y'' = \\frac{y_{i+1}-2y_i+y_{i-1}}{h^2} $$\n\nwhere $h=x_{i+1}-x_i$ is the (uniform) grid spacing. We can think of the first term in the Schrodinger equation as the overlap of the wave function $\\psi(x)$ with the second derivative of the wave function $\\frac{\\partial^2}{\\partial x^2}\\psi(x)$. Given the above expression for the second derivative, we can see if we take the overlap of the states $y_1,\\dots,y_n$ with the second derivative, we will only have three points where the overlap is nonzero, at $y_{i-1}$, $y_i$, and $y_{i+1}$. In matrix form, this leads to the tridiagonal Laplacian matrix, which has -2's along the diagonal, and 1's along the diagonals above and below the main diagonal.\n\nThe second term leads to a diagonal matrix with $V(x_i)$ on the diagonal elements. Putting all of these pieces together, we get:",
"_____no_output_____"
]
],
[
[
"def Laplacian(x):\n h = x[1]-x[0] # assume uniformly spaced points\n n = len(x)\n M = -2*identity(n,'d')\n for i in range(1,n):\n M[i,i-1] = M[i-1,i] = 1\n return M/h**2",
"_____no_output_____"
],
[
"x = linspace(-3,3)\nm = 1.0\nohm = 1.0\nT = (-0.5/m)*Laplacian(x)\nV = 0.5*(ohm**2)*(x**2)\nH = T + diag(V)\nE,U = eigh(H)\nh = x[1]-x[0]\n\n# Plot the Harmonic potential\nplot(x,V,color='k')\n\nfor i in range(4):\n # For each of the first few solutions, plot the energy level:\n axhline(y=E[i],color='k',ls=\":\")\n # as well as the eigenfunction, displaced by the energy level so they don't\n # all pile up on each other:\n plot(x,-U[:,i]/sqrt(h)+E[i])\ntitle(\"Eigenfunctions of the Quantum Harmonic Oscillator\")\nxlabel(\"Displacement (bohr)\")\nylabel(\"Energy (hartree)\")",
"_____no_output_____"
]
],
[
[
"We've made a couple of hacks here to get the orbitals the way we want them. First, I inserted a -1 factor before the wave functions, to fix the phase of the lowest state. The phase (sign) of a quantum wave function doesn't hold any information, only the square of the wave function does, so this doesn't really change anything. \n\nBut the eigenfunctions as we generate them aren't properly normalized. The reason is that finite difference isn't a real basis in the quantum mechanical sense. It's a basis of Dirac δ functions at each point; we interpret the space between the points as being \"filled\" by the wave function, but the finite difference basis only has the solution being at the points themselves. We can fix this by dividing the eigenfunctions of our finite difference Hamiltonian by the square root of the spacing, and this gives properly normalized functions.",
"_____no_output_____"
],
[
"## Special Functions\nThe solutions to the Harmonic Oscillator are supposed to be Hermite polynomials. The Wikipedia page has the HO states given by\n\n$$\\psi_n(x) = \\frac{1}{\\sqrt{2^n n!}}\n\\left(\\frac{m\\omega}{\\pi\\hbar}\\right)^{1/4}\n\\exp\\left(-\\frac{m\\omega x^2}{2\\hbar}\\right)\nH_n\\left(\\sqrt{\\frac{m\\omega}{\\hbar}}x\\right)$$\n\nLet's see whether they look like those. There are some special functions in the Numpy library, and some more in Scipy. Hermite Polynomials are in Numpy:",
"_____no_output_____"
]
],
[
[
"from numpy.polynomial.hermite import Hermite\ndef ho_evec(x,n,m,ohm):\n vec = [0]*9\n vec[n] = 1\n Hn = Hermite(vec)\n return (1/sqrt(2**n*factorial(n)))*pow(m*ohm/pi,0.25)*exp(-0.5*m*ohm*x**2)*Hn(x*sqrt(m*ohm))",
"_____no_output_____"
]
],
[
[
"Let's compare the first function to our solution.",
"_____no_output_____"
]
],
[
[
"plot(x,ho_evec(x,0,1,1),label=\"Analytic\")\nplot(x,-U[:,0]/sqrt(h),label=\"Numeric\")\nxlabel('x (bohr)')\nylabel(r'$\\psi(x)$')\ntitle(\"Comparison of numeric and analytic solutions to the Harmonic Oscillator\")\nlegend()",
"_____no_output_____"
]
],
[
[
"The agreement is almost exact.",
"_____no_output_____"
],
[
"We can use the **subplot** command to put multiple comparisons in different panes on a single plot:",
"_____no_output_____"
]
],
[
[
"phase_correction = [-1,1,1,-1,-1,1]\nfor i in range(6):\n subplot(2,3,i+1)\n plot(x,ho_evec(x,i,1,1),label=\"Analytic\")\n plot(x,phase_correction[i]*U[:,i]/sqrt(h),label=\"Numeric\")",
"_____no_output_____"
]
],
[
[
"Other than phase errors (which I've corrected with a little hack: can you find it?), the agreement is pretty good, although it gets worse the higher in energy we get, in part because we used only 50 points.\n\nThe Scipy module has many more special functions:",
"_____no_output_____"
]
],
[
[
"from scipy.special import airy,jn,eval_chebyt,eval_legendre\nsubplot(2,2,1)\nx = linspace(-1,1)\nAi,Aip,Bi,Bip = airy(x)\nplot(x,Ai)\nplot(x,Aip)\nplot(x,Bi)\nplot(x,Bip)\ntitle(\"Airy functions\")\n\nsubplot(2,2,2)\nx = linspace(0,10)\nfor i in range(4):\n plot(x,jn(i,x))\ntitle(\"Bessel functions\")\n\nsubplot(2,2,3)\nx = linspace(-1,1)\nfor i in range(6):\n plot(x,eval_chebyt(i,x))\ntitle(\"Chebyshev polynomials of the first kind\")\n\nsubplot(2,2,4)\nx = linspace(-1,1)\nfor i in range(6):\n plot(x,eval_legendre(i,x))\ntitle(\"Legendre polynomials\")",
"_____no_output_____"
]
],
[
[
"As well as Jacobi, Laguerre, Hermite polynomials, Hypergeometric functions, and many others. There's a full listing at the [Scipy Special Functions Page](http://docs.scipy.org/doc/scipy/reference/special.html).",
"_____no_output_____"
],
[
"## Least squares fitting\nVery often we deal with some data that we want to fit to some sort of expected behavior. Say we have the following:",
"_____no_output_____"
]
],
[
[
"raw_data = \"\"\"\\\n3.1905781584582433,0.028208609537968457\n4.346895074946466,0.007160804747670053\n5.374732334047101,0.0046962988461934805\n8.201284796573875,0.0004614473299618756\n10.899357601713055,0.00005038370219939726\n16.295503211991434,4.377451812785309e-7\n21.82012847965739,3.0799922117601088e-9\n32.48394004282656,1.524776208284536e-13\n43.53319057815846,5.5012073588707224e-18\"\"\"",
"_____no_output_____"
]
],
[
[
"There's a section below on parsing CSV data. We'll steal the parser from that. For an explanation, skip ahead to that section. Otherwise, just assume that this is a way to parse that text into a numpy array that we can plot and do other analyses with.",
"_____no_output_____"
]
],
[
[
"data = []\nfor line in raw_data.splitlines():\n words = line.split(',')\n data.append(map(float,words))\ndata = array(data)",
"_____no_output_____"
],
[
"title(\"Raw Data\")\nxlabel(\"Distance\")\nplot(data[:,0],data[:,1],'bo')",
"_____no_output_____"
]
],
[
[
"Since we expect the data to have an exponential decay, we can plot it using a semi-log plot.",
"_____no_output_____"
]
],
[
[
"title(\"Raw Data\")\nxlabel(\"Distance\")\nsemilogy(data[:,0],data[:,1],'bo')",
"_____no_output_____"
]
],
[
[
"For a pure exponential decay like this, we can fit the log of the data to a straight line. The above plot suggests this is a good approximation. Given a function\n$$ y = Ae^{-ax} $$\n$$ \\log(y) = \\log(A) - ax$$\nThus, if we fit the log of the data versus x, we should get a straight line with slope $-a$, and an intercept that gives the constant $A$.\n\nThere's a numpy function called **polyfit** that will fit data to a polynomial form. We'll use this to fit to a straight line (a polynomial of order 1)",
"_____no_output_____"
]
],
[
[
"params = polyfit(data[:,0],log(data[:,1]),1)\na = params[0]\nA = exp(params[1])",
"_____no_output_____"
]
],
[
[
"Let's see whether this curve fits the data.",
"_____no_output_____"
]
],
[
[
"x = linspace(1,45)\ntitle(\"Raw Data\")\nxlabel(\"Distance\")\nsemilogy(data[:,0],data[:,1],'bo')\nsemilogy(x,A*exp(a*x),'b-')",
"_____no_output_____"
]
],
[
[
"If we have more complicated functions, we may not be able to get away with fitting to a simple polynomial. Consider the following data:",
"_____no_output_____"
]
],
[
[
"gauss_data = \"\"\"\\\n-0.9902286902286903,1.4065274110372852e-19\n-0.7566104566104566,2.2504438576596563e-18\n-0.5117810117810118,1.9459459459459454\n-0.31887271887271884,10.621621621621626\n-0.250997150997151,15.891891891891893\n-0.1463309463309464,23.756756756756754\n-0.07267267267267263,28.135135135135133\n-0.04426734426734419,29.02702702702703\n-0.0015939015939017698,29.675675675675677\n0.04689304689304685,29.10810810810811\n0.0840994840994842,27.324324324324326\n0.1700546700546699,22.216216216216214\n0.370878570878571,7.540540540540545\n0.5338338338338338,1.621621621621618\n0.722014322014322,0.08108108108108068\n0.9926849926849926,-0.08108108108108646\"\"\"\n\ndata = []\nfor line in gauss_data.splitlines():\n words = line.split(',')\n data.append(map(float,words))\ndata = array(data)\n\nplot(data[:,0],data[:,1],'bo')",
"_____no_output_____"
]
],
[
[
"This data looks more Gaussian than exponential. If we wanted to, we could use **polyfit** for this as well, but let's use the **curve_fit** function from Scipy, which can fit to arbitrary functions. You can learn more using help(curve_fit).\n\nFirst define a general Gaussian function to fit to.",
"_____no_output_____"
]
],
[
[
"def gauss(x,A,a): return A*exp(a*x**2)",
"_____no_output_____"
]
],
[
[
"Now fit to it using **curve_fit**:",
"_____no_output_____"
]
],
[
[
"from scipy.optimize import curve_fit\n\nparams,conv = curve_fit(gauss,data[:,0],data[:,1])\nx = linspace(-1,1)\nplot(data[:,0],data[:,1],'bo')\nA,a = params\nplot(x,gauss(x,A,a),'b-')",
"_____no_output_____"
]
],
[
[
"The **curve_fit** routine we just used is built on top of a very good general **minimization** capability in Scipy. You can learn more [at the scipy documentation pages](http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html).",
"_____no_output_____"
],
[
"## Monte Carlo, random numbers, and computing $\\pi$\nMany methods in scientific computing rely on Monte Carlo integration, where a sequence of (pseudo) random numbers are used to approximate the integral of a function. Python has good random number generators in the standard library. The **random()** function gives pseudorandom numbers uniformly distributed between 0 and 1:",
"_____no_output_____"
]
],
[
[
"from random import random\nrands = []\nfor i in range(100):\n rands.append(random())\nplot(rands)",
"_____no_output_____"
]
],
[
[
"**random()** uses the [Mersenne Twister](http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html) algorithm, which is a highly regarded pseudorandom number generator. There are also functions to generate random integers, to randomly shuffle a list, and functions to pick random numbers from a particular distribution, like the normal distribution:",
"_____no_output_____"
]
],
[
[
"from random import gauss\ngrands = []\nfor i in range(100):\n grands.append(gauss(0,1))\nplot(grands)",
"_____no_output_____"
]
],
[
[
"It is generally more efficient to generate a list of random numbers all at once, particularly if you're drawing from a non-uniform distribution. Numpy has functions to generate vectors and matrices of particular types of random distributions.",
"_____no_output_____"
]
],
[
[
"plot(rand(100))",
"_____no_output_____"
]
],
[
[
"One of the first programs I ever wrote was a program to compute $\\pi$ by taking random numbers as x and y coordinates, and counting how many of them were in the unit circle. For example:",
"_____no_output_____"
]
],
[
[
"npts = 5000\nxs = 2*rand(npts)-1\nys = 2*rand(npts)-1\nr = xs**2+ys**2\nninside = (r<1).sum()\nfigsize(6,6) # make the figure square\ntitle(\"Approximation to pi = %f\" % (4*ninside/float(npts)))\nplot(xs[r<1],ys[r<1],'b.')\nplot(xs[r>1],ys[r>1],'r.')\nfigsize(8,6) # change the figsize back to 4x3 for the rest of the notebook",
"_____no_output_____"
]
],
[
[
"The idea behind the program is that the ratio of the area of the unit circle to the square that inscribes it is $\\pi/4$, so by counting the fraction of the random points in the square that are inside the circle, we get increasingly good estimates to $\\pi$. \n\nThe above code uses some higher level Numpy tricks to compute the radius of each point in a single line, to count how many radii are below one in a single line, and to filter the x,y points based on their radii. To be honest, I rarely write code like this: I find some of these Numpy tricks a little too cute to remember them, and I'm more likely to use a list comprehension (see below) to filter the points I want, since I can remember that.\n\nAs methods of computing $\\pi$ go, this is among the worst. A much better method is to use Leibniz's expansion of arctan(1):\n\n$$\\frac{\\pi}{4} = \\sum_k \\frac{(-1)^k}{2k+1}$$",
"_____no_output_____"
]
],
[
[
"n = 100\ntotal = 0\nfor k in range(n):\n total += pow(-1,k)/(2*k+1.0)\nprint 4*total",
"3.13159290356\n"
]
],
[
[
"If you're interested in a great method, check out [Ramanujan's method](http://en.wikipedia.org/wiki/Approximations_of_%CF%80). This converges so fast you really need arbitrary precision math to display enough decimal places. You can do this with the Python **decimal** module, if you're interested.",
"_____no_output_____"
],
[
"## Numerical Integration\nIntegration can be hard, and sometimes it's easier to work out a definite integral using an approximation. For example, suppose we wanted to figure out the integral:\n\n$$\\int_0^\\infty\\exp(-x)dx=1$$",
"_____no_output_____"
]
],
[
[
"from numpy import sqrt\ndef f(x): return exp(-x)\nx = linspace(0,10)\nplot(x,exp(-x))",
"_____no_output_____"
]
],
[
[
"Scipy has a numerical integration routine **quad** (since sometimes numerical integration is called *quadrature*), that we can use for this:",
"_____no_output_____"
]
],
[
[
"from scipy.integrate import quad\nquad(f,0,inf)",
"_____no_output_____"
]
],
[
[
"There are also 2d and 3d numerical integrators in Scipy. [See the docs](http://docs.scipy.org/doc/scipy/reference/integrate.html) for more information.",
"_____no_output_____"
],
[
"## Fast Fourier Transform and Signal Processing\n",
"_____no_output_____"
],
[
"Very often we want to use FFT techniques to help obtain the signal from noisy data. Scipy has several different options for this.",
"_____no_output_____"
]
],
[
[
"from scipy.fftpack import fft,fftfreq\n\nnpts = 4000\nnplot = npts/10\nt = linspace(0,120,npts)\ndef acc(t): return 10*sin(2*pi*2.0*t) + 5*sin(2*pi*8.0*t) + 2*rand(npts)\n\nsignal = acc(t)\n\nFFT = abs(fft(signal))\nfreqs = fftfreq(npts, t[1]-t[0])\n\nsubplot(211)\nplot(t[:nplot], signal[:nplot])\nsubplot(212)\nplot(freqs,20*log10(FFT),',')\nshow()",
"_____no_output_____"
]
],
[
[
"There are additional signal processing routines in Scipy that you can [read about here](http://docs.scipy.org/doc/scipy/reference/tutorial/signal.html).",
"_____no_output_____"
],
[
"# III. Intermediate Python\n\n## Output Parsing\nAs more and more of our day-to-day work is being done on and through computers, we increasingly have output that one program writes, often in a text file, that we need to analyze in one way or another, and potentially feed that output into another file.\n\nSuppose we have the following output:",
"_____no_output_____"
]
],
[
[
"myoutput = \"\"\"\\\n@ Step Energy Delta E Gmax Grms Xrms Xmax Walltime\n@ ---- ---------------- -------- -------- -------- -------- -------- --------\n@ 0 -6095.12544083 0.0D+00 0.03686 0.00936 0.00000 0.00000 1391.5\n@ 1 -6095.25762870 -1.3D-01 0.00732 0.00168 0.32456 0.84140 10468.0\n@ 2 -6095.26325979 -5.6D-03 0.00233 0.00056 0.06294 0.14009 11963.5\n@ 3 -6095.26428124 -1.0D-03 0.00109 0.00024 0.03245 0.10269 13331.9\n@ 4 -6095.26463203 -3.5D-04 0.00057 0.00013 0.02737 0.09112 14710.8\n@ 5 -6095.26477615 -1.4D-04 0.00043 0.00009 0.02259 0.08615 20211.1\n@ 6 -6095.26482624 -5.0D-05 0.00015 0.00002 0.00831 0.03147 21726.1\n@ 7 -6095.26483584 -9.6D-06 0.00021 0.00004 0.01473 0.05265 24890.5\n@ 8 -6095.26484405 -8.2D-06 0.00005 0.00001 0.00555 0.01929 26448.7\n@ 9 -6095.26484599 -1.9D-06 0.00003 0.00001 0.00164 0.00564 27258.1\n@ 10 -6095.26484676 -7.7D-07 0.00003 0.00001 0.00161 0.00553 28155.3\n@ 11 -6095.26484693 -1.8D-07 0.00002 0.00000 0.00054 0.00151 28981.7\n@ 11 -6095.26484693 -1.8D-07 0.00002 0.00000 0.00054 0.00151 28981.7\"\"\"",
"_____no_output_____"
]
],
[
[
"This output actually came from a geometry optimization of a Silicon cluster using the [NWChem](http://www.nwchem-sw.org/index.php/Main_Page) quantum chemistry suite. At every step the program computes the energy of the molecular geometry, and then changes the geometry to minimize the computed forces, until the energy converges. I obtained this output via the unix command\n\n % grep @ nwchem.out\n\nsince NWChem is nice enough to precede the lines that you need to monitor job progress with the '@' symbol.\n\nWe could do the entire analysis in Python; I'll show how to do this later on, but first let's focus on turning this code into a usable Python object that we can plot.\n\nFirst, note that the data is entered into a multi-line string. When Python sees three quote marks \"\"\" or ''' it treats everything following as part of a single string, including newlines, tabs, and anything else, until it sees the same three quote marks (\"\"\" has to be followed by another \"\"\", and ''' has to be followed by another ''') again. This is a convenient way to quickly dump data into Python, and it also reinforces the important idea that you don't have to open a file and deal with it one line at a time. You can read everything in, and deal with it as one big chunk.\n\nThe first thing we'll do, though, is to split the big string into a list of strings, since each line corresponds to a separate piece of data. We will use the **splitlines()** function on the big myout string to break it into a new element every time it sees a newline (\\n) character:",
"_____no_output_____"
]
],
[
[
"lines = myoutput.splitlines()\nlines",
"_____no_output_____"
]
],
[
[
"Splitting is a big concept in text processing. We used **splitlines()** here, and we will use the more general **split()** function below to split each line into whitespace-delimited words.\n\nWe now want to do three things:\n\n* Skip over the lines that don't carry any information\n* Break apart each line that does carry information and grab the pieces we want\n* Turn the resulting data into something that we can plot.\n\nFor this data, we really only want the Energy column, the Gmax column (which contains the maximum gradient at each step), and perhaps the Walltime column. \n\nSince the data is now in a list of lines, we can iterate over it:",
"_____no_output_____"
]
],
[
[
"for line in lines[2:]:\n # do something with each line\n words = line.split()",
"_____no_output_____"
]
],
[
[
"Let's examine what we just did: first, we used a **for** loop to iterate over each line. However, we skipped the first two (the lines[2:] only takes the lines starting from index 2), since lines[0] contained the title information, and lines[1] contained underscores.\n\nWe then split each line into chunks (which we're calling \"words\", even though in most cases they're numbers) using the string **split()** command. Here's what split does:",
"_____no_output_____"
]
],
[
[
"import string\nhelp(string.split)",
"Help on function split in module string:\n\nsplit(s, sep=None, maxsplit=-1)\n split(s [,sep [,maxsplit]]) -> list of strings\n \n Return a list of the words in the string s, using sep as the\n delimiter string. If maxsplit is given, splits at no more than\n maxsplit places (resulting in at most maxsplit+1 words). If sep\n is not specified or is None, any whitespace string is a separator.\n \n (split and splitfields are synonymous)\n\n"
]
],
[
[
"Here we're implicitly passing in the first argument (s, in the doctext) by calling a method .split() on a string object. In this instance, we're not passing in a sep character, which means that the function splits on whitespace. Let's see what that does to one of our lines:",
"_____no_output_____"
]
],
[
[
"lines[2].split()",
"_____no_output_____"
]
],
[
[
"This is almost exactly what we want. We just have to now pick the fields we want:",
"_____no_output_____"
]
],
[
[
"for line in lines[2:]:\n # do something with each line\n words = line.split()\n energy = words[2]\n gmax = words[4]\n time = words[8]\n print energy,gmax,time",
"-6095.12544083 0.03686 1391.5\n-6095.25762870 0.00732 10468.0\n-6095.26325979 0.00233 11963.5\n-6095.26428124 0.00109 13331.9\n-6095.26463203 0.00057 14710.8\n-6095.26477615 0.00043 20211.1\n-6095.26482624 0.00015 21726.1\n-6095.26483584 0.00021 24890.5\n-6095.26484405 0.00005 26448.7\n-6095.26484599 0.00003 27258.1\n-6095.26484676 0.00003 28155.3\n-6095.26484693 0.00002 28981.7\n-6095.26484693 0.00002 28981.7\n"
]
],
[
[
"This is fine for printing things out, but if we want to do something with the data, either make a calculation with it or pass it into a plotting, we need to convert the strings into regular floating point numbers. We can use the **float()** command for this. We also need to save it in some form. I'll do this as follows:",
"_____no_output_____"
]
],
[
[
"data = []\nfor line in lines[2:]:\n # do something with each line\n words = line.split()\n energy = float(words[2])\n gmax = float(words[4])\n time = float(words[8])\n data.append((energy,gmax,time))\ndata = array(data)",
"_____no_output_____"
]
],
[
[
"We now have our data in a numpy array, so we can choose columns to print:",
"_____no_output_____"
]
],
[
[
"plot(data[:,0])\nxlabel('step')\nylabel('Energy (hartrees)')\ntitle('Convergence of NWChem geometry optimization for Si cluster')",
"_____no_output_____"
]
],
[
[
"I would write the code a little more succinctly if I were doing this for myself, but this is essentially a snippet I use repeatedly. \n\nSuppose our data was in CSV (comma separated values) format, a format that originally came from Microsoft Excel, and is increasingly used as a data interchange format in big data applications. How would we parse that?",
"_____no_output_____"
]
],
[
[
"csv = \"\"\"\\\n-6095.12544083, 0.03686, 1391.5\n-6095.25762870, 0.00732, 10468.0\n-6095.26325979, 0.00233, 11963.5\n-6095.26428124, 0.00109, 13331.9\n-6095.26463203, 0.00057, 14710.8\n-6095.26477615, 0.00043, 20211.1\n-6095.26482624, 0.00015, 21726.1\n-6095.26483584, 0.00021, 24890.5\n-6095.26484405, 0.00005, 26448.7\n-6095.26484599, 0.00003, 27258.1\n-6095.26484676, 0.00003, 28155.3\n-6095.26484693, 0.00002, 28981.7\n-6095.26484693, 0.00002, 28981.7\"\"\"",
"_____no_output_____"
]
],
[
[
"We can do much the same as before:",
"_____no_output_____"
]
],
[
[
"data = []\nfor line in csv.splitlines():\n words = line.split(',')\n data.append(map(float,words))\ndata = array(data)",
"_____no_output_____"
]
],
[
[
"There are two significant changes over what we did earlier. First, I'm passing the comma character ',' into the split function, so that it breaks to a new word every time it sees a comma. Next, to simplify things a bit, I'm using the **map()** command to repeatedly apply a single function (**float()**) to a list, and to return the output as a list.",
"_____no_output_____"
]
],
[
[
"help(map)",
"Help on built-in function map in module __builtin__:\n\nmap(...)\n map(function, sequence[, sequence, ...]) -> list\n \n Return a list of the results of applying the function to the items of\n the argument sequence(s). If more than one sequence is given, the\n function is called with an argument list consisting of the corresponding\n item of each sequence, substituting None for missing values when not all\n sequences have the same length. If the function is None, return a list of\n the items of the sequence (or a list of tuples if more than one sequence).\n\n"
]
],
[
[
"Despite the differences, the resulting plot should be the same:",
"_____no_output_____"
]
],
[
[
"plot(data[:,0])\nxlabel('step')\nylabel('Energy (hartrees)')\ntitle('Convergence of NWChem geometry optimization for Si cluster')",
"_____no_output_____"
]
],
[
[
"Hartrees (what most quantum chemistry programs use by default) are really stupid units. We really want this in kcal/mol or eV or something we use. So let's quickly replot this in terms of eV above the minimum energy, which will give us a much more useful plot:",
"_____no_output_____"
]
],
[
[
"energies = data[:,0]\nminE = min(energies)\nenergies_eV = 27.211*(energies-minE)\nplot(energies_eV)\nxlabel('step')\nylabel('Energy (eV)')\ntitle('Convergence of NWChem geometry optimization for Si cluster')",
"_____no_output_____"
]
],
[
[
"This gives us the output in a form that we can think about: 4 eV is a fairly substantial energy change (chemical bonds are roughly this magnitude of energy), and most of the energy decrease was obtained in the first geometry iteration.",
"_____no_output_____"
],
[
"We mentioned earlier that we don't have to rely on **grep** to pull out the relevant lines for us. The **string** module has a lot of useful functions we can use for this. Among them is the **startswith** function. For example:",
"_____no_output_____"
]
],
[
[
"lines = \"\"\"\\\n ----------------------------------------\n | WALL | 0.45 | 443.61 |\n ----------------------------------------\n\n@ Step Energy Delta E Gmax Grms Xrms Xmax Walltime\n@ ---- ---------------- -------- -------- -------- -------- -------- --------\n@ 0 -6095.12544083 0.0D+00 0.03686 0.00936 0.00000 0.00000 1391.5\n ok ok\n\n\n\n Z-matrix (autoz)\n --------\n\"\"\".splitlines()\n\nfor line in lines:\n if line.startswith('@'):\n print line\n ",
"@ Step Energy Delta E Gmax Grms Xrms Xmax Walltime\n@ ---- ---------------- -------- -------- -------- -------- -------- --------\n@ 0 -6095.12544083 0.0D+00 0.03686 0.00936 0.00000 0.00000 1391.5\n"
]
],
[
[
"and we've successfully grabbed all of the lines that begin with the @ symbol.",
"_____no_output_____"
],
[
"The real value in a language like Python is that it makes it easy to take additional steps to analyze data in this fashion, which means you are thinking more about your data, and are more likely to see important patterns.",
"_____no_output_____"
],
[
"## More Sophisticated String Formatting and Processing\nStrings are a big deal in most modern languages, and hopefully the previous sections helped underscore how versatile Python's string processing techniques are. We will continue this topic in this chapter.\n\nWe can print out lines in Python using the print command. ",
"_____no_output_____"
]
],
[
[
"print \"I have 3 errands to run\"",
"I have 3 errands to run\n"
]
],
[
[
"In IPython we don't even need the print command, since it will display the last expression not assigned to a variable.",
"_____no_output_____"
]
],
[
[
"\"I have 3 errands to run\"",
"_____no_output_____"
]
],
[
[
"**print** even converts some arguments to strings for us:",
"_____no_output_____"
]
],
[
[
"a,b,c = 1,2,3\nprint \"The variables are \",1,2,3",
"The variables are 1 2 3\n"
]
],
[
[
"As versatile as this is, you typically need more freedom over the data you print out. For example, what if we want to print a bunch of data to exactly 4 decimal places? We can do this using formatted strings.\n\nFormatted strings share a syntax with the C **printf** statement. We make a string that has some funny *format characters* in it, and then pass a bunch of variables into the string that fill out those characters in different ways.\n\nFor example,",
"_____no_output_____"
]
],
[
[
"print \"Pi as a decimal = %d\" % pi\nprint \"Pi as a float = %f\" % pi\nprint \"Pi with 4 decimal places = %.4f\" % pi\nprint \"Pi with overall fixed length of 10 spaces, with 6 decimal places = %10.6f\" % pi\nprint \"Pi as in exponential format = %e\" % pi",
"Pi as a decimal = 3\nPi as a float = 3.141593\nPi with 4 decimal places = 3.1416\nPi with overall fixed length of 10 spaces, with 6 decimal places = 3.141593\nPi as in exponential format = 3.141593e+00\n"
]
],
[
[
"We use a percent sign in two different ways here. First, the format character itself starts with a percent sign. %d or %i are for integers, %f is for floats, %e is for numbers in exponential formats. All of the numbers can take number immediately after the percent that specifies the total spaces used to print the number. Formats with a decimal can take an additional number after a dot . to specify the number of decimal places to print.\n\nThe other use of the percent sign is after the string, to pipe a set of variables in. You can pass in multiple variables (if your formatting string supports it) by putting a tuple after the percent. Thus,",
"_____no_output_____"
]
],
[
[
"print \"The variables specified earlier are %d, %d, and %d\" % (a,b,c)",
"The variables specified earlier are 1, 2, and 3\n"
]
],
[
[
"This is a simple formatting structure that will satisfy most of your string formatting needs. More information on different format symbols is available in the [string formatting part of the standard docs](http://docs.python.org/release/2.5.2/lib/typesseq-strings.html).\n\nIt's worth noting that more complicated string formatting methods are in development, but I prefer this system due to its simplicity and its similarity to C formatting strings.\n\nRecall we discussed multiline strings. We can put format characters in these as well, and fill them with the percent sign as before.",
"_____no_output_____"
]
],
[
[
"form_letter = \"\"\"\\\n\n %s\n\nDear %s,\n\nWe regret to inform you that your product did not\nship today due to %s.\n\nWe hope to remedy this as soon as possible.\n\n From,\n Your Supplier\n\"\"\"\n\nprint form_letter % (\"July 1, 2013\",\"Valued Customer Bob\",\"alien attack\")",
"\n July 1, 2013\n\nDear Valued Customer Bob,\n\nWe regret to inform you that your product did not\nship today due to alien attack.\n\nWe hope to remedy this as soon as possible.\n\n From,\n Your Supplier\n\n"
]
],
[
[
"The problem with a long block of text like this is that it's often hard to keep track of what all of the variables are supposed to stand for. There's an alternate format where you can pass a dictionary into the formatted string, and give a little bit more information to the formatted string itself. This method looks like:",
"_____no_output_____"
]
],
[
[
"form_letter = \"\"\"\\\n\n %(date)s\n\nDear %(customer)s,\n\nWe regret to inform you that your product did not\nship today due to %(lame_excuse)s.\n\nWe hope to remedy this as soon as possible.\n\n From,\n Your Supplier\n\"\"\"\n\nprint form_letter % {\"date\" : \"July 1, 2013\",\"customer\":\"Valued Customer Bob\",\"lame_excuse\":\"alien attack\"}",
"\n July 1, 2013\n\nDear Valued Customer Bob,\n\nWe regret to inform you that your product did not\nship today due to alien attack.\n\nWe hope to remedy this as soon as possible.\n\n From,\n Your Supplier\n\n"
]
],
[
[
"By providing a little bit more information, you're less likely to make mistakes, like referring to your customer as \"alien attack\".\n\nAs a scientist, you're less likely to be sending bulk mailings to a bunch of customers. But these are great methods for generating and submitting lots of similar runs, say scanning a bunch of different structures to find the optimal configuration for something.\n\nFor example, you can use the following template for NWChem input files:",
"_____no_output_____"
]
],
[
[
"nwchem_format = \"\"\"\nstart %(jobname)s\n\ntitle \"%(thetitle)s\"\ncharge %(charge)d\n\ngeometry units angstroms print xyz autosym\n%(geometry)s\nend\n\nbasis\n * library 6-31G**\nend\n\ndft\n xc %(dft_functional)s\n mult %(multiplicity)d\nend\n\ntask dft %(jobtype)s\n\"\"\"",
"_____no_output_____"
]
],
[
[
"If you want to submit a sequence of runs to a computer somewhere, it's pretty easy to put together a little script, maybe even with some more string formatting in it:",
"_____no_output_____"
]
],
[
[
"oxygen_xy_coords = [(0,0),(0,0.1),(0.1,0),(0.1,0.1)]\ncharge = 0\nmultiplicity = 1\ndft_functional = \"b3lyp\"\njobtype = \"optimize\"\n\ngeometry_template = \"\"\"\\\n O %f %f 0.0\n H 0.0 1.0 0.0\n H 1.0 0.0 0.0\"\"\"\n\nfor i,xy in enumerate(oxygen_xy_coords):\n thetitle = \"Water run #%d\" % i\n jobname = \"h2o-%d\" % i\n geometry = geometry_template % xy\n print \"---------\"\n print nwchem_format % dict(thetitle=thetitle,charge=charge,jobname=jobname,jobtype=jobtype,\n geometry=geometry,dft_functional=dft_functional,multiplicity=multiplicity)",
"---------\n\nstart h2o-0\n\ntitle \"Water run #0\"\ncharge 0\n\ngeometry units angstroms print xyz autosym\n O 0.000000 0.000000 0.0\n H 0.0 1.0 0.0\n H 1.0 0.0 0.0\nend\n\nbasis\n * library 6-31G**\nend\n\ndft\n xc b3lyp\n mult 1\nend\n\ntask dft optimize\n\n---------\n\nstart h2o-1\n\ntitle \"Water run #1\"\ncharge 0\n\ngeometry units angstroms print xyz autosym\n O 0.000000 0.100000 0.0\n H 0.0 1.0 0.0\n H 1.0 0.0 0.0\nend\n\nbasis\n * library 6-31G**\nend\n\ndft\n xc b3lyp\n mult 1\nend\n\ntask dft optimize\n\n---------\n\nstart h2o-2\n\ntitle \"Water run #2\"\ncharge 0\n\ngeometry units angstroms print xyz autosym\n O 0.100000 0.000000 0.0\n H 0.0 1.0 0.0\n H 1.0 0.0 0.0\nend\n\nbasis\n * library 6-31G**\nend\n\ndft\n xc b3lyp\n mult 1\nend\n\ntask dft optimize\n\n---------\n\nstart h2o-3\n\ntitle \"Water run #3\"\ncharge 0\n\ngeometry units angstroms print xyz autosym\n O 0.100000 0.100000 0.0\n H 0.0 1.0 0.0\n H 1.0 0.0 0.0\nend\n\nbasis\n * library 6-31G**\nend\n\ndft\n xc b3lyp\n mult 1\nend\n\ntask dft optimize\n\n"
]
],
[
[
"This is a very bad geometry for a water molecule, and it would be silly to run so many geometry optimizations of structures that are guaranteed to converge to the same single geometry, but you get the idea of how you can run vast numbers of simulations with a technique like this.\n\nWe used the **enumerate** function to loop over both the indices and the items of a sequence, which is valuable when you want a clean way of getting both. **enumerate** is roughly equivalent to:",
"_____no_output_____"
]
],
[
[
"def my_enumerate(seq):\n l = []\n for i in range(len(seq)):\n l.append((i,seq[i]))\n return l\nmy_enumerate(oxygen_xy_coords)",
"_____no_output_____"
]
],
[
[
"Although enumerate uses **generators** (see below) so that it doesn't have to create a big list, which makes it faster for really long sequences.",
"_____no_output_____"
],
[
"## Optional arguments\nYou will recall that the **linspace** function can take either two arguments (for the starting and ending points):",
"_____no_output_____"
]
],
[
[
"linspace(0,1)",
"_____no_output_____"
]
],
[
[
"or it can take three arguments, for the starting point, the ending point, and the number of points:",
"_____no_output_____"
]
],
[
[
"linspace(0,1,5)",
"_____no_output_____"
]
],
[
[
"You can also pass in keywords to exclude the endpoint:",
"_____no_output_____"
]
],
[
[
"linspace(0,1,5,endpoint=False)",
"_____no_output_____"
]
],
[
[
"Right now, we only know how to specify functions that have a fixed number of arguments. We'll learn how to do the more general cases here.\n\nIf we're defining a simple version of linspace, we would start with:",
"_____no_output_____"
]
],
[
[
"def my_linspace(start,end):\n npoints = 50\n v = []\n d = (end-start)/float(npoints-1)\n for i in range(npoints):\n v.append(start + i*d)\n return v\nmy_linspace(0,1)",
"_____no_output_____"
]
],
[
[
"We can add an optional argument by specifying a default value in the argument list:",
"_____no_output_____"
]
],
[
[
"def my_linspace(start,end,npoints = 50):\n v = []\n d = (end-start)/float(npoints-1)\n for i in range(npoints):\n v.append(start + i*d)\n return v",
"_____no_output_____"
]
],
[
[
"This gives exactly the same result if we don't specify anything:",
"_____no_output_____"
]
],
[
[
"my_linspace(0,1)",
"_____no_output_____"
]
],
[
[
"But also let's us override the default value with a third argument:",
"_____no_output_____"
]
],
[
[
"my_linspace(0,1,5)",
"_____no_output_____"
]
],
[
[
"We can add arbitrary keyword arguments to the function definition by putting a keyword argument \\*\\*kwargs handle in:",
"_____no_output_____"
]
],
[
[
"def my_linspace(start,end,npoints=50,**kwargs):\n endpoint = kwargs.get('endpoint',True)\n v = []\n if endpoint:\n d = (end-start)/float(npoints-1)\n else:\n d = (end-start)/float(npoints)\n for i in range(npoints):\n v.append(start + i*d)\n return v\nmy_linspace(0,1,5,endpoint=False)",
"_____no_output_____"
]
],
[
[
"What the keyword argument construction does is to take any additional keyword arguments (i.e. arguments specified by name, like \"endpoint=False\"), and stick them into a dictionary called \"kwargs\" (you can call it anything you like, but it has to be preceded by two stars). You can then grab items out of the dictionary using the **get** command, which also lets you specify a default value. I realize it takes a little getting used to, but it is a common construction in Python code, and you should be able to recognize it.\n\nThere's an analogous \\*args that dumps any additional arguments into a list called \"args\". Think about the **range** function: it can take one (the endpoint), two (starting and ending points), or three (starting, ending, and step) arguments. How would we define this?",
"_____no_output_____"
]
],
[
[
"def my_range(*args):\n start = 0\n step = 1\n if len(args) == 1:\n end = args[0]\n elif len(args) == 2:\n start,end = args\n elif len(args) == 3:\n start,end,step = args\n else:\n raise Exception(\"Unable to parse arguments\")\n v = []\n value = start\n while True:\n v.append(value)\n value += step\n if value > end: break\n return v",
"_____no_output_____"
]
],
[
[
"Note that we have defined a few new things you haven't seen before: a **break** statement, that allows us to exit a for loop if some conditions are met, and an exception statement, that causes the interpreter to exit with an error message. For example:",
"_____no_output_____"
]
],
[
[
"my_range()",
"_____no_output_____"
]
],
[
[
"## List Comprehensions and Generators\nList comprehensions are a streamlined way to make lists. They look something like a list definition, with some logic thrown in. For example:",
"_____no_output_____"
]
],
[
[
"evens1 = [2*i for i in range(10)]\nprint evens1",
"_____no_output_____"
]
],
[
[
"You can also put some boolean testing into the construct:",
"_____no_output_____"
]
],
[
[
"odds = [i for i in range(20) if i%2==1]\nodds",
"_____no_output_____"
]
],
[
[
"Here i%2 is the remainder when i is divided by 2, so that i%2==1 is true if the number is odd. Even though this is a relative new addition to the language, it is now fairly common since it's so convenient.",
"_____no_output_____"
],
[
"**iterators** are a way of making virtual sequence objects. Consider if we had the nested loop structure:\n\n    for i in range(1000000):\n        for j in range(1000000):\n\nInside the main loop, we make a list of 1,000,000 integers, just to loop over them one at a time. We don't need any of the additional things that a list gives us, like slicing or random access, we just need to go through the numbers one at a time. And we're making 1,000,000 of them. \n\n**iterators** are a way around this. For example, the **xrange** function is the iterator version of range. This simply makes a counter that is looped through in sequence, so that the analogous loop structure would look like:\n\n    for i in xrange(1000000):\n        for j in xrange(1000000):\n\nEven though we've only added two characters, we've dramatically sped up the code, because we're not making 1,000,000 big lists.\n\nWe can define our own iterators using the **yield** statement:",
"_____no_output_____"
]
],
[
[
"def evens_below(n):\n for i in xrange(n):\n if i%2 == 0:\n yield i\n return\n\nfor i in evens_below(9):\n print i",
"0\n2\n4\n6\n8\n"
]
],
[
[
"We can always turn an iterator into a list using the **list** command:",
"_____no_output_____"
]
],
[
[
"list(evens_below(9))",
"_____no_output_____"
]
],
[
[
"There's a special syntax called a **generator expression** that looks a lot like a list comprehension:",
"_____no_output_____"
]
],
[
[
"evens_gen = (i for i in xrange(9) if i%2==0)\nfor i in evens_gen:\n print i",
"0\n2\n4\n6\n8\n"
]
],
[
[
"## Factory Functions\nA factory function is a function that returns a function. They have the fancy name *lexical closure*, which makes you sound really intelligent in front of your CS friends. But, despite the arcane names, factory functions can play a very practical role.\n\nSuppose you want the Gaussian function centered at 0.5, with height 99 and width 1.0. You could write a general function.",
"_____no_output_____"
]
],
[
[
"def gauss(x,A,a,x0):\n return A*exp(-a*(x-x0)**2)",
"_____no_output_____"
]
],
[
[
"But what if you need a function with only one argument, like f(x) rather than f(x,y,z,...)? You can do this with Factory Functions:",
"_____no_output_____"
]
],
[
[
"def gauss_maker(A,a,x0):\n def f(x):\n return A*exp(-a*(x-x0)**2)\n return f",
"_____no_output_____"
],
[
"x = linspace(0,1)\ng = gauss_maker(99.0,1.0,0.5)\nplot(x,g(x))",
"_____no_output_____"
]
],
[
[
"Everything in Python is an object, including functions. This means that functions can be returned by other functions. (They can also be passed into other functions, which is also useful, but a topic for another discussion.) In the **gauss_maker** example, the *g* function that is output \"remembers\" the A, a, x0 values it was constructed with, since they're all stored in the local memory space (this is what the *lexical closure* really refers to) of that function.\n\nFactories are one of the more important of the [Software Design Patterns](http://en.wikipedia.org/wiki/Software_design_pattern), which are a set of guidelines to follow to make high-quality, portable, readable, stable software. It's beyond the scope of the current work to go more into either factories or design patterns, but I thought I would mention them for people interested in software design.",
"_____no_output_____"
],
[
"## Serialization: Save it for later\n*Serialization* refers to the process of outputting data (and occasionally functions) to a database or a regular file, for the purpose of using it later on. In the very early days of programming languages, this was normally done in regular text files. Python is excellent at text processing, and you probably already know enough to get started with this.\n\nWhen accessing large amounts of data became important, people developed database software based around the Structured Query Language (SQL) standard. I'm not going to cover SQL here, but, if you're interested, I recommend using the [sqlite3](http://docs.python.org/2/library/sqlite3.html) module in the Python standard library.\n\nAs data interchange became important, the eXtensible Markup Language (XML) has emerged. XML makes data formats that are easy to write parsers for, greatly simplifying the ambiguity that sometimes arises in the process. Again, I'm not going to cover XML here, but if you're interested in learning more, look into [Element Trees](http://docs.python.org/2/library/xml.etree.elementtree.html), now part of the Python standard library.\n\nPython has a very general serialization format called **pickle** that can turn any Python object, even a function or a class, into a representation that can be written to a file and read in later. But, again, I'm not going to talk about this, since I rarely use it myself. Again, [the standard library documentation for pickle](http://docs.python.org/2/library/pickle.html#module-cPickle) is the place to go.\n\nWhat I am going to talk about is a relatively recent format call [JavaScript Object Notation](http://json.org/) (JSON) that has become very popular over the past few years. [There's a module in the standard library](http://docs.python.org/2/library/json.html) for encoding and decoding JSON formats. 
The reason I like JSON so much is that it looks almost like Python, so that, unlike the other options, you can look at your data and edit it, use it in another program, etc.\n\nHere's a little example:",
"_____no_output_____"
]
],
[
[
"# Data in a json format:\njson_data = \"\"\"\\\n{\n \"a\": [1,2,3],\n \"b\": [4,5,6],\n \"greeting\" : \"Hello\"\n}\"\"\"\nimport json\njson.loads(json_data)",
"_____no_output_____"
]
],
[
[
"Ignore the little u's before the strings, these just mean the strings are in UNICODE. Your data sits in something that looks like a Python dictionary, and in a single line of code, you can load it into a Python dictionary for use later.\n\nIn the same way, you can, with a single line of code, put a bunch of variables into a dictionary, and then output to a file using json:",
"_____no_output_____"
]
],
[
[
"json.dumps({\"a\":[1,2,3],\"b\":[9,10,11],\"greeting\":\"Hola\"})",
"_____no_output_____"
]
],
[
[
"## Functional programming\nFunctional programming is a very broad subject. The idea is to have a series of functions, each of which generates a new data structure from an input, without changing the input structure at all. By not modifying the input structure (something that is called not having *side effects*), many guarantees can be made about how independent the processes are, which can help parallelization and guarantees of program accuracy. There is a [Python Functional Programming HOWTO](http://docs.python.org/2/howto/functional.html) in the standard docs that goes into more details on functional programming. I just wanted to touch on a few of the most important ideas here.\n\nThere is an **operator** module that has function versions of most of the Python operators. For example:",
"_____no_output_____"
]
],
[
[
"from operator import add, mul\nadd(1,2)",
"_____no_output_____"
],
[
"mul(3,4)",
"_____no_output_____"
]
],
[
[
"These are useful building blocks for functional programming.",
"_____no_output_____"
],
[
"The **lambda** operator allows us to build *anonymous functions*, which are simply functions that aren't defined by a normal **def** statement with a name. For example, a function that doubles the input is:",
"_____no_output_____"
]
],
[
[
"def doubler(x): return 2*x\ndoubler(17)",
"_____no_output_____"
]
],
[
[
"We could also write this as:",
"_____no_output_____"
]
],
[
[
"lambda x: 2*x",
"_____no_output_____"
]
],
[
[
"And assign it to a function separately:",
"_____no_output_____"
]
],
[
[
"another_doubler = lambda x: 2*x\nanother_doubler(19)",
"_____no_output_____"
]
],
[
[
"**lambda** is particularly convenient (as we'll see below) in passing simple functions as arguments to other functions.",
"_____no_output_____"
],
[
"**map** is a way to repeatedly apply a function to a list:",
"_____no_output_____"
]
],
[
[
"map(float,'1 2 3 4 5'.split())",
"_____no_output_____"
]
],
[
[
"**reduce** is a way to repeatedly apply a function to the first two items of the list. There already is a **sum** function in Python that is a reduction:",
"_____no_output_____"
]
],
[
[
"sum([1,2,3,4,5])",
"_____no_output_____"
]
],
[
[
"We can use **reduce** to define an analogous **prod** function:",
"_____no_output_____"
]
],
[
[
"def prod(l): return reduce(mul,l)\nprod([1,2,3,4,5])",
"_____no_output_____"
]
],
[
[
"## Object Oriented Programming",
"_____no_output_____"
],
[
"We've seen a lot of examples of **objects** in Python. We create a string object with quote marks:",
"_____no_output_____"
]
],
[
[
"mystring = \"Hi there\"",
"_____no_output_____"
]
],
[
[
"and we have a bunch of methods we can use on the object:",
"_____no_output_____"
]
],
[
[
"mystring.split()",
"_____no_output_____"
],
[
"mystring.startswith('Hi')",
"_____no_output_____"
],
[
"len(mystring)",
"_____no_output_____"
]
],
[
[
"Object oriented programming simply gives you the tools to define objects and methods for yourself. It's useful anytime you want to keep some data (like the characters in the string) tightly coupled to the functions that act on the data (length, split, startswith, etc.).\n\nAs an example, we're going to bundle the functions we did to make the 1d harmonic oscillator eigenfunctions with arbitrary potentials, so we can pass in a function defining that potential, some additional specifications, and get out something that can plot the orbitals, as well as do other things with them, if desired.",
"_____no_output_____"
]
],
[
[
"class Schrod1d:\n \"\"\"\\\n Schrod1d: Solver for the one-dimensional Schrodinger equation.\n \"\"\"\n def __init__(self,V,start=0,end=1,npts=50,**kwargs):\n m = kwargs.get('m',1.0)\n self.x = linspace(start,end,npts)\n self.Vx = V(self.x)\n self.H = (-0.5/m)*self.laplacian() + diag(self.Vx)\n return\n \n def plot(self,*args,**kwargs):\n titlestring = kwargs.get('titlestring',\"Eigenfunctions of the 1d Potential\")\n xstring = kwargs.get('xstring',\"Displacement (bohr)\")\n ystring = kwargs.get('ystring',\"Energy (hartree)\")\n if not args:\n args = [3]\n x = self.x\n E,U = eigh(self.H)\n h = x[1]-x[0]\n\n # Plot the Potential\n plot(x,self.Vx,color='k')\n\n for i in range(*args):\n # For each of the first few solutions, plot the energy level:\n axhline(y=E[i],color='k',ls=\":\")\n # as well as the eigenfunction, displaced by the energy level so they don't\n # all pile up on each other:\n plot(x,U[:,i]/sqrt(h)+E[i])\n title(titlestring)\n xlabel(xstring)\n ylabel(ystring) \n return\n \n def laplacian(self):\n x = self.x\n h = x[1]-x[0] # assume uniformly spaced points\n n = len(x)\n M = -2*identity(n,'d')\n for i in range(1,n):\n M[i,i-1] = M[i-1,i] = 1\n return M/h**2",
"_____no_output_____"
]
],
[
[
"The **__init__()** function specifies what operations go on when the object is created. The **self** argument is the object itself, and we don't pass it in. The only required argument is the function that defines the QM potential. We can also specify additional arguments that define the numerical grid that we're going to use for the calculation.\n\nFor example, to do an infinite square well potential, we have a function that is 0 everywhere. We don't have to specify the barriers, since we'll only define the potential in the well, which means that it can't be defined anywhere else.",
"_____no_output_____"
]
],
[
[
"square_well = Schrod1d(lambda x: 0*x,m=10)\nsquare_well.plot(4,titlestring=\"Square Well Potential\")",
"_____no_output_____"
]
],
[
[
"We can similarly redefine the Harmonic Oscillator potential.",
"_____no_output_____"
]
],
[
[
"ho = Schrod1d(lambda x: x**2,start=-3,end=3)\nho.plot(6,titlestring=\"Harmonic Oscillator\")",
"_____no_output_____"
]
],
[
[
"Let's define a finite well potential:",
"_____no_output_____"
]
],
[
[
"def finite_well(x,V_left=1,V_well=0,V_right=1,d_left=10,d_well=10,d_right=10):\n V = zeros(x.size,'d')\n for i in range(x.size):\n if x[i] < d_left: \n V[i] = V_left\n elif x[i] > (d_left+d_well):\n V[i] = V_right\n else:\n V[i] = V_well\n return V\n \nfw = Schrod1d(finite_well,start=0,end=30,npts=100)\nfw.plot()",
"_____no_output_____"
]
],
[
[
"A triangular well:",
"_____no_output_____"
]
],
[
[
"def triangular(x,F=30): return F*x\n\ntw = Schrod1d(triangular,m=10)\ntw.plot()",
"_____no_output_____"
]
],
[
[
"Or we can combine the two, making something like a semiconductor quantum well with a top gate:",
"_____no_output_____"
]
],
[
[
"def tri_finite(x): return finite_well(x)+triangular(x,F=0.025)\n\ntfw = Schrod1d(tri_finite,start=0,end=30,npts=100)\ntfw.plot()",
"_____no_output_____"
]
],
[
[
"There's a lot of philosophy behind object oriented programming. Since I'm trying to focus on just the basics here, I won't go into them, but the internet is full of lots of resources on OO programming and theory. The best of this is contained in the [Design Patterns](http://en.wikipedia.org/wiki/Design_Patterns_(book)) book, which I highly recommend.",
"_____no_output_____"
],
[
"# IV. Speeding Python: Timeit, Profiling, Cython, SWIG, and PyPy\n\nThe first rule of speeding up your code is not to do it at all. As Donald Knuth said:\n\n> \"We should forget about small efficiencies, say about 97% of the time: premature optimization is the root of all evil.\"\n\nThe second rule of speeding up your code is to only do it if you really think you need to do it. Python has two tools to help with this process: a timing program called **timeit**, and a very good code profiler. We will discuss both of these tools in this section, as well as techniques to use to speed up your code once you know it's too slow.\n\n## Timeit\n**timeit** helps determine which of two similar routines is faster. Recall that some time ago we wrote a factorial routine, but also pointed out that Python had its own routine built into the math module. Is there any difference in the speed of the two? **timeit** helps us determine this. For example, **timeit** tells how long each method takes:",
"_____no_output_____"
]
],
[
[
"%timeit factorial(20)",
"1000000 loops, best of 3: 714 ns per loop\n"
]
],
[
[
"The little % sign that we have in front of the timeit call is an example of an IPython magic function, which we don't have time to go into here, but it's just some little extra mojo that IPython adds to the functions to make it run better in the IPython environment. You can read more about it in the [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html).\n\nIn any case, the timeit function runs 3 loops, and tells us that it took an average of 714 ns to compute 20!. In contrast:",
"_____no_output_____"
]
],
[
[
"%timeit fact(20)",
"100000 loops, best of 3: 6 µs per loop\n"
]
],
[
[
"the factorial function we wrote is about a factor of 10 slower. This is because the built-in factorial function is written in C code and called from Python, and the version we wrote is written in plain old Python. A Python program has a lot of stuff in it that make it nice to interact with, but all that friendliness slows down the code. In contrast, the C code is less friendly but more efficient. If you want speed with as little effort as possible, write your code in an easy to program language like Python, but dump the slow parts into a faster language like C, and call it from Python. We'll go through some tricks to do this in this section.",
"_____no_output_____"
],
[
"## Profiling",
"_____no_output_____"
],
[
"Profiling complements what **timeit** does by splitting the overall timing into the time spent in each function. It can give us a better understanding of what our program is really spending its time on.\n\nSuppose we want to create a list of even numbers. Our first effort yields this:",
"_____no_output_____"
]
],
[
[
"def evens(n):\n \"Return a list of even numbers below n\"\n l = []\n for x in range(n):\n if x % 2 == 0:\n l.append(x)\n return l",
"_____no_output_____"
]
],
[
[
"Is this code fast enough? We find out by running the Python profiler on a longer run:",
"_____no_output_____"
]
],
[
[
"import cProfile\ncProfile.run('evens(100000)')",
" 50004 function calls in 0.048 seconds\n\n Ordered by: standard name\n\n ncalls tottime percall cumtime percall filename:lineno(function)\n 1 0.038 0.038 0.048 0.048 <ipython-input-199-9d23d9d62f6b>:1(evens)\n 1 0.001 0.001 0.048 0.048 <string>:1(<module>)\n 50000 0.007 0.000 0.007 0.000 {method 'append' of 'list' objects}\n 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n 1 0.003 0.003 0.003 0.003 {range}\n\n\n"
]
],
[
[
"This looks okay, 0.05 seconds isn't a *huge* amount of time, but looking at the profiling shows that the **append** function is taking almost 20% of the time. Can we do better? Let's try a list comprehension.",
"_____no_output_____"
]
],
[
[
"def evens2(n):\n \"Return a list of even numbers below n\"\n return [x for x in range(n) if x % 2 == 0]",
"_____no_output_____"
],
[
"import cProfile\ncProfile.run('evens2(100000)')",
" 4 function calls in 0.022 seconds\n\n Ordered by: standard name\n\n ncalls tottime percall cumtime percall filename:lineno(function)\n 1 0.020 0.020 0.022 0.022 <ipython-input-201-cbb0d0b3fc58>:1(evens2)\n 1 0.001 0.001 0.022 0.022 <string>:1(<module>)\n 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n 1 0.001 0.001 0.001 0.001 {range}\n\n\n"
]
],
[
[
"By removing a small part of the code using a list comprehension, we've doubled the overall speed of the code! \n\nIt seems like **range** is taking a long time, still. Can we get rid of it? We can, using the **xrange** generator:",
"_____no_output_____"
]
],
[
[
"def evens3(n):\n \"Return a list of even numbers below n\"\n return [x for x in xrange(n) if x % 2 == 0]",
"_____no_output_____"
],
[
"import cProfile\ncProfile.run('evens3(100000)')",
" 3 function calls in 0.021 seconds\n\n Ordered by: standard name\n\n ncalls tottime percall cumtime percall filename:lineno(function)\n 1 0.021 0.021 0.021 0.021 <ipython-input-203-3ee1b2b2b034>:1(evens3)\n 1 0.001 0.001 0.021 0.021 <string>:1(<module>)\n 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n\n\n"
]
],
[
[
"This is where profiling can be useful. Our code now runs 3x faster by making trivial changes. We wouldn't have thought to look in these places had we not had access to easy profiling. Imagine what you would find in more complicated programs.",
"_____no_output_____"
],
[
"## Other Ways to Speed Python\nWhen we compared the fact and factorial functions, above, we noted that C routines are often faster because they're more streamlined. Once we've determined that one routine is a bottleneck for the performance of a program, we can replace it with a faster version by writing it in C. This is called *extending* Python, and there's a [good section in the standard documents](http://docs.python.org/2/extending/extending.html). This can be a tedious process if you have many different routines to convert. Fortunately, there are several other options.\n\n[Swig](http://swig.org/) (the simplified wrapper and interface generator) is a method to generate bindings not only for Python but also for Matlab, Perl, Ruby, and other scripting languages. Swig can scan the header files of a C project and generate Python bindings for it. Using Swig is substantially easier than writing the routines in C.\n\n[Cython](http://www.cython.org/) is a C-extension language. You can start by compiling a Python routine into a shared object library that can be imported into faster versions of the routines. You can then add additional static typing and make other restrictions to further speed the code. Cython is generally easier than using Swig.\n\n[PyPy](http://pypy.org/) is the easiest way of obtaining fast code. PyPy compiles Python to a subset of the Python language called RPython that can be efficiently compiled and optimized. Over a wide range of tests, PyPy is [roughly 6 times faster than the standard Python Distribution](http://speed.pypy.org/).",
"_____no_output_____"
],
[
"## Fun: Finding Primes\n\n[Project Euler](http://projecteuler.net) is a site where programming puzzles are posed that might have interested Euler. [Problem 7](http://projecteuler.net/problem=7) asks the question:\n\n> By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\n> \n> What is the 10,001st prime number?\n\nTo solve this we need a very long list of prime numbers. First we'll make a function that uses the Sieve of Eratosthenes to generate all the primes less than n.",
"_____no_output_____"
]
],
[
[
"def primes(n):\n \"\"\"\\\n From python cookbook, returns a list of prime numbers from 2 to < n\n\n >>> primes(2)\n [2]\n >>> primes(10)\n [2, 3, 5, 7]\n \"\"\"\n if n==2: return [2]\n elif n<2: return []\n s=range(3,n+1,2)\n mroot = n ** 0.5\n half=(n+1)/2-1\n i=0\n m=3\n while m <= mroot:\n if s[i]:\n j=(m*m-3)/2\n s[j]=0\n while j<half:\n s[j]=0\n j+=m\n i=i+1\n m=2*i+3\n return [2]+[x for x in s if x]",
"_____no_output_____"
],
[
"number_to_try = 1000000\nlist_of_primes = primes(number_to_try)\nprint list_of_primes[10001]",
"104759\n"
]
],
[
[
"You might think that Python is a bad choice for something like this, but, in terms of time, it really doesn't take long:",
"_____no_output_____"
]
],
[
[
"cProfile.run('primes(1000000)')",
" 4 function calls in 0.372 seconds\n\n Ordered by: standard name\n\n ncalls tottime percall cumtime percall filename:lineno(function)\n 1 0.335 0.335 0.363 0.363 <ipython-input-205-57e280631d57>:1(primes)\n 1 0.009 0.009 0.372 0.372 <string>:1(<module>)\n 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n 1 0.028 0.028 0.028 0.028 {range}\n\n\n"
]
],
[
[
"Only takes about a third of a second to generate a list of all the primes below 1,000,000. It would be nice if we could use the same trick to get rid of the **range** function, but we actually need it, since we're using the object like a list, rather than like a counter as before.",
"_____no_output_____"
],
[
"# VII. References\n\n## Learning Resources\n* [Official Python Documentation](http://docs.python.org/2.7), including\n - [Python Tutorial](http://docs.python.org/2.7/tutorial)\n - [Python Language Reference](http://docs.python.org/2.7/reference)\n* If you're interested in Python 3, the [Official Python 3 Docs are here](http://docs.python.org/3/).\n* [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html).\n* [Learn Python The Hard Way](http://learnpythonthehardway.org/book/)\n* [Dive Into Python](http://www.diveintopython.net/), in particular if you're interested in Python 3.\n* [Invent With Python](http://inventwithpython.com/), probably best for kids.\n* [Python Functional Programming HOWTO](http://docs.python.org/2/howto/functional.html)\n* [The Structure and Interpretation of Computer Programs](http://mitpress.mit.edu/sicp/full-text/book/book.html), written in Scheme, a Lisp dialect, but one of the best books on computer programming ever written.\n* [Generator Tricks for Systems Programmers](http://www.dabeaz.com/generators/) Beazley's slides on just what generators can do for you.\n* [Python Module of the Week](http://pymotw.com/2/contents.html) is a series going through in-depth analysis of the Python standard library in a very easy to understand way.\n\n## Badass IPython Notebooks\n* Rob Johansson's [excellent notebooks](http://jrjohansson.github.io/), including [Scientific Computing with Python](https://github.com/jrjohansson/scientific-python-lectures) and [Computational Quantum Physics with QuTiP](https://github.com/jrjohansson/qutip-lectures) lectures;\n* [XKCD style graphs in matplotlib](http://nbviewer.ipython.org/url/jakevdp.github.com/downloads/notebooks/XKCD_plots.ipynb);\n* [A collection of Notebooks for using IPython effectively](https://github.com/ipython/ipython/tree/master/examples/notebooks#a-collection-of-notebooks-for-using-ipython-effectively)\n* [A gallery of interesting IPython 
Notebooks](https://github.com/ipython/ipython/wiki/A-gallery-of-interesting-IPython-Notebooks)\n* [Cross-disciplinary computational analysis IPython Notebooks From Hadoop World 2012](https://github.com/invisibleroads/crosscompute-tutorials)\n* [Quantites](http://nbviewer.ipython.org/urls/raw.github.com/tbekolay/pyconca2012/master/QuantitiesTutorial.ipynb) Units in Python.\n - [Another units module is here](http://www.southampton.ac.uk/~fangohr/blog/)\n\n## Packages for Scientists\nImportant libraries\n\n* [Python](http://www.python.org) version 2.7;\n* [Numpy](http://www.numpy.org), the core numerical extensions for linear algebra and multidimensional arrays;\n* [Scipy](http://www.scipy.org), additional libraries for scientific programming;\n* [Matplotlib](http://matplotlib.sf.net), excellent plotting and graphing libraries;\n* [IPython](http://ipython.org), with the additional libraries required for the notebook interface.\n* [Sympy](http://sympy.org), symbolic math in Python\n* [Pandas](http://pandas.pydata.org/) library for big data in Python\n\nOther packages of interest\n\n* [PyQuante](http://pyquante.sf.net) Python Quantum Chemistry\n* [QuTiP](https://code.google.com/p/qutip/) Quantum Toolbox in Python\n* Konrad Hinsen's [Scientific Python](http://dirac.cnrs-orleans.fr/plone/software/scientificpython/) and [MMTK](http://dirac.cnrs-orleans.fr/MMTK/)\n* [Atomic Simulation Environment](https://wiki.fysik.dtu.dk/ase/)\n\n\n## Cool Stuff\n* [Moin Moin](http://moinmo.in/), a wiki written in Python\n* [Project Euler](http://projecteuler.net/), programming problems that would (?) have interested Euler. Python is one of the most commonly used languages there.",
"_____no_output_____"
],
[
"# VI. Acknowledgements\nThanks to Alex and Tess for everything!\n\nThanks to Barbara Muller and Tom Tarman for helpful suggestions.",
"_____no_output_____"
],
[
"This work is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US). The work is offered for free, with the hope that it will be useful. Please consider making a donation to the [John Hunter Memorial Fund](http://numfocus.org/johnhunter/).\n\n\n\n\n\nSandia is a multiprogram laboratory operated by Sandia Corporation, a Lockheed Martin Company, for the United States Department of Energy's National Nuclear Security Administration under Contract DE-AC04-94AL85000.\n\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
ec6560528e3c6a24aeeedce3249063f9d6503dca | 350,795 | ipynb | Jupyter Notebook | beginners_espanol/01_1.Weather_Data_Collection.ipynb | xanderg666/redbull_es | de31ae367a3404f8dbf974820d8cf9f56045ac32 | [
"UPL-1.0"
]
| null | null | null | beginners_espanol/01_1.Weather_Data_Collection.ipynb | xanderg666/redbull_es | de31ae367a3404f8dbf974820d8cf9f56045ac32 | [
"UPL-1.0"
]
| null | null | null | beginners_espanol/01_1.Weather_Data_Collection.ipynb | xanderg666/redbull_es | de31ae367a3404f8dbf974820d8cf9f56045ac32 | [
"UPL-1.0"
]
| null | null | null | 465.245358 | 267,932 | 0.932827 | [
[
[
"# Recopilación de datos meteorológicos para carreras de Fórmula 1",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"import time\nstart = time.time()",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nfrom selenium import webdriver\nimport time",
"_____no_output_____"
],
[
"races = pd.read_csv('./data/races.csv')",
"_____no_output_____"
],
[
"races.head()",
"_____no_output_____"
],
[
"races.shape",
"_____no_output_____"
]
],
[
[
"## Weather Dataset Analysis",
"_____no_output_____"
]
],
[
[
"weather_info = pd.read_csv('./data/weather.csv')",
"_____no_output_____"
],
[
"weather_info.shape",
"_____no_output_____"
],
[
"weather_info.head()",
"_____no_output_____"
],
[
"weather_info.tail()",
"_____no_output_____"
]
],
[
[
"## Feature Distributions\n\nWe can also look at the distribution of each attribute by discretization the values into buckets and review the frequency in each bucket as histograms.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nfig = plt.figure(figsize=(30,30))\nax = fig.gca()\nweather_info.hist(ax = ax)",
"_____no_output_____"
]
],
[
[
"## Feature-Feature Relationships",
"_____no_output_____"
]
],
[
[
"from pandas.plotting import scatter_matrix\nscatter_matrix(weather_info, alpha=0.2, figsize=(30, 30), diagonal='kde')",
"_____no_output_____"
]
],
[
[
"From this chart, we can see that we have numeric variables x7 as columns. The diagonal shows the distribution of the seven numeric variables of our dataframe. We are using the density plot on the diagonal.",
"_____no_output_____"
]
],
[
[
"end = time.time()",
"_____no_output_____"
],
[
"import datetime\nstr(datetime.timedelta(seconds=(end - start)))",
"_____no_output_____"
],
[
"print(str(end - start)+\" seconds\")",
"17.624802827835083 seconds\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec658c39f10f2396f213faebec0c3bc907dab235 | 10,046 | ipynb | Jupyter Notebook | homework02.ipynb | Selen34/tvms | db6ffad71883a3ae360d427331e764d558ba8045 | [
"Unlicense"
]
| null | null | null | homework02.ipynb | Selen34/tvms | db6ffad71883a3ae360d427331e764d558ba8045 | [
"Unlicense"
]
| null | null | null | homework02.ipynb | Selen34/tvms | db6ffad71883a3ae360d427331e764d558ba8045 | [
"Unlicense"
]
| null | null | null | 19.171756 | 282 | 0.483775 | [
[
[
"$$P_n(X=k) = C_n^k p^k q^{n-k}$$",
"_____no_output_____"
],
[
"$$M(X) = np$$",
"_____no_output_____"
],
[
"$$D(X) = npq$$",
"_____no_output_____"
],
[
"$$P_m \\approx \\frac{\\lambda^m}{m!}e^{-\\lambda}$$",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom math import factorial",
"_____no_output_____"
],
[
"def C(n, k):\n return factorial(n)/(factorial(k)*(factorial(n-k)))",
"_____no_output_____"
]
],
[
[
"### Задача 1",
"_____no_output_____"
],
[
"Вероятность того, что стрелок попадет в мишень, выстрелив один раз, равна 0.8. Стрелок выстрелил 100 раз. Найдите вероятность того, что стрелок попадет в цель ровно 85 раз.",
"_____no_output_____"
],
[
"Решаем по формуле Бернулли",
"_____no_output_____"
]
],
[
[
"p = 0.8\nq = 1-p\nn = 100\nk = 85",
"_____no_output_____"
],
[
"prob = C(n, k)*(p**k)*(q**(n-k))\nprob",
"_____no_output_____"
]
],
[
[
"### Задача 2",
"_____no_output_____"
],
[
"Вероятность того, что лампочка перегорит в течение первого дня эксплуатации, равна 0.0004. В жилом комплексе после ремонта в один день включили 5000 новых лампочек. Какова вероятность, что ни одна из них не перегорит в первый день? Какова вероятность, что перегорят ровно две?",
"_____no_output_____"
],
[
"Решаем по формуле Пуасона",
"_____no_output_____"
]
],
[
[
"p = 0.0004\nn = 5000\ny = n*p",
"_____no_output_____"
],
[
"# вероятность что ни одна не перегорит\nm = 0\nprob = y**m/factorial(m)*np.exp(-y)\nprob",
"_____no_output_____"
],
[
"# вероятность что перегорит ровно две\nm = 2\nprob = y**m/factorial(m)*np.exp(-y)\nprob",
"_____no_output_____"
]
],
[
[
"### Задача 3",
"_____no_output_____"
],
[
"Монету подбросили 144 раза. Какова вероятность, что орел выпадет ровно 70 раз?",
"_____no_output_____"
],
[
"Аналогична первыой - решаем по формуле Бернулли",
"_____no_output_____"
]
],
[
[
"p = 0.5\nq = 1 - p\nn = 144\nk = 70",
"_____no_output_____"
],
[
"prob = C(n, k)*(p**k)*(q**(n-k))\nprob",
"_____no_output_____"
]
],
[
[
"### Задача 4",
"_____no_output_____"
],
[
"В первом ящике находится 10 мячей, из которых 7 - белые. Во втором ящике - 11 мячей, из которых 9 белых. Из каждого ящика вытаскивают случайным образом по два мяча.",
"_____no_output_____"
],
[
"### Какова вероятность того, что все мячи белые?",
"_____no_output_____"
],
[
"способ 1",
"_____no_output_____"
]
],
[
[
"P = C(7,2)/C(10,2)*C(9,2)/C(11,2)\nP",
"_____no_output_____"
]
],
[
[
"способ 2",
"_____no_output_____"
]
],
[
[
"P = (7/10)*(6/9)*(9/11)*(8/10)\nP",
"_____no_output_____"
]
],
[
[
"### Какова вероятность того, что ровно два мяча белые?",
"_____no_output_____"
],
[
"найдем 3 варианта:\n- 2 белых их первого и 2 черных из второго\n- 2 черных из первого и 2 белых из второго\n- 1 белый и 1 черный из первого и 1 белый и 1 черный из второго\n\nи сложим их",
"_____no_output_____"
]
],
[
[
"P1 = C(7,2)/C(10,2)*C(2,2)/C(11,2)\nprint('P1', P1)\nP2 = C(3,2)/C(10,2)*C(9,2)/C(11,2)\nprint('P2', P2)\nP3 = C(7,1)*C(3,1)/C(10,2)*C(9,1)*C(2,1)/C(11,2)\nprint('P3', P3)\nP = P1 + P2 + P3\nprint('P:', P)",
"P1 0.008484848484848486\nP2 0.04363636363636363\nP3 0.15272727272727274\nP: 0.20484848484848486\n"
]
],
[
[
"Найдем то же самое другим способом",
"_____no_output_____"
]
],
[
[
"P1 = (7/10)*(6/9)*(2/11)*(1/10)\nprint('P1', P1)\nP2 = (3/10)*(2/9)*(9/11)*(8/10)\nprint('P2', P2)\nP3 = (7/10)*(3/9)*(9/11)*(2/10)*4\nprint('P3', P3)\nP = P1 + P2 + P3\nprint('P', P)",
"P1 0.008484848484848484\nP2 0.04363636363636364\nP3 0.1527272727272727\nP 0.20484848484848484\n"
]
],
[
[
"Ответ совпал, но чтобы найти вероятность P3 этим способом пришлось проводить эксперименты, т.к. решение оказалось не совсем очевидным (если считать без множителя 4):",
"_____no_output_____"
]
],
[
[
"# вероятность вытащить из первого ящика сначала белый а потом черный почти такая же как если сначала черный а потом белый\nprint((7/10)*(3/9))\nprint((3/10)*(7/9))",
"0.2333333333333333\n0.23333333333333334\n"
]
],
[
[
"всего получается 4 варианта - \n- БЧБЧ\n- БЧЧБ\n- ЧББЧ\n- ЧБЧБ\n\nт.е. исходную вероятность `(7/10)*(3/9)*(9/11)*(2/10)` пришлось умножить на произведение размещений $$A_2^2 \\cdot A_2^2$$",
"_____no_output_____"
],
[
"Но возможно множитель 4 получился не отсюда...",
"_____no_output_____"
],
[
"### Какова вероятность того, что хотя бы один мяч белый?",
"_____no_output_____"
],
[
"Найдем вероятность что все мячи черные и вычтем ее из 1",
"_____no_output_____"
]
],
[
[
"# способ 1\nP = 1 - C(3,2)/C(10,2)*C(2,2)/C(11,2)\nP",
"_____no_output_____"
],
[
"# способ 2\nP = 1 - (3/10)*(2/9)*(2/11)*(1/10)\nP",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
]
]
|
ec6591f453b423bb633c41355c5e526d7023d1fd | 160,632 | ipynb | Jupyter Notebook | ch03/3_3_Training_for_Regression_Models.ipynb | arifmudi/Deep-Learning-with-MXNet-Cookbook | ce9cb41e87fdcb6bd0758c5924368ce5756d097c | [
"MIT"
]
| 4 | 2021-04-16T20:32:52.000Z | 2021-10-29T19:24:23.000Z | ch03/3_3_Training_for_Regression_Models.ipynb | arifmudi/Deep-Learning-with-MXNet-Cookbook | ce9cb41e87fdcb6bd0758c5924368ce5756d097c | [
"MIT"
]
| null | null | null | ch03/3_3_Training_for_Regression_Models.ipynb | arifmudi/Deep-Learning-with-MXNet-Cookbook | ce9cb41e87fdcb6bd0758c5924368ce5756d097c | [
"MIT"
]
| 1 | 2021-08-09T07:58:54.000Z | 2021-08-09T07:58:54.000Z | 119.964152 | 35,564 | 0.817907 | [
[
[
"# Run the command below if necessary, for example with Google Colab\n#!python3 -m pip install mxnet-cu110",
"_____no_output_____"
],
[
"# Global Libs\nimport matplotlib.pyplot as plt\nimport mxnet as mx\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport random\n\n# Seeds for reproducibility\nmx.random.seed(1)\nnp.random.seed(1)\nrandom.seed(1)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\n\n# Local libs\nimport model",
"_____no_output_____"
],
[
"# Loading data\nfull_house_df = pd.read_csv(\"kc_house_data.csv\")",
"_____no_output_____"
],
[
"full_house_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 21613 entries, 0 to 21612\nData columns (total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 21613 non-null int64 \n 1 date 21613 non-null object \n 2 price 21613 non-null float64\n 3 bedrooms 21613 non-null int64 \n 4 bathrooms 21613 non-null float64\n 5 sqft_living 21613 non-null int64 \n 6 sqft_lot 21613 non-null int64 \n 7 floors 21613 non-null float64\n 8 waterfront 21613 non-null int64 \n 9 view 21613 non-null int64 \n 10 condition 21613 non-null int64 \n 11 grade 21613 non-null int64 \n 12 sqft_above 21613 non-null int64 \n 13 sqft_basement 21613 non-null int64 \n 14 yr_built 21613 non-null int64 \n 15 yr_renovated 21613 non-null int64 \n 16 zipcode 21613 non-null int64 \n 17 lat 21613 non-null float64\n 18 long 21613 non-null float64\n 19 sqft_living15 21613 non-null int64 \n 20 sqft_lot15 21613 non-null int64 \ndtypes: float64(5), int64(15), object(1)\nmemory usage: 3.5+ MB\n"
],
[
"# Working with all features\n#house_df = full_house_df[[\"price\", \"sqft_living\", \"bathrooms\", \"grade\"]]\nhouse_df = full_house_df.drop([\"date\", \"id\"], axis=1)",
"_____no_output_____"
],
[
"# Let's remove the outlier we found on recipe 2.1\nhouse_df.drop(15870, inplace=True)",
"_____no_output_____"
],
[
"house_df.head()",
"_____no_output_____"
],
[
"# Dataset Split 80/10/10\nfrom sklearn.model_selection import train_test_split\n\nfull_train_df, test_df = train_test_split(house_df, test_size=0.2, random_state=42)\n\n# To match correctly 10% size, we use previous size as reference\ntrain_df, val_df = train_test_split(full_train_df, test_size=len(test_df), random_state=42)",
"_____no_output_____"
],
[
"number_plots = 3\n# In this case, matplotlib parameter sharey would not set the right scale,\n# as validation and test sets are 8 time smaller\nfig, axs = plt.subplots(1, number_plots, sharex = True, sharey = False)\n\ntrain_df.hist(column = \"price\", bins = 24, ax = axs[0])\nval_df.hist(column = \"price\", bins = 24, ax = axs[1])\ntest_df.hist(column = \"price\", bins = 24, ax = axs[2])\nfig.suptitle(\"Price Distribution for Training, Validation and Test sets\")\nfor i in range(number_plots):\n axs[i].set_title(None)\n axs[i].set_xlabel(\"Price\")\naxs[0].set_ylabel(\"Number of Houses\")\nplt.show()",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(1, number_plots, sharex = True, sharey = True)\n\naxs[0].scatter(train_df[\"sqft_living\"], train_df[\"price\"], alpha = 0.1)\naxs[1].scatter(val_df[\"sqft_living\"], val_df[\"price\"], alpha = 0.1)\naxs[2].scatter(test_df[\"sqft_living\"], test_df[\"price\"], alpha = 0.1)\nfig.suptitle(\"Living Sqft for Training, Validation and Test sets\")\nfor i in range(number_plots):\n axs[i].set_title(None)\n axs[i].set_xlabel(\"Living Sqft\")\naxs[0].set_ylabel(\"Price\")\nplt.show()",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(1, number_plots, sharex = True, sharey = True)\n\naxs[0].scatter(train_df[\"bathrooms\"], train_df[\"price\"], alpha = 0.1)\naxs[1].scatter(val_df[\"bathrooms\"], val_df[\"price\"], alpha = 0.1)\naxs[2].scatter(test_df[\"bathrooms\"], test_df[\"price\"], alpha = 0.1)\nfig.suptitle(\"Bathrooms for Training, Validation and Test sets\")\nfor i in range(number_plots):\n axs[i].set_title(None)\n axs[i].set_xlabel(\"Number of Bathrooms\")\naxs[0].set_ylabel(\"Price\")\nplt.show()",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(1, number_plots, sharex = True, sharey = True)\n\naxs[0].scatter(train_df[\"grade\"], train_df[\"price\"], alpha = 0.1)\naxs[1].scatter(val_df[\"grade\"], val_df[\"price\"], alpha = 0.1)\naxs[2].scatter(test_df[\"grade\"], test_df[\"price\"], alpha = 0.1)\nfig.suptitle(\"Bathrooms for Training, Validation and Test sets\")\nfor i in range(number_plots):\n axs[i].set_title(None)\n axs[i].set_xlabel(\"Grade\")\naxs[0].set_ylabel(\"Price\")\nplt.show()",
"_____no_output_____"
],
[
"print(\"Price Range:\", house_df[\"price\"].min(), house_df[\"price\"].max())\nprint(\"Living Sqft Range:\", house_df[\"sqft_living\"].min(), house_df[\"sqft_living\"].max())\nprint(\"Number of Bathrooms Range:\", house_df[\"bathrooms\"].min(), house_df[\"bathrooms\"].max())\nprint(\"Grade Range:\", house_df[\"grade\"].min(), house_df[\"grade\"].max())",
"Price Range: 75000.0 7700000.0\nLiving Sqft Range: 290 13540\nNumber of Bathrooms Range: 0.0 8.0\nGrade Range: 1 13\n"
],
[
"# Data Preprocessing Training set\n# 2 Steps:\n\n# Step 1: One-hot encoding for Grade\n# Grade goes from 1 to 13\ngrades_list = list(map(str, range(1, 14)))\n\ngrade_onehot_train_df_aux = pd.get_dummies(train_df.grade, prefix='', prefix_sep='')\ngrade_onehot_val_df_aux = pd.get_dummies(val_df.grade, prefix='', prefix_sep='')\ngrade_onehot_test_df_aux = pd.get_dummies(test_df.grade, prefix='', prefix_sep='')\n\n# Not all grade values might be present in the data, so we must fill those values with 0\ngrade_onehot_train_df = grade_onehot_train_df_aux.T.reindex(grades_list).T.fillna(0).astype(dtype=np.float32)\ngrade_onehot_val_df = grade_onehot_val_df_aux.T.reindex(grades_list).T.fillna(0).astype(dtype=np.float32)\ngrade_onehot_test_df = grade_onehot_test_df_aux.T.reindex(grades_list).T.fillna(0).astype(dtype=np.float32)\n\n# These one-hot values/columns will be added to the input data after scaling the other variables",
"_____no_output_____"
],
[
"# Grade not needed anymore\ntrain_df = train_df.drop([\"grade\"], axis=1)\nval_df = val_df.drop([\"grade\"], axis=1)\ntest_df = test_df.drop([\"grade\"], axis=1)\n\n# Grade feature now is stored in 13 columns",
"_____no_output_____"
],
[
"def scale_features(features, sc = None):\n \"\"\"\n Applies Standard Scaling to X, y\n returns scaled version of X,y\n \"\"\"\n if not sc:\n sc = StandardScaler()\n scaled_features = sc.fit_transform(features)\n return scaled_features, sc",
"_____no_output_____"
],
[
"# Data Preprocessing Training set\n\n# Step 2: Feature Scaling for Training, Validation and Testing\ntrain_df_no_price = train_df.drop([\"price\"], axis=1)\nval_df_no_price = val_df.drop([\"price\"], axis=1)\ntest_df_no_price = test_df.drop([\"price\"], axis=1)\n\nX_train = np.array(train_df_no_price.to_numpy(), dtype=np.float32)\ny_train = np.array(train_df[\"price\"].to_numpy().reshape(-1,1), dtype=np.float32)\nX_val = np.array(val_df.drop([\"price\"], axis=1).to_numpy(), dtype=np.float32)\ny_val = np.array(val_df[\"price\"].to_numpy().reshape(-1,1), dtype=np.float32)\nX_test = np.array(test_df.drop([\"price\"], axis=1).to_numpy(), dtype=np.float32)\ny_test = np.array(test_df[\"price\"].to_numpy().reshape(-1,1), dtype=np.float32)\n\nscaled_X_train, sc_X = scale_features(X_train)\nscaled_y_train, sc_y = scale_features(y_train)\nscaled_X_val, _ = scale_features(X_val, sc_X)\nscaled_y_val, _ = scale_features(y_val, sc_y)\nscaled_X_test, _ = scale_features(X_test, sc_X)\nscaled_y_test, _ = scale_features(y_test, sc_y)",
"_____no_output_____"
],
[
"# Add One-Hot Encoding fields (no scaling for these)\nscaled_X_train_df = pd.DataFrame(scaled_X_train, index=train_df_no_price.index, columns=train_df_no_price.columns)\nscaled_X_val_df = pd.DataFrame(scaled_X_val, index=val_df_no_price.index, columns=val_df_no_price.columns)\nscaled_X_test_df = pd.DataFrame(scaled_X_test, index=test_df_no_price.index, columns=test_df_no_price.columns)\n\nscaled_X_train_onehot_df = pd.concat([scaled_X_train_df, grade_onehot_train_df], axis=1)\nscaled_X_val_onehot_df = pd.concat([scaled_X_val_df, grade_onehot_val_df], axis=1)\nscaled_X_test_onehot_df = pd.concat([scaled_X_test_df, grade_onehot_test_df], axis=1)",
"_____no_output_____"
],
[
"scaled_X_train_onehot_df.head()",
"_____no_output_____"
],
[
"# Saving data to analyse further\nwith open(\"data.pkl\", \"wb\") as f:\n pickle.dump([X_train, y_train, X_val, y_val, X_test, y_test], f)\n\nwith open(\"scaled_data.pkl\", \"wb\") as f:\n pickle.dump([scaled_X_train_onehot_df, scaled_X_val_onehot_df, scaled_X_test_onehot_df, sc_X,\n scaled_y_train, scaled_y_val, scaled_y_test, sc_y], f)",
"_____no_output_____"
],
[
"# Prepare datasets to work with DataLoader\ntraining_set = mx.gluon.data.dataset.ArrayDataset(scaled_X_train_onehot_df.to_numpy(), scaled_y_train)\nvalidation_set = mx.gluon.data.dataset.ArrayDataset(scaled_X_val_onehot_df.to_numpy(), scaled_y_val)\ntest_set = mx.gluon.data.dataset.ArrayDataset(scaled_X_test_onehot_df.to_numpy(), scaled_y_test)\n\nscaled_example_input = mx.nd.array([scaled_X_train_onehot_df.values[0]])\n\n# training_set = mx.gluon.data.dataset.ArrayDataset(scaled_X_train, scaled_y_train)\n# validation_set = mx.gluon.data.dataset.ArrayDataset(scaled_X_val, scaled_y_val)\n# test_set = mx.gluon.data.dataset.ArrayDataset(scaled_X_test, scaled_y_test)\n\n# scaled_example_input = mx.nd.array([scaled_X_train[0]])",
"_____no_output_____"
],
[
"net = model.create_regression_network()",
"_____no_output_____"
],
[
"# Initialization\n\n# We need to set the context of the data, GPU in this case\n# Swap comments in the next 2 lines for CPU\nctx = mx.gpu()\n# ctx = mx.cpu()\n\nnet.collect_params().initialize(mx.init.MSRAPrelu(), ctx=ctx, force_reinit=True)",
"[03:52:42] ../src/base.cc:80: cuDNN lib mismatch: linked-against version 8101 != compiled-against version 8100. Set MXNET_CUDNN_LIB_CHECKING=0 to quiet this warning.\n"
],
[
"# Calculate number of trainable parameters\nnet.summary(scaled_example_input.as_in_context(ctx))",
"--------------------------------------------------------------------------------\n Layer (type) Output Shape Param #\n================================================================================\n Input (1, 30) 0\n Dense-1 (1, 128) 3968\n BatchNorm-2 (1, 128) 512\n Activation-3 (1, 128) 0\n Dropout-4 (1, 128) 0\n Dense-5 (1, 1024) 132096\n BatchNorm-6 (1, 1024) 4096\n Activation-7 (1, 1024) 0\n Dropout-8 (1, 1024) 0\n Dense-9 (1, 128) 131200\n BatchNorm-10 (1, 128) 512\n Activation-11 (1, 128) 0\n Dropout-12 (1, 128) 0\n Dense-13 (1, 1) 129\n================================================================================\nParameters in forward computation graph, duplicate included\n Total params: 272513\n Trainable params: 269953\n Non-trainable params: 2560\nShared params in forward computation graph: 0\nUnique parameters in model: 272513\n--------------------------------------------------------------------------------\n"
],
[
"# Training Loop, saving best model\nmodel_file_name = \"net.params\"\n\ndef training_loop(loss_fn, trainer, epochs, batch_size, training_set, validation_set, ctx = mx.gpu()):\n min_val_loss = float(\"inf\")\n \n # Returned values (use-case: plotting losses)\n training_loss, validation_loss = [], []\n \n # Iterator for Gluon data access\n training_data_iterator = mx.gluon.data.DataLoader(training_set, batch_size=batch_size, shuffle=True)\n validation_data_iterator = mx.gluon.data.DataLoader(validation_set, batch_size=batch_size, shuffle=False)\n \n num_training_batches = len(training_set) / batch_size\n num_validation_batches = len(validation_set) / batch_size\n\n for e in range(epochs):\n \n cumulative_loss = 0\n # inner loop\n for data, label in training_data_iterator:\n data = data.as_in_context(ctx)\n label = label.as_in_context(ctx)\n with mx.autograd.record():\n output = net(data)\n loss = loss_fn(output, label)\n loss.backward()\n trainer.step(batch_size)\n current_loss = mx.nd.mean(loss).asscalar()\n cumulative_loss += current_loss / num_training_batches\n \n # Validation Loss\n cumulative_val_loss = 0\n for data, label in validation_data_iterator:\n data = data.as_in_context(ctx)\n label = label.as_in_context(ctx)\n output = net(data)\n val_loss = loss_fn(output, label)\n current_val_loss = mx.nd.mean(val_loss).asscalar()\n cumulative_val_loss += current_val_loss / num_validation_batches\n \n added_info = \"\"\n if cumulative_val_loss < min_val_loss:\n added_info = \" --- Updating saved model\"\n min_val_loss = cumulative_val_loss\n net.save_parameters(model_file_name)\n \n desc = f\"E: {e:4d}, TrL: {cumulative_loss:8.6f}, VL: {cumulative_val_loss:8.6f}\" + added_info\n print(desc)\n \n # Saving loss values\n training_loss.append(cumulative_loss)\n validation_loss.append(cumulative_val_loss)\n \n return training_loss, validation_loss",
"_____no_output_____"
],
[
"# Define Loss Function\nloss_fn = mx.gluon.loss.HuberLoss()\n#loss_fn = mx.gluon.loss.L1Loss()\n#loss_fn = mx.gluon.loss.L2Loss()\n\n# Define Optimizer and Hyper Parameters\ntrainer = mx.gluon.Trainer(net.collect_params(), \"adam\", {\"learning_rate\": 0.01})\n#trainer = mx.gluon.Trainer(net.collect_params(), \"sgd\", {\"learning_rate\": 0.001})\n\n# Epochs & Batch Size\n# Optimal values: \"adam\", lr: 0.01, 100 epochs, batch size 128\nepochs = 200\nbatch_size = 128\n\nnet.collect_params().initialize(mx.init.MSRAPrelu(), ctx=ctx, force_reinit=True)\n\ntraining_loss, validation_loss = training_loop(loss_fn, trainer, epochs, batch_size, training_set, validation_set, ctx)",
"E: 0, TrL: 0.216299, VL: 0.105190 --- Updating saved model\nE: 1, TrL: 0.114229, VL: 0.087984 --- Updating saved model\nE: 2, TrL: 0.106613, VL: 0.091095\nE: 3, TrL: 0.097475, VL: 0.073358 --- Updating saved model\nE: 4, TrL: 0.095682, VL: 0.095781\nE: 5, TrL: 0.094447, VL: 0.073520\nE: 6, TrL: 0.088221, VL: 0.072670 --- Updating saved model\nE: 7, TrL: 0.086486, VL: 0.064848 --- Updating saved model\nE: 8, TrL: 0.086593, VL: 0.063994 --- Updating saved model\nE: 9, TrL: 0.084193, VL: 0.081723\nE: 10, TrL: 0.083392, VL: 0.081826\nE: 11, TrL: 0.085619, VL: 0.065957\nE: 12, TrL: 0.083586, VL: 0.061370 --- Updating saved model\nE: 13, TrL: 0.078926, VL: 0.063318\nE: 14, TrL: 0.084643, VL: 0.062237\nE: 15, TrL: 0.080406, VL: 0.066720\nE: 16, TrL: 0.077671, VL: 0.092025\nE: 17, TrL: 0.079065, VL: 0.062259\nE: 18, TrL: 0.075309, VL: 0.060337 --- Updating saved model\nE: 19, TrL: 0.073488, VL: 0.057444 --- Updating saved model\nE: 20, TrL: 0.074976, VL: 0.063040\nE: 21, TrL: 0.073711, VL: 0.064794\nE: 22, TrL: 0.075384, VL: 0.072185\nE: 23, TrL: 0.073066, VL: 0.059715\nE: 24, TrL: 0.074281, VL: 0.058936\nE: 25, TrL: 0.071981, VL: 0.058159\nE: 26, TrL: 0.072249, VL: 0.063877\nE: 27, TrL: 0.073320, VL: 0.076505\nE: 28, TrL: 0.071308, VL: 0.062848\nE: 29, TrL: 0.073431, VL: 0.058066\nE: 30, TrL: 0.071897, VL: 0.068149\nE: 31, TrL: 0.072718, VL: 0.064413\nE: 32, TrL: 0.075935, VL: 0.060878\nE: 33, TrL: 0.072598, VL: 0.059031\nE: 34, TrL: 0.071040, VL: 0.062101\nE: 35, TrL: 0.069606, VL: 0.056390 --- Updating saved model\nE: 36, TrL: 0.069655, VL: 0.062937\nE: 37, TrL: 0.070429, VL: 0.054631 --- Updating saved model\nE: 38, TrL: 0.071691, VL: 0.065079\nE: 39, TrL: 0.069270, VL: 0.061135\nE: 40, TrL: 0.073569, VL: 0.065285\nE: 41, TrL: 0.073758, VL: 0.063150\nE: 42, TrL: 0.072005, VL: 0.058218\nE: 43, TrL: 0.070212, VL: 0.058572\nE: 44, TrL: 0.070118, VL: 0.058473\nE: 45, TrL: 0.070495, VL: 0.074738\nE: 46, TrL: 0.069987, VL: 0.064082\nE: 47, TrL: 0.068189, VL: 0.061376\nE: 
48, TrL: 0.070684, VL: 0.055269\nE: 49, TrL: 0.069195, VL: 0.058325\nE: 50, TrL: 0.067888, VL: 0.060269\nE: 51, TrL: 0.068590, VL: 0.058398\nE: 52, TrL: 0.069374, VL: 0.057508\nE: 53, TrL: 0.066458, VL: 0.065560\nE: 54, TrL: 0.068519, VL: 0.063957\nE: 55, TrL: 0.071169, VL: 0.057796\nE: 56, TrL: 0.066269, VL: 0.060113\nE: 57, TrL: 0.069054, VL: 0.054502 --- Updating saved model\nE: 58, TrL: 0.065915, VL: 0.059916\nE: 59, TrL: 0.067409, VL: 0.061951\nE: 60, TrL: 0.067600, VL: 0.060786\nE: 61, TrL: 0.066202, VL: 0.054787\nE: 62, TrL: 0.068624, VL: 0.066391\nE: 63, TrL: 0.066326, VL: 0.073789\nE: 64, TrL: 0.064658, VL: 0.052466 --- Updating saved model\nE: 65, TrL: 0.064762, VL: 0.053389\nE: 66, TrL: 0.069433, VL: 0.055919\nE: 67, TrL: 0.067005, VL: 0.054598\nE: 68, TrL: 0.066529, VL: 0.054448\nE: 69, TrL: 0.067031, VL: 0.051512 --- Updating saved model\nE: 70, TrL: 0.061980, VL: 0.052779\nE: 71, TrL: 0.064806, VL: 0.061064\nE: 72, TrL: 0.068644, VL: 0.074479\nE: 73, TrL: 0.067914, VL: 0.071089\nE: 74, TrL: 0.062484, VL: 0.058629\nE: 75, TrL: 0.062409, VL: 0.059704\nE: 76, TrL: 0.064707, VL: 0.060343\nE: 77, TrL: 0.064537, VL: 0.055997\nE: 78, TrL: 0.064403, VL: 0.054731\nE: 79, TrL: 0.065475, VL: 0.054536\nE: 80, TrL: 0.066189, VL: 0.053364\nE: 81, TrL: 0.067312, VL: 0.072062\nE: 82, TrL: 0.064150, VL: 0.063455\nE: 83, TrL: 0.067342, VL: 0.062526\nE: 84, TrL: 0.063298, VL: 0.055641\nE: 85, TrL: 0.071246, VL: 0.056285\nE: 86, TrL: 0.067946, VL: 0.055146\nE: 87, TrL: 0.066583, VL: 0.065440\nE: 88, TrL: 0.065157, VL: 0.051909\nE: 89, TrL: 0.063362, VL: 0.058382\nE: 90, TrL: 0.062991, VL: 0.061009\nE: 91, TrL: 0.061336, VL: 0.057842\nE: 92, TrL: 0.063952, VL: 0.052312\nE: 93, TrL: 0.062912, VL: 0.056960\nE: 94, TrL: 0.063202, VL: 0.054498\nE: 95, TrL: 0.065451, VL: 0.057543\nE: 96, TrL: 0.064491, VL: 0.061149\nE: 97, TrL: 0.061253, VL: 0.075612\nE: 98, TrL: 0.062619, VL: 0.052321\nE: 99, TrL: 0.064859, VL: 0.069082\nE: 100, TrL: 0.067216, VL: 0.066702\nE: 101, TrL: 
0.061837, VL: 0.056486\nE: 102, TrL: 0.064851, VL: 0.053601\nE: 103, TrL: 0.063791, VL: 0.061350\nE: 104, TrL: 0.066557, VL: 0.062553\nE: 105, TrL: 0.064700, VL: 0.058782\nE: 106, TrL: 0.060465, VL: 0.058328\nE: 107, TrL: 0.063086, VL: 0.063850\nE: 108, TrL: 0.063130, VL: 0.056596\nE: 109, TrL: 0.064709, VL: 0.060259\nE: 110, TrL: 0.061714, VL: 0.054255\nE: 111, TrL: 0.062747, VL: 0.054015\nE: 112, TrL: 0.063493, VL: 0.056634\nE: 113, TrL: 0.060409, VL: 0.060101\nE: 114, TrL: 0.062352, VL: 0.066229\nE: 115, TrL: 0.064932, VL: 0.058567\nE: 116, TrL: 0.060930, VL: 0.055926\nE: 117, TrL: 0.061785, VL: 0.066638\nE: 118, TrL: 0.061152, VL: 0.051879\nE: 119, TrL: 0.061195, VL: 0.059654\nE: 120, TrL: 0.062517, VL: 0.053890\nE: 121, TrL: 0.064336, VL: 0.069348\nE: 122, TrL: 0.063307, VL: 0.067507\nE: 123, TrL: 0.062528, VL: 0.059475\nE: 124, TrL: 0.064088, VL: 0.052217\nE: 125, TrL: 0.062593, VL: 0.072805\nE: 126, TrL: 0.063016, VL: 0.052819\nE: 127, TrL: 0.060871, VL: 0.055918\nE: 128, TrL: 0.060788, VL: 0.057737\nE: 129, TrL: 0.061319, VL: 0.068341\nE: 130, TrL: 0.062824, VL: 0.056363\nE: 131, TrL: 0.061841, VL: 0.065526\nE: 132, TrL: 0.061134, VL: 0.054382\nE: 133, TrL: 0.064274, VL: 0.055112\nE: 134, TrL: 0.060970, VL: 0.054103\nE: 135, TrL: 0.058201, VL: 0.059942\nE: 136, TrL: 0.060538, VL: 0.058246\nE: 137, TrL: 0.060940, VL: 0.053427\nE: 138, TrL: 0.066262, VL: 0.052002\nE: 139, TrL: 0.057781, VL: 0.059798\nE: 140, TrL: 0.061775, VL: 0.053029\nE: 141, TrL: 0.061326, VL: 0.055040\nE: 142, TrL: 0.059491, VL: 0.053178\nE: 143, TrL: 0.062025, VL: 0.061984\nE: 144, TrL: 0.062997, VL: 0.059948\nE: 145, TrL: 0.063523, VL: 0.077052\nE: 146, TrL: 0.059826, VL: 0.053892\nE: 147, TrL: 0.061839, VL: 0.072692\nE: 148, TrL: 0.059771, VL: 0.052819\nE: 149, TrL: 0.062224, VL: 0.052914\nE: 150, TrL: 0.060653, VL: 0.059473\nE: 151, TrL: 0.059960, VL: 0.055691\nE: 152, TrL: 0.061071, VL: 0.059323\nE: 153, TrL: 0.059837, VL: 0.052283\nE: 154, TrL: 0.063387, VL: 0.056069\nE: 155, TrL: 
0.059460, VL: 0.059802\nE: 156, TrL: 0.057699, VL: 0.061790\nE: 157, TrL: 0.061830, VL: 0.057717\nE: 158, TrL: 0.063311, VL: 0.059692\nE: 159, TrL: 0.058798, VL: 0.059394\nE: 160, TrL: 0.060605, VL: 0.053572\nE: 161, TrL: 0.061237, VL: 0.055582\nE: 162, TrL: 0.061781, VL: 0.055681\nE: 163, TrL: 0.059537, VL: 0.055585\nE: 164, TrL: 0.061638, VL: 0.063619\nE: 165, TrL: 0.060828, VL: 0.058454\nE: 166, TrL: 0.059256, VL: 0.058184\nE: 167, TrL: 0.060657, VL: 0.067744\nE: 168, TrL: 0.060659, VL: 0.058081\nE: 169, TrL: 0.059656, VL: 0.058948\nE: 170, TrL: 0.057988, VL: 0.052073\nE: 171, TrL: 0.059720, VL: 0.055316\nE: 172, TrL: 0.058925, VL: 0.051606\nE: 173, TrL: 0.062069, VL: 0.067151\nE: 174, TrL: 0.059520, VL: 0.063146\nE: 175, TrL: 0.059813, VL: 0.058899\nE: 176, TrL: 0.057019, VL: 0.057804\nE: 177, TrL: 0.059512, VL: 0.055417\nE: 178, TrL: 0.060091, VL: 0.056600\nE: 179, TrL: 0.059112, VL: 0.058946\nE: 180, TrL: 0.058816, VL: 0.058428\nE: 181, TrL: 0.060741, VL: 0.056347\nE: 182, TrL: 0.061760, VL: 0.056107\nE: 183, TrL: 0.058209, VL: 0.054034\nE: 184, TrL: 0.059309, VL: 0.054832\nE: 185, TrL: 0.059819, VL: 0.058996\nE: 186, TrL: 0.059990, VL: 0.053486\nE: 187, TrL: 0.057216, VL: 0.061776\nE: 188, TrL: 0.059234, VL: 0.053211\nE: 189, TrL: 0.061543, VL: 0.063123\nE: 190, TrL: 0.060211, VL: 0.062869\nE: 191, TrL: 0.058964, VL: 0.055350\nE: 192, TrL: 0.058421, VL: 0.063377\nE: 193, TrL: 0.059000, VL: 0.053336\nE: 194, TrL: 0.056833, VL: 0.060441\nE: 195, TrL: 0.059955, VL: 0.058353\nE: 196, TrL: 0.059519, VL: 0.053708\nE: 197, TrL: 0.060155, VL: 0.053436\nE: 198, TrL: 0.057958, VL: 0.057475\nE: 199, TrL: 0.058119, VL: 0.059574\n"
],
[
"# Saving losses to analyse further\nwith open(\"losses.pkl\", \"wb\") as f:\n pickle.dump([training_loss, validation_loss], f)",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec65ada02cd45261e531589132e3f6b81e37edbc | 9,619 | ipynb | Jupyter Notebook | steps/step52.ipynb | choiking10/mytorch | 67140b608b14e2ec6ecca1638705af91d2d71b6b | [
"MIT"
]
| null | null | null | steps/step52.ipynb | choiking10/mytorch | 67140b608b14e2ec6ecca1638705af91d2d71b6b | [
"MIT"
]
| null | null | null | steps/step52.ipynb | choiking10/mytorch | 67140b608b14e2ec6ecca1638705af91d2d71b6b | [
"MIT"
]
| null | null | null | 29.965732 | 89 | 0.526042 | [
[
[
"import sys\nsys.path.append(\"..\")",
"_____no_output_____"
],
[
"import cupy as cp\n\nx = cp.arange(6).reshape(2, 3)\n\nx",
"_____no_output_____"
],
[
"y = x.sum(axis=1)\ny",
"_____no_output_____"
],
[
"import numpy as np\nimport cupy as cp\n\nn = np.array([1, 2, 3])\nc = cp.asarray(n)\n\nassert type(c) == cp.ndarray",
"_____no_output_____"
],
[
"c = cp.array([1, 2, 3])\nn = cp.asnumpy(c)\nassert type(n) == np.ndarray",
"_____no_output_____"
],
[
"x = np.array([1, 2, 3])\nxp = cp.get_array_module(x)\nassert xp == np\n\nx = cp.array([1, 2, 3])\nxp = cp.get_array_module(x)\nassert xp == cp",
"_____no_output_____"
],
[
"import time\n\nimport mytorch\nimport mytorch.functions as F\nfrom mytorch import optimizers, DataLoader\nfrom mytorch.models import MLP\n\nmax_epoch = 5\nbatch_size = 100\n\nhidden_size = 1000\ntrain_set = mytorch.datasets.MNIST(train=True)\ntest_set = mytorch.datasets.MNIST(train=False)\n\ntrain_loader = DataLoader(train_set, batch_size)\ntest_loader = DataLoader(test_set, batch_size, shuffle=False)\n\nmodel = MLP((hidden_size, hidden_size, 10))\noptim = optimizers.SGD(model.parameters)\n\nprint(\"not using GPU\")\n \ntrain_loss_saver = []\ntest_loss_saver = []\ntrain_acc_saver = []\ntest_acc_saver = []\n\n\nfor epoch in range(max_epoch):\n start = time.time()\n sum_loss, sum_acc = 0, 0\n for x, t in train_loader:\n pred = model(x)\n loss = F.softmax_cross_entropy(pred, t)\n acc = F.accuracy(pred, t)\n \n model.zerograd()\n loss.backward()\n optim.step()\n \n sum_loss += float(loss.data) * len(t)\n sum_acc += float(acc.data) * len(t)\n train_loss_saver.append((epoch, sum_loss / len(train_set)))\n train_acc_saver.append((epoch, sum_acc / len(train_set)))\n print(f\"[{epoch+1}/{max_epoch}] train_loss: {sum_loss / len(train_set)}, \"\n f\"accuracy: {sum_acc / len(train_set)}\")\n \n sum_loss, sum_acc = 0, 0\n with mytorch.no_grad():\n for x, t in test_loader:\n pred = model(x)\n loss = mytorch.functions.softmax_cross_entropy(pred, t)\n acc = mytorch.functions.accuracy(pred, t)\n sum_loss += float(loss.data) * len(t)\n sum_acc += float(acc.data) * len(t)\n test_loss_saver.append((epoch, sum_loss / len(test_set)))\n test_acc_saver.append((epoch, sum_acc / len(test_set)))\n print(f\"test_loss: {sum_loss / len(test_set)}, \"\n f\"accuracy: {sum_acc / len(test_set)}\")\n \n print(f\"elapse times : {time.time() - start}\")\n",
"not using GPU\n[1/5] train_loss: 2.2670746636390686, accuracy: 0.1744\ntest_loss: 2.2247932720184327, accuracy: 0.1748\nelapse times : 7.385754823684692\n[2/5] train_loss: 2.1723104294141136, accuracy: 0.36628333333333335\ntest_loss: 2.104503791332245, accuracy: 0.517\nelapse times : 6.0160486698150635\n[3/5] train_loss: 2.0241631187995277, accuracy: 0.5096\ntest_loss: 1.9135946869850158, accuracy: 0.4951\nelapse times : 6.045778512954712\n[4/5] train_loss: 1.78109676361084, accuracy: 0.6079166666666667\ntest_loss: 1.6147808802127839, accuracy: 0.6482\nelapse times : 6.117432355880737\n[5/5] train_loss: 1.4714031920830408, accuracy: 0.6770833333333334\ntest_loss: 1.3059992212057114, accuracy: 0.7352\nelapse times : 6.076480150222778\n"
],
[
"import time\n\nimport mytorch\nimport mytorch.functions as F\nfrom mytorch import optimizers, DataLoader\nfrom mytorch.models import MLP\n\nmax_epoch = 5\nbatch_size = 100\n\nhidden_size = 1000\ntrain_set = mytorch.datasets.MNIST(train=True)\ntest_set = mytorch.datasets.MNIST(train=False)\n\ntrain_loader = DataLoader(train_set, batch_size)\ntest_loader = DataLoader(test_set, batch_size, shuffle=False)\n\nmodel = MLP((hidden_size, hidden_size, 10))\noptim = optimizers.SGD(model.parameters)\n\nif mytorch.cuda.gpu_enable:\n print(\"using GPU\")\n train_loader.to_gpu()\n test_loader.to_gpu()\n model.to_gpu()\n\n \ntrain_loss_saver = []\ntest_loss_saver = []\ntrain_acc_saver = []\ntest_acc_saver = []\n\n\nfor epoch in range(max_epoch):\n start = time.time()\n sum_loss, sum_acc = 0, 0\n for x, t in train_loader:\n pred = model(x)\n loss = F.softmax_cross_entropy(pred, t)\n acc = F.accuracy(pred, t)\n \n model.zerograd()\n loss.backward()\n optim.step()\n \n sum_loss += float(loss.data) * len(t)\n sum_acc += float(acc.data) * len(t)\n train_loss_saver.append((epoch, sum_loss / len(train_set)))\n train_acc_saver.append((epoch, sum_acc / len(train_set)))\n print(f\"[{epoch+1}/{max_epoch}] train_loss: {sum_loss / len(train_set)}, \"\n f\"accuracy: {sum_acc / len(train_set)}\")\n \n sum_loss, sum_acc = 0, 0\n with mytorch.no_grad():\n for x, t in test_loader:\n pred = model(x)\n loss = mytorch.functions.softmax_cross_entropy(pred, t)\n acc = mytorch.functions.accuracy(pred, t)\n sum_loss += float(loss.data) * len(t)\n sum_acc += float(acc.data) * len(t)\n test_loss_saver.append((epoch, sum_loss / len(test_set)))\n test_acc_saver.append((epoch, sum_acc / len(test_set)))\n print(f\"test_loss: {sum_loss / len(test_set)}, \"\n f\"accuracy: {sum_acc / len(test_set)}\")\n print(f\"elapse times : {time.time() - start}\")\n",
"using GPU\n[1/5] train_loss: 2.2655182472719306, accuracy: 0.17836666666666667\ntest_loss: 2.224961286230049, accuracy: 0.1358\nelapse times : 5.637044906616211\n[2/5] train_loss: 2.1678672456258536, accuracy: 0.36741666666666667\ntest_loss: 2.097939332704492, accuracy: 0.4365\nelapse times : 5.357033014297485\n[3/5] train_loss: 2.013332341243734, accuracy: 0.5107833333333334\ntest_loss: 1.8958331486127955, accuracy: 0.643\nelapse times : 5.3505449295043945\n[4/5] train_loss: 1.7633395323662284, accuracy: 0.6083333333333333\ntest_loss: 1.5931521402023174, accuracy: 0.6747\nelapse times : 5.3990960121154785\n[5/5] train_loss: 1.4502346608947838, accuracy: 0.6776833333333333\ntest_loss: 1.2931785370676505, accuracy: 0.6486\nelapse times : 5.393986940383911\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec65b0b3d4c7cd5b9dee2414a8486ba4993f3dbd | 42,987 | ipynb | Jupyter Notebook | Pytorch_Vsumm_Python2.ipynb | TruongChiDien/DSN--Video-Summariztion | 07f87d48b38a5eb99683454c4538252c73c87600 | [
"MIT"
]
| null | null | null | Pytorch_Vsumm_Python2.ipynb | TruongChiDien/DSN--Video-Summariztion | 07f87d48b38a5eb99683454c4538252c73c87600 | [
"MIT"
]
| null | null | null | Pytorch_Vsumm_Python2.ipynb | TruongChiDien/DSN--Video-Summariztion | 07f87d48b38a5eb99683454c4538252c73c87600 | [
"MIT"
]
| null | null | null | 42,987 | 42,987 | 0.717217 | [
[
[
"# **Mount Drive**",
"_____no_output_____"
]
],
[
[
"%cd \"/content/drive/MyDrive/Colab Notebooks/CS106 - AI/pytorch-vsumm-reinforce\"",
"/content/drive/MyDrive/Colab Notebooks/CS106 - AI/pytorch-vsumm-reinforce\n"
],
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
],
[
"!git add .",
"_____no_output_____"
]
],
[
[
"# **SUMME**",
"_____no_output_____"
],
[
"# R_div + R_rep",
"_____no_output_____"
]
],
[
[
"!python create_split.py -d datasets/eccv16_dataset_summe_google_pool5.h5 --save-dir datasets --save-name summe_splits --num-splits 5",
"==========\nArgs:Namespace(dataset='datasets/eccv16_dataset_summe_google_pool5.h5', num_splits=5, save_dir='datasets', save_name='summe_splits2', train_percent=0.8)\n==========\nGoal: randomly split data for 5 times, 80.0% for training and the rest for testing\nLoading dataset from datasets/eccv16_dataset_summe_google_pool5.h5\nSplit breakdown: # total videos 25. # train videos 20. # test videos 5\nSplits saved to datasets/summe_splits2.json\n"
],
[
"!python main.py -d datasets/eccv16_dataset_summe_google_pool5.h5 -s datasets/summe_splits.json -m summe --gpu 0 --save-dir log/summe-split0 --split-id 0 --verbose --num-episode 10 ",
"==========\nArgs:Namespace(beta=0.01, dataset='datasets/eccv16_dataset_summe_google_pool5.h5', evaluate=False, gamma=0.1, gpu='0', hidden_dim=256, input_dim=1024, lr=1e-05, max_epoch=60, metric='summe', num_episode=10, num_layers=1, resume='', rnn_cell='lstm', save_dir='log/summe-split0', save_results=False, seed=1, split='datasets/summe_splits.json', split_id=0, stepsize=30, use_cpu=False, verbose=True, weight_decay=1e-05)\n==========\nCurrently using CPU\nInitialize dataset datasets/eccv16_dataset_summe_google_pool5.h5\n# total videos 25. # train videos 20. # test videos 5\nInitialize model\nModel size: 2.62605M\n==> Start training\n/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\nepoch 1/60\t reward 0.890339483619\t\nepoch 2/60\t reward 0.890349634886\t\nepoch 3/60\t reward 0.890205361843\t\nepoch 4/60\t reward 0.890506898761\t\nepoch 5/60\t reward 0.889753085375\t\nepoch 6/60\t reward 0.890138254762\t\nepoch 7/60\t reward 0.890176760852\t\nepoch 8/60\t reward 0.890733137727\t\nepoch 9/60\t reward 0.889817873836\t\nepoch 10/60\t reward 0.890453404188\t\nepoch 11/60\t reward 0.889769539833\t\nepoch 12/60\t reward 0.890372365713\t\nepoch 13/60\t reward 0.889337153137\t\nepoch 14/60\t reward 0.890152650774\t\nepoch 15/60\t reward 0.889566709399\t\nepoch 16/60\t reward 0.890420225561\t\nepoch 17/60\t reward 0.889932754636\t\nepoch 18/60\t reward 0.889912070334\t\nepoch 19/60\t reward 0.890516562462\t\nepoch 20/60\t reward 0.890511679649\t\nepoch 21/60\t reward 0.889321421385\t\nepoch 22/60\t reward 0.890171942413\t\nepoch 23/60\t reward 0.890678575635\t\nepoch 24/60\t reward 0.890038665533\t\nepoch 25/60\t reward 0.890749092698\t\nepoch 26/60\t reward 0.890231349468\t\nepoch 27/60\t reward 0.889812584221\t\nepoch 28/60\t reward 0.890198176205\t\nepoch 29/60\t reward 
0.890180433393\t\nepoch 30/60\t reward 0.890344361663\t\nepoch 31/60\t reward 0.88981575489\t\nepoch 32/60\t reward 0.890298307836\t\nepoch 33/60\t reward 0.890616716743\t\nepoch 34/60\t reward 0.890378356874\t\nepoch 35/60\t reward 0.89085419327\t\nepoch 36/60\t reward 0.890490875542\t\nepoch 37/60\t reward 0.890710602403\t\nepoch 38/60\t reward 0.889366292655\t\nepoch 39/60\t reward 0.890091146827\t\nepoch 40/60\t reward 0.890592958629\t\nepoch 41/60\t reward 0.890845787227\t\nepoch 42/60\t reward 0.89114710182\t\nepoch 43/60\t reward 0.891215231121\t\nepoch 44/60\t reward 0.891198295653\t\nepoch 45/60\t reward 0.890719830096\t\nepoch 46/60\t reward 0.890837545991\t\nepoch 47/60\t reward 0.890119651556\t\nepoch 48/60\t reward 0.890354970992\t\nepoch 49/60\t reward 0.891455593705\t\nepoch 50/60\t reward 0.890701160431\t\nepoch 51/60\t reward 0.891124907732\t\nepoch 52/60\t reward 0.891186130643\t\nepoch 53/60\t reward 0.890581825376\t\nepoch 54/60\t reward 0.891473172009\t\nepoch 55/60\t reward 0.891380977631\t\nepoch 56/60\t reward 0.890748949051\t\nepoch 57/60\t reward 0.891069375575\t\nepoch 58/60\t reward 0.891647360325\t\nepoch 59/60\t reward 0.891730097234\t\nepoch 60/60\t reward 0.891822666824\t\n==> Test\n--- -------- -------\nNo. Video F-score\n1 video_11 60.2%\n2 video_19 60.8%\n3 video_2 22.3%\n4 video_5 29.7%\n5 video_7 29.2%\n--- -------- -------\nAverage F-score 40.4%\nFinished. Total elapsed time (h:m:s): 0:13:53\nModel saved to log/summe-split0/model_epoch60.pth.tar\n"
],
[
"!python main.py -d datasets/eccv16_dataset_summe_google_pool5.h5 -s datasets/summe_splits.json -m summe --gpu 0 --save-dir log/summe-split0 --split-id 0 --evaluate --resume log/summe-split0/model_epoch60.pth.tar --verbose --save-results",
"==========\nArgs:Namespace(beta=0.01, dataset='datasets/eccv16_dataset_summe_google_pool5.h5', evaluate=True, gamma=0.1, gpu='0', hidden_dim=256, input_dim=1024, lr=1e-05, max_epoch=60, metric='summe', num_episode=5, num_layers=1, resume='log/summe-split0/model_epoch60.pth.tar', rnn_cell='lstm', save_dir='log/summe-split0', save_results=True, seed=1, split='datasets/summe_splits.json', split_id=0, stepsize=30, use_cpu=False, verbose=True, weight_decay=1e-05)\n==========\nCurrently using CPU\nInitialize dataset datasets/eccv16_dataset_summe_google_pool5.h5\n# total videos 25. # train videos 20. # test videos 5\nInitialize model\nModel size: 2.62605M\nLoading checkpoint from 'log/summe-split0/model_epoch60.pth.tar'\nEvaluate only\n==> Test\n/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n--- -------- -------\nNo. Video F-score\n1 video_11 60.2%\n2 video_19 60.8%\n3 video_2 22.3%\n4 video_5 29.7%\n5 video_7 29.2%\n--- -------- -------\nAverage F-score 40.4%\n"
],
[
"!python parse_log.py -p log/summe-split0/log_train.txt\n",
"_____no_output_____"
],
[
"!python parse_json.py -p log/summe-split0/rewards.json -i 19",
"_____no_output_____"
]
],
[
[
"# R_div",
"_____no_output_____"
]
],
[
[
"!python create_split.py -d datasets/eccv16_dataset_summe_google_pool5.h5 --save-dir datasets --save-name summe_splits1 --num-splits 5",
"==========\nArgs:Namespace(dataset='datasets/eccv16_dataset_summe_google_pool5.h5', num_splits=5, save_dir='datasets', save_name='summe_splits1', train_percent=0.8)\n==========\nGoal: randomly split data for 5 times, 80.0% for training and the rest for testing\nLoading dataset from datasets/eccv16_dataset_summe_google_pool5.h5\nSplit breakdown: # total videos 25. # train videos 20. # test videos 5\nSplits saved to datasets/summe_splits1.json\n"
],
[
"!python main_div.py -d datasets/eccv16_dataset_summe_google_pool5.h5 -s datasets/summe_splits1.json -m summe --gpu 0 --save-dir log/summe-split1 --split-id 0 --verbose --num-episode 10 ",
"==========\nArgs:Namespace(beta=0.01, dataset='datasets/eccv16_dataset_summe_google_pool5.h5', evaluate=False, gamma=0.1, gpu='0', hidden_dim=256, input_dim=1024, lr=1e-05, max_epoch=60, metric='summe', num_episode=10, num_layers=1, resume='', rnn_cell='lstm', save_dir='log/summe-split1', save_results=False, seed=1, split='datasets/summe_splits1.json', split_id=0, stepsize=30, use_cpu=False, verbose=True, weight_decay=1e-05)\n==========\nCurrently using CPU\nInitialize dataset datasets/eccv16_dataset_summe_google_pool5.h5\n# total videos 25. # train videos 20. # test videos 5\nInitialize model\nModel size: 2.62605M\n==> Start training\n/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\nepoch 1/60\t reward 0.867160550058\t\nepoch 2/60\t reward 0.86785408318\t\nepoch 3/60\t reward 0.867389362752\t\nepoch 4/60\t reward 0.868012881577\t\nepoch 5/60\t reward 0.867713506222\t\nepoch 6/60\t reward 0.86847011894\t\nepoch 7/60\t reward 0.867064812779\t\nepoch 8/60\t reward 0.867290450633\t\nepoch 9/60\t reward 0.866872667968\t\nepoch 10/60\t reward 0.867202639282\t\nepoch 11/60\t reward 0.868167421818\t\nepoch 12/60\t reward 0.868087991476\t\nepoch 13/60\t reward 0.867793970108\t\nepoch 14/60\t reward 0.867457414865\t\nepoch 15/60\t reward 0.868066786826\t\nepoch 16/60\t reward 0.86697135359\t\nepoch 17/60\t reward 0.867860839069\t\nepoch 18/60\t reward 0.867895079851\t\nepoch 19/60\t reward 0.867470587492\t\nepoch 20/60\t reward 0.867410061061\t\nepoch 21/60\t reward 0.868498460948\t\nepoch 22/60\t reward 0.867278649807\t\nepoch 23/60\t reward 0.868289049268\t\nepoch 24/60\t reward 0.867782856524\t\nepoch 25/60\t reward 0.868304485679\t\nepoch 26/60\t reward 0.868397538662\t\nepoch 27/60\t reward 0.867659041286\t\nepoch 28/60\t reward 0.868256370425\t\nepoch 29/60\t reward 
0.868655376136\t\nepoch 30/60\t reward 0.867476089597\t\nepoch 31/60\t reward 0.86844506681\t\nepoch 32/60\t reward 0.868420004249\t\nepoch 33/60\t reward 0.867336914539\t\nepoch 34/60\t reward 0.868328128159\t\nepoch 35/60\t reward 0.867556632757\t\nepoch 36/60\t reward 0.867856134772\t\nepoch 37/60\t reward 0.867175328135\t\nepoch 38/60\t reward 0.8681237939\t\nepoch 39/60\t reward 0.867351621985\t\nepoch 40/60\t reward 0.868251627386\t\nepoch 41/60\t reward 0.867761220336\t\nepoch 42/60\t reward 0.86741286099\t\nepoch 43/60\t reward 0.867865216136\t\nepoch 44/60\t reward 0.867700620592\t\nepoch 45/60\t reward 0.867896019518\t\nepoch 46/60\t reward 0.86772082746\t\nepoch 47/60\t reward 0.867422542274\t\nepoch 48/60\t reward 0.868180064261\t\nepoch 49/60\t reward 0.866916987896\t\nepoch 50/60\t reward 0.868641804457\t\nepoch 51/60\t reward 0.867623659968\t\nepoch 52/60\t reward 0.867117165327\t\nepoch 53/60\t reward 0.868311988711\t\nepoch 54/60\t reward 0.867439972162\t\nepoch 55/60\t reward 0.8683742401\t\nepoch 56/60\t reward 0.867966027856\t\nepoch 57/60\t reward 0.8679474774\t\nepoch 58/60\t reward 0.867494027019\t\nepoch 59/60\t reward 0.867401491702\t\nepoch 60/60\t reward 0.867298924923\t\n==> Test\n--- -------- -------\nNo. Video F-score\n1 video_1 60.0%\n2 video_10 26.6%\n3 video_2 35.7%\n4 video_21 17.4%\n5 video_8 49.4%\n--- -------- -------\nAverage F-score 37.8%\nFinished. Total elapsed time (h:m:s): 0:12:37\nModel saved to log/summe-split1/model_epoch60.pth.tar\n"
],
[
"!python parse_log.py -p log/summe-split1/log_train.txt",
"_____no_output_____"
],
[
"!python parse_json.py -p log/summe-split1/rewards.json -i 13",
"_____no_output_____"
]
],
[
[
"# R_rep",
"_____no_output_____"
]
],
[
[
"!python create_split.py -d datasets/eccv16_dataset_summe_google_pool5.h5 --save-dir datasets --save-name summe_splits2 --num-splits 5",
"==========\nArgs:Namespace(dataset='datasets/eccv16_dataset_summe_google_pool5.h5', num_splits=5, save_dir='datasets', save_name='summe_splits2', train_percent=0.8)\n==========\nGoal: randomly split data for 5 times, 80.0% for training and the rest for testing\nLoading dataset from datasets/eccv16_dataset_summe_google_pool5.h5\nSplit breakdown: # total videos 25. # train videos 20. # test videos 5\nSplits saved to datasets/summe_splits2.json\n"
],
[
"!python main_rep.py -d datasets/eccv16_dataset_summe_google_pool5.h5 -s datasets/summe_splits2.json -m summe --gpu 0 --save-dir log/summe-split2 --split-id 0 --verbose --num-episode 10 ",
"==========\nArgs:Namespace(beta=0.01, dataset='datasets/eccv16_dataset_summe_google_pool5.h5', evaluate=False, gamma=0.1, gpu='0', hidden_dim=256, input_dim=1024, lr=1e-05, max_epoch=60, metric='summe', num_episode=10, num_layers=1, resume='', rnn_cell='lstm', save_dir='log/summe-split2', save_results=False, seed=1, split='datasets/summe_splits2.json', split_id=0, stepsize=30, use_cpu=False, verbose=True, weight_decay=1e-05)\n==========\nCurrently using CPU\nInitialize dataset datasets/eccv16_dataset_summe_google_pool5.h5\n# total videos 25. # train videos 20. # test videos 5\nInitialize model\nModel size: 2.62605M\n==> Start training\n/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\nepoch 1/60\t reward 0.912273336649\t\nepoch 2/60\t reward 0.912762309909\t\nepoch 3/60\t reward 0.912096489966\t\nepoch 4/60\t reward 0.913193582296\t\nepoch 5/60\t reward 0.91240547061\t\nepoch 6/60\t reward 0.912786197364\t\nepoch 7/60\t reward 0.912501425743\t\nepoch 8/60\t reward 0.91246183157\t\nepoch 9/60\t reward 0.911975803077\t\nepoch 10/60\t reward 0.911831765771\t\nepoch 11/60\t reward 0.912444827557\t\nepoch 12/60\t reward 0.911301446855\t\nepoch 13/60\t reward 0.911633495688\t\nepoch 14/60\t reward 0.912076304555\t\nepoch 15/60\t reward 0.912143250704\t\nepoch 16/60\t reward 0.912158835828\t\nepoch 17/60\t reward 0.91221550703\t\nepoch 18/60\t reward 0.91302095592\t\nepoch 19/60\t reward 0.912706927657\t\nepoch 20/60\t reward 0.912347377539\t\nepoch 21/60\t reward 0.912654126883\t\nepoch 22/60\t reward 0.912181709111\t\nepoch 23/60\t reward 0.913805272281\t\nepoch 24/60\t reward 0.912668733299\t\nepoch 25/60\t reward 0.912187154293\t\nepoch 26/60\t reward 0.911475602984\t\nepoch 27/60\t reward 0.912717954516\t\nepoch 28/60\t reward 0.911808088124\t\nepoch 29/60\t reward 0.913933404982\t\nepoch 
30/60\t reward 0.914366125464\t\nepoch 31/60\t reward 0.91278129518\t\nepoch 32/60\t reward 0.911681730151\t\nepoch 33/60\t reward 0.913388167024\t\nepoch 34/60\t reward 0.91442933321\t\nepoch 35/60\t reward 0.912767903507\t\nepoch 36/60\t reward 0.914162442088\t\nepoch 37/60\t reward 0.914106355011\t\nepoch 38/60\t reward 0.915020836592\t\nepoch 39/60\t reward 0.914432610571\t\nepoch 40/60\t reward 0.913383885324\t\nepoch 41/60\t reward 0.914761800468\t\nepoch 42/60\t reward 0.915102592111\t\nepoch 43/60\t reward 0.914300976396\t\nepoch 44/60\t reward 0.914712825716\t\nepoch 45/60\t reward 0.915158033371\t\nepoch 46/60\t reward 0.915562510192\t\nepoch 47/60\t reward 0.916003290117\t\nepoch 48/60\t reward 0.914701936245\t\nepoch 49/60\t reward 0.914606919289\t\nepoch 50/60\t reward 0.914833345711\t\nepoch 51/60\t reward 0.915317614973\t\nepoch 52/60\t reward 0.915001612902\t\nepoch 53/60\t reward 0.916046698689\t\nepoch 54/60\t reward 0.914928715527\t\nepoch 55/60\t reward 0.916261955798\t\nepoch 56/60\t reward 0.915933358669\t\nepoch 57/60\t reward 0.916869821548\t\nepoch 58/60\t reward 0.916133842468\t\nepoch 59/60\t reward 0.915980260074\t\nepoch 60/60\t reward 0.915898680389\t\n==> Test\n--- -------- -------\nNo. Video F-score\n1 video_10 26.5%\n2 video_12 26.7%\n3 video_16 28.6%\n4 video_17 32.7%\n5 video_2 28.6%\n--- -------- -------\nAverage F-score 28.6%\nFinished. Total elapsed time (h:m:s): 0:14:19\nModel saved to log/summe-split2/model_epoch60.pth.tar\n"
],
[
"!python parse_log.py -p log/summe-split2/log_train.txt",
"_____no_output_____"
],
[
"!python parse_json.py -p log/summe-split2/rewards.json -i 5",
"_____no_output_____"
],
[
"%cd /content/drive/MyDrive/Colab Notebooks/CS106 - AI/SumMe/temp_video",
"/content/drive/MyDrive/Colab Notebooks/CS106 - AI/SumMe/temp_video\n"
],
[
"!sh videos2frames.sh",
"_____no_output_____"
]
],
[
[
"# **TVSUM**",
"_____no_output_____"
],
[
"# R_div + R_rep",
"_____no_output_____"
]
],
[
[
"!python create_split.py -d datasets/eccv16_dataset_tvsum_google_pool5.h5 --save-dir datasets --save-name tvsum_splits --num-splits 5",
"==========\nArgs:Namespace(dataset='datasets/eccv16_dataset_tvsum_google_pool5.h5', num_splits=5, save_dir='datasets', save_name='tvsum_splits2', train_percent=0.8)\n==========\nGoal: randomly split data for 5 times, 80.0% for training and the rest for testing\nLoading dataset from datasets/eccv16_dataset_tvsum_google_pool5.h5\nSplit breakdown: # total videos 50. # train videos 40. # test videos 10\nSplits saved to datasets/tvsum_splits2.json\n"
],
[
"!python main.py -d datasets/eccv16_dataset_tvsum_google_pool5.h5 -s datasets/tvsum_splits.json -m tvsum --gpu 0 --save-dir log/tvsum-split0 --split-id 0 --verbose",
"==========\nArgs:Namespace(beta=0.01, dataset='datasets/eccv16_dataset_tvsum_google_pool5.h5', evaluate=False, gamma=0.1, gpu='0', hidden_dim=256, input_dim=1024, lr=1e-05, max_epoch=60, metric='tvsum', num_episode=5, num_layers=1, resume='', rnn_cell='lstm', save_dir='log/tvsum-split0', save_results=False, seed=1, split='datasets/tvsum_splits.json', split_id=0, stepsize=30, use_cpu=False, verbose=True, weight_decay=1e-05)\n==========\nCurrently using CPU\nInitialize dataset datasets/eccv16_dataset_tvsum_google_pool5.h5\n# total videos 50. # train videos 40. # test videos 10\nInitialize model\nModel size: 2.62605M\n==> Start training\n/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\nepoch 1/60\t reward 0.912426979244\t\nepoch 2/60\t reward 0.911523909569\t\nepoch 3/60\t reward 0.911415727437\t\nepoch 4/60\t reward 0.911149952412\t\nepoch 5/60\t reward 0.911935922503\t\nepoch 6/60\t reward 0.912045218647\t\nepoch 7/60\t reward 0.911045377254\t\nepoch 8/60\t reward 0.911129715145\t\nepoch 9/60\t reward 0.910419083238\t\nepoch 10/60\t reward 0.910945565104\t\nepoch 11/60\t reward 0.9112701267\t\nepoch 12/60\t reward 0.911287863851\t\nepoch 13/60\t reward 0.910884581208\t\nepoch 14/60\t reward 0.911598317623\t\nepoch 15/60\t reward 0.911288600266\t\nepoch 16/60\t reward 0.911055259705\t\nepoch 17/60\t reward 0.911009218693\t\nepoch 18/60\t reward 0.911579205096\t\nepoch 19/60\t reward 0.911206223965\t\nepoch 20/60\t reward 0.910255340338\t\nepoch 21/60\t reward 0.911314222813\t\nepoch 22/60\t reward 0.911029625535\t\nepoch 23/60\t reward 0.911669132113\t\nepoch 24/60\t reward 0.91104741782\t\nepoch 25/60\t reward 0.911245469153\t\nepoch 26/60\t reward 0.91143083334\t\nepoch 27/60\t reward 0.912107397914\t\nepoch 28/60\t reward 0.911167498231\t\nepoch 29/60\t reward 0.911527051926\t\nepoch 
30/60\t reward 0.911687725782\t\nepoch 31/60\t reward 0.911764424741\t\nepoch 32/60\t reward 0.911718553603\t\nepoch 33/60\t reward 0.9124785164\t\nepoch 34/60\t reward 0.912517130375\t\nepoch 35/60\t reward 0.912133946419\t\nepoch 36/60\t reward 0.911699423194\t\nepoch 37/60\t reward 0.912683994174\t\nepoch 38/60\t reward 0.912411473989\t\nepoch 39/60\t reward 0.912684269845\t\nepoch 40/60\t reward 0.91193816483\t\nepoch 41/60\t reward 0.913155402839\t\nepoch 42/60\t reward 0.912854163051\t\nepoch 43/60\t reward 0.912637076676\t\nepoch 44/60\t reward 0.913611509204\t\nepoch 45/60\t reward 0.913408909738\t\nepoch 46/60\t reward 0.913063324392\t\nepoch 47/60\t reward 0.913132035136\t\nepoch 48/60\t reward 0.91305821836\t\nepoch 49/60\t reward 0.913463102579\t\nepoch 50/60\t reward 0.91340709269\t\nepoch 51/60\t reward 0.913534905016\t\nepoch 52/60\t reward 0.913348135054\t\nepoch 53/60\t reward 0.913386230171\t\nepoch 54/60\t reward 0.914196991622\t\nepoch 55/60\t reward 0.913770130277\t\nepoch 56/60\t reward 0.913761564493\t\nepoch 57/60\t reward 0.913795455694\t\nepoch 58/60\t reward 0.913671698272\t\nepoch 59/60\t reward 0.91403110832\t\nepoch 60/60\t reward 0.914407488108\t\n==> Test\n--- -------- -------\nNo. Video F-score\n1 video_16 62.1%\n2 video_3 54.3%\n3 video_30 48.8%\n4 video_36 63.5%\n5 video_41 77.9%\n6 video_45 72.7%\n7 video_46 60.3%\n8 video_48 50.4%\n9 video_49 48.4%\n10 video_7 53.9%\n--- -------- -------\nAverage F-score 59.2%\nFinished. Total elapsed time (h:m:s): 0:44:01\nModel saved to log/tvsum-split0/model_epoch60.pth.tar\n"
],
[
"!python main.py -d datasets/eccv16_dataset_tvsum_google_pool5.h5 -s datasets/tvsum_splits.json -m tvsum --gpu 0 --save-dir log/tvsum-split0 --split-id 0 --evaluate --resume log/tvsum-split0/model_epoch60.pth.tar --verbose --save-results",
"==========\nArgs:Namespace(beta=0.01, dataset='datasets/eccv16_dataset_tvsum_google_pool5.h5', evaluate=True, gamma=0.1, gpu='0', hidden_dim=256, input_dim=1024, lr=1e-05, max_epoch=60, metric='tvsum', num_episode=5, num_layers=1, resume='log/tvsum-split0/model_epoch60.pth.tar', rnn_cell='lstm', save_dir='log/tvsum-split0', save_results=True, seed=1, split='datasets/tvsum_splits.json', split_id=0, stepsize=30, use_cpu=False, verbose=True, weight_decay=1e-05)\n==========\nCurrently using CPU\nInitialize dataset datasets/eccv16_dataset_tvsum_google_pool5.h5\n# total videos 50. # train videos 40. # test videos 10\nInitialize model\nModel size: 2.62605M\nLoading checkpoint from 'log/tvsum-split0/model_epoch60.pth.tar'\nEvaluate only\n==> Test\n/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n--- -------- -------\nNo. Video F-score\n1 video_16 62.1%\n2 video_3 54.3%\n3 video_30 48.8%\n4 video_36 63.5%\n5 video_41 77.9%\n6 video_45 72.7%\n7 video_46 60.3%\n8 video_48 50.4%\n9 video_49 48.4%\n10 video_7 53.9%\n--- -------- -------\nAverage F-score 59.2%\n"
],
[
"!python parse_log.py -p log/tvsum-split0/log_train.txt",
"_____no_output_____"
],
[
"!python parse_json.py -p log/tvsum-split0/rewards.json -i 24",
"_____no_output_____"
]
],
[
[
"# R_div",
"_____no_output_____"
]
],
[
[
"!python create_split.py -d datasets/eccv16_dataset_tvsum_google_pool5.h5 --save-dir datasets --save-name tvsum_splits1 --num-splits 5",
"==========\nArgs:Namespace(dataset='datasets/eccv16_dataset_tvsum_google_pool5.h5', num_splits=5, save_dir='datasets', save_name='tvsum_splits1', train_percent=0.8)\n==========\nGoal: randomly split data for 5 times, 80.0% for training and the rest for testing\nLoading dataset from datasets/eccv16_dataset_tvsum_google_pool5.h5\nSplit breakdown: # total videos 50. # train videos 40. # test videos 10\nSplits saved to datasets/tvsum_splits1.json\n"
],
[
"!python main_div.py -d datasets/eccv16_dataset_tvsum_google_pool5.h5 -s datasets/tvsum_splits1.json -m tvsum --gpu 0 --save-dir log/tvsum-split1 --split-id 0 --verbose",
"==========\nArgs:Namespace(beta=0.01, dataset='datasets/eccv16_dataset_tvsum_google_pool5.h5', evaluate=False, gamma=0.1, gpu='0', hidden_dim=256, input_dim=1024, lr=1e-05, max_epoch=60, metric='tvsum', num_episode=5, num_layers=1, resume='', rnn_cell='lstm', save_dir='log/tvsum-split1', save_results=False, seed=1, split='datasets/tvsum_splits1.json', split_id=0, stepsize=30, use_cpu=False, verbose=True, weight_decay=1e-05)\n==========\nCurrently using CPU\nInitialize dataset datasets/eccv16_dataset_tvsum_google_pool5.h5\n# total videos 50. # train videos 40. # test videos 10\nInitialize model\nModel size: 2.62605M\n==> Start training\n/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\nepoch 1/60\t reward 0.938043119609\t\nepoch 2/60\t reward 0.938400234282\t\nepoch 3/60\t reward 0.938253195882\t\nepoch 4/60\t reward 0.938583692908\t\nepoch 5/60\t reward 0.938292444646\t\nepoch 6/60\t reward 0.938257283866\t\nepoch 7/60\t reward 0.938186262846\t\nepoch 8/60\t reward 0.93813426584\t\nepoch 9/60\t reward 0.938209501207\t\nepoch 10/60\t reward 0.938341689706\t\nepoch 11/60\t reward 0.938333625495\t\nepoch 12/60\t reward 0.93800614357\t\nepoch 13/60\t reward 0.93828440845\t\nepoch 14/60\t reward 0.938267458379\t\nepoch 15/60\t reward 0.938313495517\t\nepoch 16/60\t reward 0.938284590244\t\nepoch 17/60\t reward 0.938324292004\t\nepoch 18/60\t reward 0.938311501741\t\nepoch 19/60\t reward 0.938363215923\t\nepoch 20/60\t reward 0.938105996251\t\nepoch 21/60\t reward 0.938195751905\t\nepoch 22/60\t reward 0.93807068944\t\nepoch 23/60\t reward 0.93816544652\t\nepoch 24/60\t reward 0.93843480587\t\nepoch 25/60\t reward 0.938192577362\t\nepoch 26/60\t reward 0.938053421378\t\nepoch 27/60\t reward 0.938410209715\t\nepoch 28/60\t reward 0.938272690475\t\nepoch 29/60\t reward 0.938725028932\t\nepoch 
30/60\t reward 0.938516854644\t\nepoch 31/60\t reward 0.938319037557\t\nepoch 32/60\t reward 0.938309394121\t\nepoch 33/60\t reward 0.938261999488\t\nepoch 34/60\t reward 0.938490971029\t\nepoch 35/60\t reward 0.93820677191\t\nepoch 36/60\t reward 0.938336056173\t\nepoch 37/60\t reward 0.938365980387\t\nepoch 38/60\t reward 0.93804390192\t\nepoch 39/60\t reward 0.938415810764\t\nepoch 40/60\t reward 0.938093420863\t\nepoch 41/60\t reward 0.938333251774\t\nepoch 42/60\t reward 0.938402777612\t\nepoch 43/60\t reward 0.938338042796\t\nepoch 44/60\t reward 0.938449821472\t\nepoch 45/60\t reward 0.938419136405\t\nepoch 46/60\t reward 0.938232370615\t\nepoch 47/60\t reward 0.938380878568\t\nepoch 48/60\t reward 0.938436509669\t\nepoch 49/60\t reward 0.938377420902\t\nepoch 50/60\t reward 0.938576954901\t\nepoch 51/60\t reward 0.93829452008\t\nepoch 52/60\t reward 0.938318389654\t\nepoch 53/60\t reward 0.93827870667\t\nepoch 54/60\t reward 0.938456414938\t\nepoch 55/60\t reward 0.93847063303\t\nepoch 56/60\t reward 0.938471703529\t\nepoch 57/60\t reward 0.938195561171\t\nepoch 58/60\t reward 0.938435505033\t\nepoch 59/60\t reward 0.938518308103\t\nepoch 60/60\t reward 0.938437383473\t\n==> Test\n--- -------- -------\nNo. Video F-score\n1 video_11 62.6%\n2 video_16 62.1%\n3 video_18 69.0%\n4 video_22 57.4%\n5 video_3 54.6%\n6 video_37 45.2%\n7 video_38 48.9%\n8 video_44 71.4%\n9 video_6 62.3%\n10 video_8 49.6%\n--- -------- -------\nAverage F-score 58.3%\nFinished. Total elapsed time (h:m:s): 0:38:53\nModel saved to log/tvsum-split1/model_epoch60.pth.tar\n"
],
[
"!python parse_log.py -p log/tvsum-split1/log_train.txt",
"_____no_output_____"
],
[
"!python parse_json.py -p log/tvsum-split1/rewards.json -i 10",
"_____no_output_____"
]
],
[
[
"# R_rep",
"_____no_output_____"
]
],
[
[
"!python create_split.py -d datasets/eccv16_dataset_tvsum_google_pool5.h5 --save-dir datasets --save-name tvsum_splits2 --num-splits 5",
"==========\nArgs:Namespace(dataset='datasets/eccv16_dataset_tvsum_google_pool5.h5', num_splits=5, save_dir='datasets', save_name='tvsum_splits2', train_percent=0.8)\n==========\nGoal: randomly split data for 5 times, 80.0% for training and the rest for testing\nLoading dataset from datasets/eccv16_dataset_tvsum_google_pool5.h5\nSplit breakdown: # total videos 50. # train videos 40. # test videos 10\nSplits saved to datasets/tvsum_splits2.json\n"
],
[
"!python main_rep.py -d datasets/eccv16_dataset_tvsum_google_pool5.h5 -s datasets/tvsum_splits2.json -m tvsum --gpu 0 --save-dir log/tvsum-split2 --split-id 0 --verbose",
"==========\nArgs:Namespace(beta=0.01, dataset='datasets/eccv16_dataset_tvsum_google_pool5.h5', evaluate=False, gamma=0.1, gpu='0', hidden_dim=256, input_dim=1024, lr=1e-05, max_epoch=60, metric='tvsum', num_episode=5, num_layers=1, resume='', rnn_cell='lstm', save_dir='log/tvsum-split2', save_results=False, seed=1, split='datasets/tvsum_splits2.json', split_id=0, stepsize=30, use_cpu=False, verbose=True, weight_decay=1e-05)\n==========\nCurrently using CPU\nInitialize dataset datasets/eccv16_dataset_tvsum_google_pool5.h5\n# total videos 50. # train videos 40. # test videos 10\nInitialize model\nModel size: 2.62605M\n==> Start training\n/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\nepoch 1/60\t reward 0.888394364119\t\nepoch 2/60\t reward 0.887358208299\t\nepoch 3/60\t reward 0.886654397845\t\nepoch 4/60\t reward 0.888023816347\t\nepoch 5/60\t reward 0.888429674208\t\nepoch 6/60\t reward 0.887696173787\t\nepoch 7/60\t reward 0.886315981448\t\nepoch 8/60\t reward 0.886596548557\t\nepoch 9/60\t reward 0.886581307054\t\nepoch 10/60\t reward 0.887606523633\t\nepoch 11/60\t reward 0.887472450733\t\nepoch 12/60\t reward 0.887350874841\t\nepoch 13/60\t reward 0.887763604522\t\nepoch 14/60\t reward 0.888605780005\t\nepoch 15/60\t reward 0.886457652748\t\nepoch 16/60\t reward 0.8876038149\t\nepoch 17/60\t reward 0.889096388221\t\nepoch 18/60\t reward 0.887394300103\t\nepoch 19/60\t reward 0.887724066675\t\nepoch 20/60\t reward 0.889242350161\t\nepoch 21/60\t reward 0.887514548004\t\nepoch 22/60\t reward 0.889729852974\t\nepoch 23/60\t reward 0.887274071276\t\nepoch 24/60\t reward 0.888711953759\t\nepoch 25/60\t reward 0.888701313436\t\nepoch 26/60\t reward 0.889578024149\t\nepoch 27/60\t reward 0.889856281579\t\nepoch 28/60\t reward 0.889950846434\t\nepoch 29/60\t reward 
0.890381496847\t\nepoch 30/60\t reward 0.889729915559\t\nepoch 31/60\t reward 0.889845364392\t\nepoch 32/60\t reward 0.890871754289\t\nepoch 33/60\t reward 0.890853833556\t\nepoch 34/60\t reward 0.890351344943\t\nepoch 35/60\t reward 0.891536044478\t\nepoch 36/60\t reward 0.891479934156\t\nepoch 37/60\t reward 0.890921652019\t\nepoch 38/60\t reward 0.892039113641\t\nepoch 39/60\t reward 0.892545349598\t\nepoch 40/60\t reward 0.892459248006\t\nepoch 41/60\t reward 0.893760026693\t\nepoch 42/60\t reward 0.893153256178\t\nepoch 43/60\t reward 0.893123142719\t\nepoch 44/60\t reward 0.893671055436\t\nepoch 45/60\t reward 0.894869040251\t\nepoch 46/60\t reward 0.893560576737\t\nepoch 47/60\t reward 0.894436597824\t\nepoch 48/60\t reward 0.893951062858\t\nepoch 49/60\t reward 0.893668349087\t\nepoch 50/60\t reward 0.893982833028\t\nepoch 51/60\t reward 0.894911154509\t\nepoch 52/60\t reward 0.895653438568\t\nepoch 53/60\t reward 0.895687381625\t\nepoch 54/60\t reward 0.895343169868\t\nepoch 55/60\t reward 0.894854463339\t\nepoch 56/60\t reward 0.896412267089\t\nepoch 57/60\t reward 0.897174422741\t\nepoch 58/60\t reward 0.896814547479\t\nepoch 59/60\t reward 0.897635444403\t\nepoch 60/60\t reward 0.898057544231\t\n==> Test\n--- -------- -------\nNo. Video F-score\n1 video_10 41.9%\n2 video_11 62.6%\n3 video_14 40.2%\n4 video_24 53.5%\n5 video_34 50.6%\n6 video_38 41.4%\n7 video_4 60.8%\n8 video_45 72.7%\n9 video_6 62.3%\n10 video_8 51.6%\n--- -------- -------\nAverage F-score 53.8%\nFinished. Total elapsed time (h:m:s): 0:44:33\nModel saved to log/tvsum-split2/model_epoch60.pth.tar\n"
],
[
"!python parse_log.py -p log/tvsum-split2/log_train.txt",
"_____no_output_____"
],
[
"!python parse_json.py -p log/tvsum-split2/rewards.json -i 21",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
ec65bce78a08e9b43030f6a4d6f40641df80494c | 16,029 | ipynb | Jupyter Notebook | notebooks/calculate_training_time.ipynb | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
]
| 2 | 2022-01-18T09:59:01.000Z | 2022-02-02T10:01:45.000Z | notebooks/calculate_training_time.ipynb | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
]
| null | null | null | notebooks/calculate_training_time.ipynb | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
]
| null | null | null | 34.32334 | 684 | 0.479132 | [
[
[
"import glob\nimport json\n\nimport pandas as pd\n\nfrom pathlib import Path\n\nfrom data_imputation_paper.experiment import _recursive_split",
"_____no_output_____"
],
[
"glob_a = str(Path(f\"../data/experiments/fully_observed\") / \"**\" / \"MCAR\" / \"**\" / \"single_all\" / \"elapsed_train_time_*.json\")\nglob_b = str(Path(f\"../data/experiments/corrupted\") / \"**\" / \"MCAR\" / \"**\" / \"single_all\" / \"elapsed_train_time_*.json\")\ntraining_time_files = [*glob.glob(glob_a, recursive=True), *glob.glob(glob_b, recursive=True)]",
"_____no_output_____"
],
[
"column_names = [\"imputer\", \"task\", \"type\", \"fraction\"]\n\ndfs = []\n\nfor path in training_time_files:\n df = pd.read_json(path, orient=\"index\").T.reset_index(drop=True)\n df[column_names] = _recursive_split(path)[4:-2]\n dfs.append(df)\n\ntraining_time_all = pd.concat(dfs)",
"_____no_output_____"
],
[
"training_time = training_time_all.drop(\"std\", axis=1)\ntraining_time = training_time.rename(columns={\"mean\": \"training_time\"})\ntraining_time = training_time.replace({\n \"ModeImputer\": \"Mean/Mode\",\n \"KNNImputer\": \"$k$-NN\",\n \"ForestImputer\": \"Random Forest\",\n \"AutoKerasImputer\": \"Discriminative DL\",\n \"VAEImputer\": \"VAE\",\n \"GAINImputer\": \"GAIN\" \n})",
"_____no_output_____"
],
[
"# First calculate mean and relative std for each imputer and task\ntraining_time_grouped = training_time.groupby([\"imputer\", \"task\"]).agg([\"mean\", \"std\"])\ntraining_time_grouped.columns = [\"mean\", \"std\"]\ntraining_time_grouped[\"rel std\"] = training_time_grouped.loc[:, \"std\"] / training_time_grouped.loc[:, \"mean\"]\n\n# Then average over all data sets. This leads to relativ sd that is less dependant on the data set size.\ntraining_time_grouped = training_time_grouped.groupby(\"imputer\").agg([\"mean\", \"std\"])\ntraining_time_grouped = training_time_grouped.loc[:, [(\"mean\", \"mean\"), (\"rel std\", \"mean\")]]",
"_____no_output_____"
]
],
[
[
"# Predict Time",
"_____no_output_____"
]
],
[
[
"predict_time_files = glob.glob(str(Path(f\"../data/experiments/time_measure_predict\") / \"**\" / \"MCAR\" / \"**\" / \"single_single\" / \"elapsed_train_time_*.json\"), recursive=True)",
"_____no_output_____"
],
[
"column_names = [\"imputer\", \"task\", \"type\", \"fraction\"]\n\ndfs = []\n\nfor path in predict_time_files:\n df = pd.read_json(path, orient=\"index\").T.reset_index(drop=True)\n df[column_names] = _recursive_split(path)[4:-2]\n dfs.append(df)\n\npredict_time_all = pd.concat(dfs)",
"_____no_output_____"
],
[
"predict_time = predict_time_all.drop(\"std\", axis=1)\npredict_time = predict_time.rename(columns={\"mean\": \"predict_time\"})\npredict_time = predict_time.replace({\n \"ModeImputer\": \"Mean/Mode\",\n \"KNNImputer\": \"$k$-NN\",\n \"ForestImputer\": \"Random Forest\",\n \"AutoKerasImputer\": \"Discriminative DL\",\n \"VAEImputer\": \"VAE\",\n \"GAINImputer\": \"GAIN\" \n})",
"_____no_output_____"
],
[
"# First calculate mean and relative std for each imputer and task\npredict_time_grouped = predict_time.groupby([\"imputer\", \"task\"]).agg([\"mean\", \"std\"])\npredict_time_grouped.columns = [\"mean\", \"std\"]\npredict_time_grouped[\"rel std\"] = predict_time_grouped.loc[:, \"std\"] / predict_time_grouped.loc[:, \"mean\"]\n\n# Then average over all data sets. This leads to relativ sd that is less dependant on the data set size.\npredict_time_grouped = predict_time_grouped.groupby(\"imputer\").agg([\"mean\", \"std\"])\npredict_time_grouped = predict_time_grouped.loc[:, [(\"mean\", \"mean\"), (\"rel std\", \"mean\")]]\npredict_time_grouped = predict_time_grouped.loc[[\"Mean/Mode\", \"$k$-NN\", \"Random Forest\", \"Discriminative DL\", \"VAE\", \"GAIN\"],:].reset_index()",
"_____no_output_____"
],
[
"predict_time_grouped",
"_____no_output_____"
]
],
[
[
"# Latex Table",
"_____no_output_____"
]
],
[
[
"table_latex = training_time_grouped.loc[[\"Mean/Mode\", \"$k$-NN\", \"Random Forest\", \"Discriminative DL\", \"VAE\", \"GAIN\"],:].reset_index()\ntable_latex[(\"a\", \"mean\")] = predict_time_grouped[(\"mean\", \"mean\")]\ntable_latex[(\"a\", \"std\")] = predict_time_grouped[(\"rel std\", \"mean\")]\ntable_latex = pd.DataFrame(table_latex.values, columns=pd.MultiIndex.from_tuples([(\"Imputation Method\", \"\"), (\"Training\", \"Mean Duration\"), (\"Training\", \"Rel. SD\"), (\"Inference\", \"Mean Duration\"), (\"Inference\", \"Rel. SD\")]))\ntable_latex",
"_____no_output_____"
],
[
"print(\n table_latex.to_latex(\n caption=\"Training and inference duration for each imputation method in seconds. We use the wall-time to measure the durations for training including hyperparameter optimization and inference for MCAR missingness pattern and all missingness fractions shown in Table TODO. Because training and inference durations depend heavily on the data set, we first average all measurements for imputation method and data set combinations and calculate the standard deviation relatives relative to there mean durations. Second, we average both mean durations and relative standard devaition for the imputation methods. Abbreviations: Rel. SD means Relative Standard Deviation.\",\n label=\"tab:time\",\n index=False,\n escape=False\n )\n)",
"\\begin{table}\n\\centering\n\\caption{Training and inference duration for each imputation method in seconds. We use the wall-time to measure the durations for training including hyperparameter optimization and inference for MCAR missingness pattern and all missingness fractions shown in Table TODO. Because training and inference durations depend heavily on the data set, we first average all measurements for imputation method and data set combinations and calculate the standard deviation relatives relative to there mean durations. Second, we average both mean durations and relative standard devaition for the imputation methods. Abbreviations: Rel. SD means Relative Standard Deviation.}\n\\label{tab:time}\n\\begin{tabular}{lllll}\n\\toprule\nImputation Method & \\multicolumn{2}{l}{Training} & \\multicolumn{2}{l}{Inference} \\\\\n & Mean Duration & Rel. SD & Mean Duration & Rel. SD \\\\\n\\midrule\n Mean/Mode & 0.005277 & 0.550878 & 0.029195 & 0.171139 \\\\\n $k$-NN & 41.204365 & 0.253716 & 7.01833 & 0.602026 \\\\\n Random Forest & 226.076551 & 0.119295 & 44.047614 & 0.236052 \\\\\nDiscriminative DL & 6275.019244 & 0.40505 & 440.388738 & 0.210621 \\\\\n VAE & 71.095282 & 0.098795 & 11.21483 & 0.084604 \\\\\n GAIN & 878.058286 & 0.311553 & 137.96578 & 0.083426 \\\\\n\\bottomrule\n\\end{tabular}\n\\end{table}\n\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec65c484944965cd0046313915ba283b756792a7 | 6,533 | ipynb | Jupyter Notebook | HeroesHomework.ipynb | mjenkins15/JenkinsPandas_Challenge | 68f07745797abee8452f9438ba59db6ec238754b | [
"ADSL"
]
| null | null | null | HeroesHomework.ipynb | mjenkins15/JenkinsPandas_Challenge | 68f07745797abee8452f9438ba59db6ec238754b | [
"ADSL"
]
| null | null | null | HeroesHomework.ipynb | mjenkins15/JenkinsPandas_Challenge | 68f07745797abee8452f9438ba59db6ec238754b | [
"ADSL"
]
| null | null | null | 35.699454 | 176 | 0.73963 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
ec65c89735f6725d517950ab6ad2625f1ae5ddb3 | 1,157 | ipynb | Jupyter Notebook | nbcollection_tests/ci/tools/template/quick-build-collection/collection_one/quick-build/Quickbuild-Notebook.ipynb | eteq/nbcollection-1 | cf1266b38bc6e6f2bcab64ab93feaa3034b552b2 | [
"BSD-3-Clause"
]
| null | null | null | nbcollection_tests/ci/tools/template/quick-build-collection/collection_one/quick-build/Quickbuild-Notebook.ipynb | eteq/nbcollection-1 | cf1266b38bc6e6f2bcab64ab93feaa3034b552b2 | [
"BSD-3-Clause"
]
| 1 | 2021-06-14T15:28:10.000Z | 2021-06-14T15:28:10.000Z | nbcollection_tests/ci/tools/template/quick-build-collection/collection_one/quick-build/Quickbuild-Notebook.ipynb | eteq/nbcollection-1 | cf1266b38bc6e6f2bcab64ab93feaa3034b552b2 | [
"BSD-3-Clause"
]
| 2 | 2021-04-13T17:10:48.000Z | 2021-09-09T21:37:23.000Z | 20.298246 | 163 | 0.553155 | [
[
[
"# Quickbuild Notebook\n\nThis notebook exists to provide a quick build option for nbcollection-ci testing. It downloads https://jbcurtin.io/ and parses the content for <title> text\"",
"_____no_output_____"
]
],
[
[
"import bs4\nimport requests",
"_____no_output_____"
],
[
"url = 'https://jbcurtin.io'\nresponse = requests.get(url)\nsoup = bs4.BeautifulSoup(response.content)\nsoup.find('title').text",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
]
]
|
ec65d7abe565c91b0065dafaf79690ec6e15d689 | 7,269 | ipynb | Jupyter Notebook | Topic 2- Python Data Structures/2.5 - Test Your Knowledge.ipynb | KaiyeYang-git/Introduction-to-Computer-Science | 3f820854acb86f1abeda3f8ef308b6602a512410 | [
"Apache-2.0"
]
| null | null | null | Topic 2- Python Data Structures/2.5 - Test Your Knowledge.ipynb | KaiyeYang-git/Introduction-to-Computer-Science | 3f820854acb86f1abeda3f8ef308b6602a512410 | [
"Apache-2.0"
]
| null | null | null | Topic 2- Python Data Structures/2.5 - Test Your Knowledge.ipynb | KaiyeYang-git/Introduction-to-Computer-Science | 3f820854acb86f1abeda3f8ef308b6602a512410 | [
"Apache-2.0"
]
| null | null | null | 23.990099 | 452 | 0.546705 | [
[
[
"## Test Your Knowledge\n\nIn the blocks below you will find a range of questions that cover the material we've looked at in previous notebooks. Make sure you can do these tasks before you move on to the next topic, which will cover control-flow in Python. ",
"_____no_output_____"
],
[
"1) Define S to be the string \"abcdefgh\". Write expressions using S and the index operator to return the following strings:\n\na) \"a\" <br>\nb) \"c\" <br>\nc) \"e\" <br>",
"_____no_output_____"
]
],
[
[
"#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"2) Using the same string S, this time return a sublist that spells out the word \"ace\". You should be able to do this with a single line of code using the indexing operator. ",
"_____no_output_____"
]
],
[
[
"#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"3) In the code block below we have defined L to be a list of prices for a new CPU that I would like to buy.\n\na) Print out the minimum and maximum prices without sorting the list first.",
"_____no_output_____"
]
],
[
[
"L = [199.99, 219.99, 220.00, 222.99, 195.99, 210.00]\n\n#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"b) Use the built-in sort function to sort the list, and print out the minimum and maximum price using the indexing operator.",
"_____no_output_____"
]
],
[
[
"#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"4) Using the built-in len function, and the boolean operators we saw in the previous topic, check if the two strings given in the code block have the same length. Print `True` if they do and `False` otherwise.",
"_____no_output_____"
]
],
[
[
"x = \"Hello\"\ny = \"Goodbye\"\n\n#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"5) Given the list in the code block below, calculate the sum of all of the elements, except for the largest and smallest elements. There are multiple ways to solve this problem, try to make your code succinct. ",
"_____no_output_____"
]
],
[
[
"L = [9,5,7,10,6,8,1,3,2,4]\n\n#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"6) Given the matrix (list of lists) in the code block below, try to calculate the following and return your answers as a list:\n\na) Row Sums",
"_____no_output_____"
]
],
[
[
"L = [[0,1,2],[3,4,5],[6,7,8]]\n\n#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"b) Column Sums",
"_____no_output_____"
]
],
[
[
"#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"7) Check if the element \"E\" exists in the tuple defined below:",
"_____no_output_____"
]
],
[
[
"T = (\"A\", \"e\", \"I\", \"O\", \"U\")\n\n#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"8) Replace the element \"e\" with the element \"E\" in the data structure below:",
"_____no_output_____"
]
],
[
[
"T = ([\"A\", \"e\", \"I\", \"O\", \"U\"],[\"B\",\"C\"])\n\n#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"9) Using the dictionary defined below, calculate the total population of all of the settlements. Then add a new key-value pair to the dictionary with the key \"County Durham\", and the value being that total population. ",
"_____no_output_____"
]
],
[
[
"D = {\n \"Darlington\" : 92363,\n \"Hartlepool\" : 88855,\n \"Stockton-on-Tees\" : 82729,\n \"Durham\" : 47785,\n \"Billingham\" : 35392\n}\n\n#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"10) Using the set defined in the code block below, work out which set operator to use to answer the following questions:\n\na) The set of odd numbers that are not prime.",
"_____no_output_____"
]
],
[
[
"odd = {1,3,5,7,9,11,13,15,17,19,21,23,25}\neven = {2,4,6,8,10,12,14,16,18,20,22,24}\nprime = {2,3,5,7,11,13,17,19,23}\n\n#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"b) The set of prime numbers that are not odd (try to find multiple ways in which to do this).",
"_____no_output_____"
]
],
[
[
"#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"c) All of the numbers between 1 and 25 that are not prime.",
"_____no_output_____"
]
],
[
[
"#YOUR CODE GOES HERE",
"_____no_output_____"
]
],
[
[
"<br>\n\nIf you're not confident with your solutions, you should ask a demonstrator to explain any concepts you don't fully understand. The demonstrators will occasionally drop in to your breakout rooms to see how you are getting on with the tasks, and you should show them your solutions to check if they are correct. Once you've completed this and you're confident you have the correct answers, feel free to move on to the next topic if it is available.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec65dcbd1ff3b7ec9bdc2fddc07bd64419f9816e | 25,361 | ipynb | Jupyter Notebook | Tutorial-BSSN_in_terms_of_ADM.ipynb | leowerneck/NRPyIGM | f483d6123424fb3e6860dfac4325dd232b223005 | [
"BSD-2-Clause"
]
| null | null | null | Tutorial-BSSN_in_terms_of_ADM.ipynb | leowerneck/NRPyIGM | f483d6123424fb3e6860dfac4325dd232b223005 | [
"BSD-2-Clause"
]
| null | null | null | Tutorial-BSSN_in_terms_of_ADM.ipynb | leowerneck/NRPyIGM | f483d6123424fb3e6860dfac4325dd232b223005 | [
"BSD-2-Clause"
]
| null | null | null | 47.051948 | 528 | 0.557194 | [
[
[
"<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# ADM Quantities in terms of BSSN Quantities\n## Author: Zach Etienne\n\n[comment]: <> (Abstract: TODO)\n\n**Notebook Status:** <font color='orange'><b> Self-Validated </b></font>\n\n**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**\n\n### NRPy+ Source Code for this module: [ADM_in_terms_of_BSSN.py](../edit/BSSN/BSSN_in_terms_of_ADM.py)\n\n## Introduction:\nThis module documents the conversion of ADM variables:\n\n$$\\left\\{\\gamma_{ij}, K_{ij}, \\alpha, \\beta^i\\right\\}$$\n\ninto BSSN variables\n\n$$\\left\\{\\bar{\\gamma}_{i j},\\bar{A}_{i j},\\phi, K, \\bar{\\Lambda}^{i}, \\alpha, \\beta^i, B^i\\right\\},$$ \n\nin the desired curvilinear basis (given by `reference_metric::CoordSystem`). Then it rescales the resulting BSSNCurvilinear variables (as defined in [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb)) into the form needed for solving Einstein's equations with the BSSN formulation:\n\n$$\\left\\{h_{i j},a_{i j},\\phi, K, \\lambda^{i}, \\alpha, \\mathcal{V}^i, \\mathcal{B}^i\\right\\}.$$",
"_____no_output_____"
],
[
"# Table of Contents\n$$\\label{toc}$$ \n\nThis notebook is organized as follows\n\n1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules; set desired output BSSN Curvilinear coordinate system set to Spherical\n1. [Step 2](#adm2bssn): Perform the ADM-to-BSSN conversion for 3-metric, extrinsic curvature, and gauge quantities\n 1. [Step 2.a](#adm2bssn_gamma): Convert ADM $\\gamma_{ij}$ to BSSN $\\bar{\\gamma}_{ij}$; rescale to get $h_{ij}$\n 1. [Step 2.b](#admexcurv_convert): Convert the ADM extrinsic curvature $K_{ij}$ to BSSN $\\bar{A}_{ij}$ and $K$; rescale to get $a_{ij}$, $K$.\n 1. [Step 2.c](#lambda): Define $\\bar{\\Lambda}^i$\n 1. [Step 2.d](#conformal): Define the conformal factor variable `cf`\n1. [Step 3](#code_validation): Code Validation against `BSSN.BSSN_in_terms_of_ADM` NRPy+ module\n1. [Step 4](#latex_pdf_output): Output this notebook to $\\LaTeX$-formatted PDF file",
"_____no_output_____"
],
[
"<a id='initializenrpy'></a>\n\n# Step 1: Initialize core Python/NRPy+ modules \\[Back to [top](#toc)\\]\n$$\\label{initializenrpy}$$\n",
"_____no_output_____"
]
],
[
[
"# Step 1: Import needed core NRPy+ modules\nfrom outputC import * # NRPy+: Core C code output module\nimport NRPy_param_funcs as par # NRPy+: Parameter interface\nimport indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support\nimport reference_metric as rfm # NRPy+: Reference metric support\nimport sys # Standard Python modules for multiplatform OS-level functions\nimport BSSN.BSSN_quantities as Bq # NRPy+: This module depends on the parameter EvolvedConformalFactor_cf,\n # which is defined in BSSN.BSSN_quantities\n\n# Step 1.a: Set DIM=3, as we're using a 3+1 decomposition of Einstein's equations\nDIM=3",
"_____no_output_____"
]
],
[
[
"<a id='adm2bssn'></a>\n\n# Step 2: Perform the ADM-to-BSSN conversion for 3-metric, extrinsic curvature, and gauge quantities \\[Back to [top](#toc)\\]\n$$\\label{adm2bssn}$$\n\nHere we convert ADM quantities to their BSSN Curvilinear counterparts.",
"_____no_output_____"
],
[
"<a id='adm2bssn_gamma'></a>\n\n## Step 2.a: Convert ADM $\\gamma_{ij}$ to BSSN $\\bar{\\gamma}_{ij}$; rescale to get $h_{ij}$ \\[Back to [top](#toc)\\]\n$$\\label{adm2bssn_gamma}$$\n\nWe have (Eqs. 2 and 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):\n$$\n\\bar{\\gamma}_{i j} = \\left(\\frac{\\bar{\\gamma}}{\\gamma}\\right)^{1/3} \\gamma_{ij},\n$$\nwhere we always make the choice $\\bar{\\gamma} = \\hat{\\gamma}$.\n\nAfter constructing $\\bar{\\gamma}_{ij}$, we rescale to get $h_{ij}$ according to the prescription described in the [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb) (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):\n\n$$\nh_{ij} = (\\bar{\\gamma}_{ij} - \\hat{\\gamma}_{ij})/\\text{ReDD[i][j]}.\n$$",
"_____no_output_____"
]
],
[
[
"# Step 2: All ADM quantities were input into this function in the Spherical or Cartesian\n# basis, as functions of r,th,ph or x,y,z, respectively. In Steps 1 and 2 above,\n# we converted them to the xx0,xx1,xx2 basis, and as functions of xx0,xx1,xx2.\n# Here we convert ADM quantities to their BSSN Curvilinear counterparts:\n\n# Step 2.a: Convert ADM $\\gamma_{ij}$ to BSSN $\\bar{gamma}_{ij}$:\n# We have (Eqs. 2 and 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):\ndef gammabarDD_hDD(gammaDD):\n global gammabarDD,hDD\n if rfm.have_already_called_reference_metric_function == False:\n print(\"BSSN.BSSN_in_terms_of_ADM.hDD_given_ADM(): Must call reference_metric() first!\")\n sys.exit(1)\n # \\bar{gamma}_{ij} = (\\frac{\\bar{gamma}}{gamma})^{1/3}*gamma_{ij}.\n gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)\n gammabarDD = ixp.zerorank2()\n hDD = ixp.zerorank2()\n for i in range(DIM):\n for j in range(DIM):\n gammabarDD[i][j] = (rfm.detgammahat/gammaDET)**(sp.Rational(1,3))*gammaDD[i][j]\n hDD[i][j] = (gammabarDD[i][j] - rfm.ghatDD[i][j]) / rfm.ReDD[i][j]",
"_____no_output_____"
]
],
[
[
"<a id='admexcurv_convert'></a>\n\n## Step 2.b: Convert the ADM extrinsic curvature $K_{ij}$ to BSSN quantities $\\bar{A}_{ij}$ and $K={\\rm tr}(K_{ij})$; rescale $\\bar{A}_{ij}$ to get $a_{ij}$ \\[Back to [top](#toc)\\]\n$$\\label{admexcurv_convert}$$\n\nConvert the ADM extrinsic curvature $K_{ij}$ to the trace-free extrinsic curvature $\\bar{A}_{ij}$, plus the trace of the extrinsic curvature $K$, where (Eq. 3 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):\n\\begin{align}\nK &= \\gamma^{ij} K_{ij} \\\\\n\\bar{A}_{ij} &= \\left(\\frac{\\bar{\\gamma}}{\\gamma}\\right)^{1/3} \\left(K_{ij} - \\frac{1}{3} \\gamma_{ij} K \\right)\n\\end{align}\n\nAfter constructing $\\bar{A}_{ij}$, we rescale to get $a_{ij}$ according to the prescription described in the [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb) (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):\n\n$$\na_{ij} = \\bar{A}_{ij}/\\text{ReDD[i][j]}.\n$$",
"_____no_output_____"
]
],
[
[
"# Step 2.b: Convert the extrinsic curvature K_{ij} to the trace-free extrinsic \n# curvature \\bar{A}_{ij}, plus the trace of the extrinsic curvature K, \n# where (Eq. 3 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):\ndef trK_AbarDD_aDD(gammaDD,KDD):\n global trK,AbarDD,aDD\n if rfm.have_already_called_reference_metric_function == False:\n print(\"BSSN.BSSN_in_terms_of_ADM.trK_AbarDD(): Must call reference_metric() first!\")\n sys.exit(1)\n # \\bar{gamma}_{ij} = (\\frac{\\bar{gamma}}{gamma})^{1/3}*gamma_{ij}.\n gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)\n # K = gamma^{ij} K_{ij}, and\n # \\bar{A}_{ij} &= (\\frac{\\bar{gamma}}{gamma})^{1/3}*(K_{ij} - \\frac{1}{3}*gamma_{ij}*K)\n trK = sp.sympify(0)\n for i in range(DIM):\n for j in range(DIM):\n trK += gammaUU[i][j]*KDD[i][j]\n\n AbarDD = ixp.zerorank2()\n aDD = ixp.zerorank2()\n for i in range(DIM):\n for j in range(DIM):\n AbarDD[i][j] = (rfm.detgammahat/gammaDET)**(sp.Rational(1,3))*(KDD[i][j] - sp.Rational(1,3)*gammaDD[i][j]*trK)\n aDD[i][j] = AbarDD[i][j] / rfm.ReDD[i][j]",
"_____no_output_____"
]
],
[
[
"<a id='lambda'></a>\n\n## Step 2.c: Assuming the ADM 3-metric $\\gamma_{ij}$ is given as an explicit function of `(xx0,xx1,xx2)`, convert to BSSN $\\bar{\\Lambda}^i$; rescale to compute $\\lambda^i$ \\[Back to [top](#toc)\\]\n$$\\label{lambda}$$\n\nTo define $\\bar{\\Lambda}^i$ we implement Eqs. 4 and 5 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf):\n$$\n\\bar{\\Lambda}^i = \\bar{\\gamma}^{jk}\\left(\\bar{\\Gamma}^i_{jk} - \\hat{\\Gamma}^i_{jk}\\right).\n$$\n\nThe [reference_metric.py](../edit/reference_metric.py) module provides us with exact, closed-form expressions for $\\hat{\\Gamma}^i_{jk}$, so here we need only compute exact expressions for $\\bar{\\Gamma}^i_{jk}$, based on $\\gamma_{ij}$ given as an explicit function of `(xx0,xx1,xx2)`. This is particularly useful when setting up initial data.\n\nAfter constructing \\bar{\\Lambda}^i$, we rescale to get $\\lambda^i$ according to the prescription described in the [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb) (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):\n\n$$\n\\lambda^i = \\bar{\\Lambda}^i/\\text{ReU[i]}.\n$$",
"_____no_output_____"
]
],
[
[
"# Step 2.c: Define \\bar{Lambda}^i (Eqs. 4 and 5 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):\ndef LambdabarU_lambdaU__exact_gammaDD(gammaDD):\n global LambdabarU,lambdaU\n \n # \\bar{Lambda}^i = \\bar{gamma}^{jk}(\\bar{Gamma}^i_{jk} - \\hat{Gamma}^i_{jk}).\n gammabarDD_hDD(gammaDD)\n gammabarUU, gammabarDET = ixp.symm_matrix_inverter3x3(gammabarDD)\n\n # First compute Christoffel symbols \\bar{Gamma}^i_{jk}, with respect to barred metric:\n GammabarUDD = ixp.zerorank3()\n for i in range(DIM):\n for j in range(DIM):\n for k in range(DIM):\n for l in range(DIM):\n GammabarUDD[i][j][k] += sp.Rational(1,2)*gammabarUU[i][l]*( sp.diff(gammabarDD[l][j],rfm.xx[k]) +\n sp.diff(gammabarDD[l][k],rfm.xx[j]) -\n sp.diff(gammabarDD[j][k],rfm.xx[l]) )\n # Next evaluate \\bar{Lambda}^i, based on GammabarUDD above and GammahatUDD\n # (from the reference metric):\n LambdabarU = ixp.zerorank1()\n for i in range(DIM):\n for j in range(DIM):\n for k in range(DIM):\n LambdabarU[i] += gammabarUU[j][k] * (GammabarUDD[i][j][k] - rfm.GammahatUDD[i][j][k])\n lambdaU = ixp.zerorank1()\n for i in range(DIM):\n lambdaU[i] = LambdabarU[i] / rfm.ReU[i]",
"_____no_output_____"
]
],
[
[
"<a id='conformal'></a>\n\n## Step 2.d: Define the conformal factor variable `cf` \\[Back to [top](#toc)\\]\n$$\\label{conformal}$$\n\nWe define the conformal factor variable `cf` based on the setting of the `\"BSSN_quantities::EvolvedConformalFactor_cf\"` parameter.\n\nFor example if `\"BSSN_quantities::EvolvedConformalFactor_cf\"` is set to `\"phi\"`, we can use Eq. 3 of [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf), which in arbitrary coordinates is written:\n\n$$\n\\phi = \\frac{1}{12} \\log\\left(\\frac{\\gamma}{\\bar{\\gamma}}\\right).\n$$\n\nAlternatively if `\"BSSN_quantities::EvolvedConformalFactor_cf\"` is set to `\"chi\"`, then\n$$\n\\chi = e^{-4 \\phi} = \\exp\\left(-4 \\frac{1}{12} \\left(\\frac{\\gamma}{\\bar{\\gamma}}\\right)\\right) \n= \\exp\\left(-\\frac{1}{3} \\log\\left(\\frac{\\gamma}{\\bar{\\gamma}}\\right)\\right) = \\left(\\frac{\\gamma}{\\bar{\\gamma}}\\right)^{-1/3}.\n$$\n\nFinally if `\"BSSN_quantities::EvolvedConformalFactor_cf\"` is set to `\"W\"`, then\n$$\nW = e^{-2 \\phi} = \\exp\\left(-2 \\frac{1}{12} \\log\\left(\\frac{\\gamma}{\\bar{\\gamma}}\\right)\\right) = \n\\exp\\left(-\\frac{1}{6} \\log\\left(\\frac{\\gamma}{\\bar{\\gamma}}\\right)\\right) = \n\\left(\\frac{\\gamma}{\\bar{\\gamma}}\\right)^{-1/6}.\n$$",
"_____no_output_____"
]
],
[
[
"# Step 2.d: Set the conformal factor variable cf, which is set \n# by the \"BSSN_quantities::EvolvedConformalFactor_cf\" parameter. For example if \n# \"EvolvedConformalFactor_cf\" is set to \"phi\", we can use Eq. 3 of \n# [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf), \n# which in arbitrary coordinates is written:\ndef cf_from_gammaDD(gammaDD):\n global cf\n \n # \\bar{Lambda}^i = \\bar{gamma}^{jk}(\\bar{Gamma}^i_{jk} - \\hat{Gamma}^i_{jk}).\n gammabarDD_hDD(gammaDD)\n gammabarUU, gammabarDET = ixp.symm_matrix_inverter3x3(gammabarDD)\n gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)\n\n cf = sp.sympify(0)\n\n if par.parval_from_str(\"EvolvedConformalFactor_cf\") == \"phi\":\n # phi = \\frac{1}{12} log(\\frac{gamma}{\\bar{gamma}}).\n cf = sp.Rational(1,12)*sp.log(gammaDET/gammabarDET)\n elif par.parval_from_str(\"EvolvedConformalFactor_cf\") == \"chi\":\n # chi = exp(-4*phi) = exp(-4*\\frac{1}{12}*(\\frac{gamma}{\\bar{gamma}}))\n # = exp(-\\frac{1}{3}*log(\\frac{gamma}{\\bar{gamma}})) = (\\frac{gamma}{\\bar{gamma}})^{-1/3}.\n #\n cf = (gammaDET/gammabarDET)**(-sp.Rational(1,3))\n elif par.parval_from_str(\"EvolvedConformalFactor_cf\") == \"W\":\n # W = exp(-2*phi) = exp(-2*\\frac{1}{12}*log(\\frac{gamma}{\\bar{gamma}})) \n # = exp(-\\frac{1}{6}*log(\\frac{gamma}{\\bar{gamma}})) = (\\frac{gamma}{bar{gamma}})^{-1/6}.\n cf = (gammaDET/gammabarDET)**(-sp.Rational(1,6))\n else:\n print(\"Error EvolvedConformalFactor_cf type = \\\"\"+par.parval_from_str(\"EvolvedConformalFactor_cf\")+\"\\\" unknown.\")\n sys.exit(1)",
"_____no_output_____"
]
],
[
[
"<a id='betvet'></a>\n\n## Step 2.e: Rescale $\\beta^i$ and $B^i$ to compute $\\mathcal{V}^i={\\rm vet}^i$ and $\\mathcal{B}^i={\\rm bet}^i$, respectively \\[Back to [top](#toc)\\]\n$$\\label{betvet}$$\n\nWe rescale $\\beta^i$ and $B^i$ according to the prescription described in the [the covariant BSSN formulation tutorial](Tutorial-BSSN_formulation.ipynb) (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):\n\\begin{align}\n\\mathcal{V}^i &= \\beta^i/\\text{ReU[i]}\\\\\n\\mathcal{B}^i &= B^i/\\text{ReU[i]}.\n\\end{align}",
"_____no_output_____"
]
],
[
[
"# Step 2.e: Rescale beta^i and B^i according to the prescription described in \n# the [BSSN in curvilinear coordinates tutorial notebook](Tutorial-BSSNCurvilinear.ipynb) \n# (also [Ruchlin *et al.*](https://arxiv.org/pdf/1712.07658.pdf)):\n#\n# \\mathcal{V}^i &= beta^i/(ReU[i])\n# \\mathcal{B}^i &= B^i/(ReU[i])\ndef betU_vetU(betaU,BU):\n global vetU,betU\n\n if rfm.have_already_called_reference_metric_function == False:\n print(\"BSSN.BSSN_in_terms_of_ADM.bet_vet(): Must call reference_metric() first!\")\n sys.exit(1)\n vetU = ixp.zerorank1()\n betU = ixp.zerorank1()\n for i in range(DIM):\n vetU[i] = betaU[i] / rfm.ReU[i]\n betU[i] = BU[i] / rfm.ReU[i]",
"_____no_output_____"
]
],
[
[
"<a id='code_validation'></a>\n\n# Step 3: Code Validation against `BSSN.BSSN_in_terms_of_ADM` module \\[Back to [top](#toc)\\] \n$$\\label{code_validation}$$\n\nHere, as a code validation check, we verify agreement in the SymPy expressions for [UIUC initial data](Tutorial-ADM_Initial_Data-UIUC_BlackHole.ipynb) between\n1. this tutorial and \n2. the NRPy+ [BSSN.BSSN_in_terms_of_ADM](../edit/BSSN/BSSN_in_terms_of_ADM.py) module.\n\nAs no basis transformation is performed, we analyze these expressions in their native, Spherical coordinates.",
"_____no_output_____"
]
],
[
[
"# Step 3.a: Set the desired *output* coordinate system to Spherical:\npar.set_parval_from_str(\"reference_metric::CoordSystem\",\"Spherical\")\nrfm.reference_metric()\n\n# Step 3.b: Set up initial data; assume UIUC spinning black hole initial data\nimport BSSN.UIUCBlackHole as uibh\nuibh.UIUCBlackHole(ComputeADMGlobalsOnly=True)\n\n# Step 3.c: Call above functions to convert ADM to BSSN curvilinear\ngammabarDD_hDD( uibh.gammaSphDD)\ntrK_AbarDD_aDD( uibh.gammaSphDD,uibh.KSphDD)\nLambdabarU_lambdaU__exact_gammaDD(uibh.gammaSphDD)\ncf_from_gammaDD( uibh.gammaSphDD)\nbetU_vetU( uibh.betaSphU,uibh.BSphU)\n\n# Step 3.d: Now load the BSSN_in_terms_of_ADM module and perform the same conversion\nimport BSSN.BSSN_in_terms_of_ADM as BitoA\nBitoA.gammabarDD_hDD( uibh.gammaSphDD)\nBitoA.trK_AbarDD_aDD( uibh.gammaSphDD,uibh.KSphDD)\nBitoA.LambdabarU_lambdaU__exact_gammaDD(uibh.gammaSphDD)\nBitoA.cf_from_gammaDD( uibh.gammaSphDD)\nBitoA.betU_vetU( uibh.betaSphU,uibh.BSphU)\n\n# Step 3.e: Perform the consistency check\nprint(\"Consistency check between this tutorial notebook and BSSN.BSSN_in_terms_of_ADM NRPy+ module: ALL SHOULD BE ZERO.\")\n\nprint(\"cf - BitoA.cf = \" + str(cf - BitoA.cf))\nprint(\"trK - BitoA.trK = \" + str(trK - BitoA.trK))\n# alpha is the only variable that remains unchanged:\n# print(\"alpha - BitoA.alpha = \" + str(alpha - BitoA.alpha))\n\nfor i in range(DIM):\n print(\"vetU[\"+str(i)+\"] - BitoA.vetU[\"+str(i)+\"] = \" + str(vetU[i] - BitoA.vetU[i]))\n print(\"betU[\"+str(i)+\"] - BitoA.betU[\"+str(i)+\"] = \" + str(betU[i] - BitoA.betU[i]))\n print(\"lambdaU[\"+str(i)+\"] - BitoA.lambdaU[\"+str(i)+\"] = \" + str(lambdaU[i] - BitoA.lambdaU[i]))\n for j in range(DIM):\n print(\"hDD[\"+str(i)+\"][\"+str(j)+\"] - BitoA.hDD[\"+str(i)+\"][\"+str(j)+\"] = \" \n + str(hDD[i][j] - BitoA.hDD[i][j]))\n print(\"aDD[\"+str(i)+\"][\"+str(j)+\"] - BitoA.aDD[\"+str(i)+\"][\"+str(j)+\"] = \" \n + str(aDD[i][j] - BitoA.aDD[i][j]))",
"Consistency check between this tutorial notebook and BSSN.BSSN_in_terms_of_ADM NRPy+ module: ALL SHOULD BE ZERO.\ncf - BitoA.cf = 0\ntrK - BitoA.trK = 0\nvetU[0] - BitoA.vetU[0] = 0\nbetU[0] - BitoA.betU[0] = 0\nlambdaU[0] - BitoA.lambdaU[0] = 0\nhDD[0][0] - BitoA.hDD[0][0] = 0\naDD[0][0] - BitoA.aDD[0][0] = 0\nhDD[0][1] - BitoA.hDD[0][1] = 0\naDD[0][1] - BitoA.aDD[0][1] = 0\nhDD[0][2] - BitoA.hDD[0][2] = 0\naDD[0][2] - BitoA.aDD[0][2] = 0\nvetU[1] - BitoA.vetU[1] = 0\nbetU[1] - BitoA.betU[1] = 0\nlambdaU[1] - BitoA.lambdaU[1] = 0\nhDD[1][0] - BitoA.hDD[1][0] = 0\naDD[1][0] - BitoA.aDD[1][0] = 0\nhDD[1][1] - BitoA.hDD[1][1] = 0\naDD[1][1] - BitoA.aDD[1][1] = 0\nhDD[1][2] - BitoA.hDD[1][2] = 0\naDD[1][2] - BitoA.aDD[1][2] = 0\nvetU[2] - BitoA.vetU[2] = 0\nbetU[2] - BitoA.betU[2] = 0\nlambdaU[2] - BitoA.lambdaU[2] = 0\nhDD[2][0] - BitoA.hDD[2][0] = 0\naDD[2][0] - BitoA.aDD[2][0] = 0\nhDD[2][1] - BitoA.hDD[2][1] = 0\naDD[2][1] - BitoA.aDD[2][1] = 0\nhDD[2][2] - BitoA.hDD[2][2] = 0\naDD[2][2] - BitoA.aDD[2][2] = 0\n"
]
],
[
[
"<a id='latex_pdf_output'></a>\n\n# Step 5: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.pdf](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)",
"_____no_output_____"
]
],
[
[
"!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-BSSN_in_terms_of_ADM.ipynb\n!pdflatex -interaction=batchmode Tutorial-BSSN_in_terms_of_ADM.tex\n!pdflatex -interaction=batchmode Tutorial-BSSN_in_terms_of_ADM.tex\n!pdflatex -interaction=batchmode Tutorial-BSSN_in_terms_of_ADM.tex\n!rm -f Tut*.out Tut*.aux Tut*.log",
"[NbConvertApp] Converting notebook Tutorial-BSSN_in_terms_of_ADM.ipynb to latex\r\n[NbConvertApp] Writing 60791 bytes to Tutorial-BSSN_in_terms_of_ADM.tex\r\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\r\n restricted \\write18 enabled.\r\nentering extended mode\r\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\r\n restricted \\write18 enabled.\r\nentering extended mode\r\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\r\n restricted \\write18 enabled.\r\nentering extended mode\r\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec65ddae783b2d9a942f59578ebc6c7ee4f8a1db | 328,908 | ipynb | Jupyter Notebook | .ipynb_checkpoints/Untitled-Copy1-checkpoint.ipynb | vishwas04/build_my_web | a1b63fc25e56bebde3d8efef3c88355024bbfd9d | [
"MIT"
]
| 1 | 2021-11-08T08:50:19.000Z | 2021-11-08T08:50:19.000Z | .ipynb_checkpoints/Untitled-Copy1-checkpoint.ipynb | vishwas04/build_my_web | a1b63fc25e56bebde3d8efef3c88355024bbfd9d | [
"MIT"
]
| null | null | null | .ipynb_checkpoints/Untitled-Copy1-checkpoint.ipynb | vishwas04/build_my_web | a1b63fc25e56bebde3d8efef3c88355024bbfd9d | [
"MIT"
]
| 1 | 2021-12-08T06:34:00.000Z | 2021-12-08T06:34:00.000Z | 187.411966 | 79,360 | 0.872104 | [
[
[
"!head -20 data/words.txt",
"#--- words.txt ---------------------------------------------------------------#\n#\n# iam database word information\n#\n# format: a01-000u-00-00 ok 154 1 408 768 27 51 AT A\n#\n# a01-000u-00-00 -> word id for line 00 in form a01-000u\n# ok -> result of word segmentation\n# ok: word was correctly\n# er: segmentation of word can be bad\n#\n# 154 -> graylevel to binarize the line containing this word\n# 1 -> number of components for this word\n# 408 768 27 51 -> bounding box around this word in x,y,w,h format\n# AT -> the grammatical tag for this word, see the\n# file tagset.txt for an explanation\n# A -> the transcription for this word\n#\na01-000u-00-00 ok 154 408 768 27 51 AT A\na01-000u-00-01 ok 154 507 766 213 48 NN MOVE\n"
],
[
"from tensorflow import keras\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nimport os\n\nnp.random.seed(42)\ntf.random.set_seed(42)",
"_____no_output_____"
],
[
"base_path = \"data\"\nwords_list = []\n\nwords = open(\"/Users/vishwas/Desktop/build_my_web/data/words.txt\", \"r\").readlines()\nfor line in words:\n if line[0]=='#':\n continue\n if line.split(\" \")[1]!=\"err\": # We don't need to deal with errored entries.\n words_list.append(line)\n\nlen(words_list)",
"_____no_output_____"
],
[
"np.random.shuffle(words_list)\nsplit_idx = int(0.9 * len(words_list))\ntrain_samples = words_list[:split_idx]\ntest_samples = words_list[split_idx:]\n\nval_split_idx = int(0.5 * len(test_samples))\nvalidation_samples = test_samples[:val_split_idx]\ntest_samples = test_samples[val_split_idx:]\n\nassert len(words_list) == len(train_samples) + len(validation_samples) + len(test_samples)\n\nprint(f\"Total training samples: {len(train_samples)}\")\nprint(f\"Total validation samples: {len(validation_samples)}\")\nprint(f\"Total test samples: {len(test_samples)}\")",
"Total training samples: 86810\nTotal validation samples: 4823\nTotal test samples: 4823\n"
],
[
"base_image_path = \"/Users/vishwas/Desktop/build_my_web/data/words\"\ndef get_image_paths_and_labels(samples):\n paths = []\n corrected_samples = []\n for (i, file_line) in enumerate(samples):\n line_split = file_line.strip()\n line_split = line_split.split(\" \")\n \n # Each line split will have this format for the corresponding image:\n # part1/part1-part2/part1-part2-part3.png\n image_name = line_split[0] \n partI = image_name.split(\"-\")[0]\n partII = image_name.split(\"-\")[1]\n img_path = os.path.join(base_image_path, partI, \n partI + \"-\" + partII,\n image_name + \".png\"\n )\n if os.path.getsize(img_path):\n paths.append(img_path)\n corrected_samples.append(file_line.split(\"\\n\")[0])\n# print(file_line,\"..\",image_name,\"..\",partI,\"..\",partII,\"..\",img_path)\n# break\n return paths, corrected_samples\n\n\ntrain_img_paths, train_labels = get_image_paths_and_labels(train_samples)\nvalidation_img_paths, validation_labels = get_image_paths_and_labels(validation_samples)\ntest_img_paths, test_labels = get_image_paths_and_labels(test_samples)\n",
"_____no_output_____"
],
[
"print(train_img_paths[0],\"__\", train_labels[0])",
"/Users/vishwas/Desktop/build_my_web/data/words/e04/e04-030/e04-030-04-08.png __ e04-030-04-08 ok 170 1489 1499 120 39 JJ sure\n"
],
[
"# Find maximum length and the size of the vocabulary in the training data.\ntrain_labels_cleaned = []\ncharacters = set()\nmax_len = 0\n\nfor label in train_labels:\n label = label.split(\" \")[-1].strip()\n for char in label:\n characters.add(char)\n\n max_len = max(max_len, len(label))\n train_labels_cleaned.append(label)\n\nprint(\"Maximum length: \", max_len)\nprint(\"Vocab size(alpha+num+punch): \", len(characters))\nprint(len(train_labels_cleaned))\ncharacters",
"Maximum length: 21\nVocab size(alpha+num+punch): 78\n86808\n"
],
[
"def clean_labels(labels):\n cleaned_labels = []\n for label in labels:\n label = label.split(\" \")[-1].strip()\n cleaned_labels.append(label)\n return cleaned_labels\n\n\nvalidation_labels_cleaned = clean_labels(validation_labels)\ntest_labels_cleaned = clean_labels(test_labels)",
"_____no_output_____"
],
[
"from tensorflow.keras.layers.experimental.preprocessing import StringLookup\n# AUTOTUNE = tf.data.AUTOTUNE",
"_____no_output_____"
],
[
"# Mapping characters to integers.\nchar_to_num = StringLookup(vocabulary=list(characters), mask_token=None)\n\n# Mapping integers back to original characters.\nnum_to_char = StringLookup(\n vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True)\n",
"_____no_output_____"
],
[
"x=tf.constant(\n[[77],\n [58],\n [20],\n [56],\n [27],\n [56],\n [62]])\nnum_to_char(x)",
"_____no_output_____"
],
[
"def distortion_free_resize(image, img_size):\n w, h = img_size\n image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True)\n\n # Check tha amount of padding needed to be done.\n pad_height = h - tf.shape(image)[0]\n pad_width = w - tf.shape(image)[1]\n\n # Only necessary if you want to do same amount of padding on both sides.\n if pad_height % 2 != 0:\n height = pad_height // 2\n pad_height_top = height + 1\n pad_height_bottom = height\n else:\n pad_height_top = pad_height_bottom = pad_height // 2\n \n if pad_width % 2 != 0:\n width = pad_width // 2\n pad_width_left = width + 1\n pad_width_right = width\n else:\n pad_width_left = pad_width_right = pad_width // 2\n\n image = tf.pad(\n image,\n paddings=[\n [pad_height_top, pad_height_bottom],\n [pad_width_left, pad_width_right],\n [0, 0]\n ]\n )\n\n image = tf.transpose(image, perm=[1, 0, 2])\n image = tf.image.flip_left_right(image)\n return image",
"_____no_output_____"
],
[
"batch_size = 64\npadding_token = 99\nimage_width = 128\nimage_height = 32\n\n\ndef preprocess_image(image_path, img_size=(image_width, image_height)):\n image = tf.io.read_file(image_path)\n image = tf.image.decode_png(image, 1)\n image = distortion_free_resize(image, img_size)\n image = tf.cast(image, tf.float32) / 255.\n return image\n\n\ndef vectorize_label(label):\n label = char_to_num(tf.strings.unicode_split(label, input_encoding=\"UTF-8\"))\n length = tf.shape(label)[0]\n pad_amount = max_len - length\n label = tf.pad(label, paddings=[[0, pad_amount]], constant_values=padding_token)\n return label\n\n\ndef process_images_labels(image_path, label):\n image = preprocess_image(image_path)\n label = vectorize_label(label)\n return {\"image\": image, \"label\": label}\n\n\ndef prepare_dataset(image_paths, labels):\n #creats a something like generator and applies a funtion for each element (.map)\n dataset = tf.data.Dataset.from_tensor_slices((image_paths, labels)).map(\n process_images_labels, num_parallel_calls=AUTOTUNE\n )\n #batch is like grouping into buckets \n return dataset.batch(batch_size).cache().prefetch(AUTOTUNE)",
"_____no_output_____"
],
[
"train_ds = prepare_dataset(train_img_paths, train_labels_cleaned)\nvalidation_ds = prepare_dataset(validation_img_paths, validation_labels_cleaned)\ntest_ds = prepare_dataset(test_img_paths, test_labels_cleaned)",
"_____no_output_____"
],
[
"#Creates a Dataset with at most count elements from this dataset. - \n#take(1) takes first data from dataset\n#take(5) takes first 5 datas from dataset\nfor data in train_ds.take(1):\n images, labels = data[\"image\"], data[\"label\"]\n\n _, ax = plt.subplots(4, 4, figsize=(15, 8))\n\n for i in range(16):\n img = images[i]\n img = tf.image.flip_left_right(img)\n img = tf.transpose(img, perm=[1, 0, 2])\n img = (img * 255.).numpy().clip(0, 255).astype(np.uint8)\n img = img[:, :, 0]\n\n # Gather indices where label!= 99.\n label = labels[i]\n indices = tf.gather(label, tf.where(tf.math.not_equal(label, padding_token)))\n # Convert to string.\n label = tf.strings.reduce_join(num_to_char(indices))\n label = label.numpy().decode(\"utf-8\")\n\n ax[i // 4, i % 4].imshow(img, cmap=\"gray\")\n ax[i // 4, i % 4].set_title(label)\n ax[i // 4, i % 4].axis(\"off\")\n\n\nplt.show()\n",
"_____no_output_____"
],
[
"class CTCLayer(keras.layers.Layer):\n def __init__(self, name=None,**kwargs):\n super().__init__(name=name)\n self.loss_fn = keras.backend.ctc_batch_cost\n# super(CustomLayer, self).__init__(name=name)\n# self.k = k\n super(CTCLayer, self).__init__(**kwargs)\n\n def call(self, y_true, y_pred):\n batch_len = tf.cast(tf.shape(y_true)[0], dtype=\"int64\")\n input_length = tf.cast(tf.shape(y_pred)[1], dtype=\"int64\")\n label_length = tf.cast(tf.shape(y_true)[1], dtype=\"int64\")\n\n input_length = input_length * tf.ones(shape=(batch_len, 1), dtype=\"int64\")\n label_length = label_length * tf.ones(shape=(batch_len, 1), dtype=\"int64\")\n loss = self.loss_fn(y_true, y_pred, input_length, label_length)\n self.add_loss(loss)\n\n # At test time, just return the computed predictions.\n return y_pred\n\n\ndef build_model():\n # Inputs to the model\n input_img = keras.Input(\n shape=(image_width, image_height, 1), name=\"image\")\n labels = keras.layers.Input(name=\"label\", shape=(None,))\n\n # First conv block.\n x = keras.layers.Conv2D(\n 32,\n (3, 3),\n activation=\"relu\",\n kernel_initializer=\"he_normal\",\n padding=\"same\",\n name=\"Conv1\",\n )(input_img)\n x = keras.layers.MaxPooling2D((2, 2), name=\"pool1\")(x)\n\n # Second conv block.\n x = keras.layers.Conv2D(\n 64,\n (3, 3),\n activation=\"relu\",\n kernel_initializer=\"he_normal\",\n padding=\"same\",\n name=\"Conv2\",\n )(x)\n x = keras.layers.MaxPooling2D((2, 2), name=\"pool2\")(x)\n\n # We have used two max pool with pool size and strides 2.\n # Hence, downsampled feature maps are 4x smaller. The number of\n # filters in the last layer is 64. 
Reshape accordingly before\n # passing the output to the RNN part of the model.\n new_shape = ((image_width // 4), (image_height // 4) * 64)\n x = keras.layers.Reshape(target_shape=new_shape, name=\"reshape\")(x)\n x = keras.layers.Dense(64, activation=\"relu\", name=\"dense1\")(x)\n x = keras.layers.Dropout(0.2)(x)\n\n # RNNs.\n x = keras.layers.Bidirectional(keras.layers.LSTM(128, return_sequences=True, dropout=0.25))(x)\n x = keras.layers.Bidirectional(keras.layers.LSTM(64, return_sequences=True, dropout=0.25))(x)\n\n # Output layer (the tokenizer is char-level)\n # +2 is to account for the two special tokens introduced by the CTC loss.\n # The recommendation comes here: https://git.io/J0eXP.\n x = keras.layers.Dense(len(char_to_num.get_vocabulary()) + 2, activation=\"softmax\", name=\"dense2\")(x)\n \n # Add CTC layer for calculating CTC loss at each step.\n output = CTCLayer(name=\"ctc_loss\")(labels, x)\n\n # Define the model.\n model = keras.models.Model(\n inputs=[input_img, labels], outputs=output, name=\"handwriting_recognizer\"\n )\n # Optimizer.\n opt = keras.optimizers.Adam()\n # Compile the model and return.\n model.compile(optimizer=opt)\n return model\n\n\n# Get the model.\nmodel = build_model()\nmodel.summary()",
"Model: \"handwriting_recognizer\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nimage (InputLayer) [(None, 128, 32, 1)] 0 \n__________________________________________________________________________________________________\nConv1 (Conv2D) (None, 128, 32, 32) 320 image[0][0] \n__________________________________________________________________________________________________\npool1 (MaxPooling2D) (None, 64, 16, 32) 0 Conv1[0][0] \n__________________________________________________________________________________________________\nConv2 (Conv2D) (None, 64, 16, 64) 18496 pool1[0][0] \n__________________________________________________________________________________________________\npool2 (MaxPooling2D) (None, 32, 8, 64) 0 Conv2[0][0] \n__________________________________________________________________________________________________\nreshape (Reshape) (None, 32, 512) 0 pool2[0][0] \n__________________________________________________________________________________________________\ndense1 (Dense) (None, 32, 64) 32832 reshape[0][0] \n__________________________________________________________________________________________________\ndropout_2 (Dropout) (None, 32, 64) 0 dense1[0][0] \n__________________________________________________________________________________________________\nbidirectional_4 (Bidirectional) (None, 32, 256) 197632 dropout_2[0][0] \n__________________________________________________________________________________________________\nbidirectional_5 (Bidirectional) (None, 32, 128) 164352 bidirectional_4[0][0] \n__________________________________________________________________________________________________\nlabel (InputLayer) [(None, None)] 0 \n__________________________________________________________________________________________________\ndense2 
(Dense) (None, 32, 81) 10449 bidirectional_5[0][0] \n__________________________________________________________________________________________________\nctc_layer (CTCLayer) (None, 32, 81) 0 label[0][0] \n dense2[0][0] \n==================================================================================================\nTotal params: 424,081\nTrainable params: 424,081\nNon-trainable params: 0\n__________________________________________________________________________________________________\n"
],
[
"epochs = 5 # To get good results this should be at least 50.\n\n# Train the model\nmodel = build_model()\nhistory = model.fit(\n train_ds,\n validation_data=validation_ds,\n epochs=epochs,\n)\nmodel.save('model.h5')",
"Epoch 1/5\n1357/1357 [==============================] - 290s 210ms/step - loss: 16.2973 - val_loss: 11.5877\nEpoch 2/5\n1357/1357 [==============================] - 273s 201ms/step - loss: 10.9080 - val_loss: 8.8931\nEpoch 3/5\n1357/1357 [==============================] - 277s 204ms/step - loss: 8.5519 - val_loss: 6.1984\nEpoch 4/5\n1357/1357 [==============================] - 280s 207ms/step - loss: 6.3779 - val_loss: 4.6595\nEpoch 5/5\n1357/1357 [==============================] - 617s 455ms/step - loss: 5.1141 - val_loss: 4.0595\n"
],
[
"from keras.models import model_from_json\nfrom keras.models import load_model\n\n# serialize model to JSON\n# the keras model which is trained is defined as 'model' in this example\nmodel_json = model.to_json()\n\n\nwith open(\"model_num.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n# serialize weights to HDF5\nmodel.save_weights(\"model_num.h5\")",
"_____no_output_____"
],
[
"prediction_model = keras.models.Model(\n model.get_layer(name=\"image\").input, model.get_layer(name=\"dense2\").output\n)\ndef decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search.\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][\n :, :max_len\n ]\n # Iterate over the results and get back the text.\n output_text = []\n for res in results:\n res = tf.gather(res, tf.where(tf.math.not_equal(res, -1)))\n res = tf.strings.reduce_join(num_to_char(res)).numpy().decode(\"utf-8\")\n output_text.append(res)\n return output_text\n\n\n# Let's check results on some test samples.\nfor batch in test_ds.take(1):\n batch_images = batch[\"image\"]\n _, ax = plt.subplots(4, 4, figsize=(15, 8))\n\n preds = prediction_model.predict(batch_images)\n pred_texts = decode_batch_predictions(preds)\n\n for i in range(16):\n img = batch_images[i]\n img = tf.image.flip_left_right(img)\n img = tf.transpose(img, perm=[1, 0, 2])\n img = (img * 255.0).numpy().clip(0, 255).astype(np.uint8)\n img = img[:, :, 0]\n\n title = f\"Prediction: {pred_texts[i]}\"\n ax[i // 4, i % 4].imshow(img, cmap=\"gray\")\n ax[i // 4, i % 4].set_title(title)\n ax[i // 4, i % 4].axis(\"off\")\n\nplt.show()",
"_____no_output_____"
],
[
"new_model = tf.keras.models.load_model('model.h5', custom_objects={'CTCLayer': CTCLayer})",
"_____no_output_____"
],
[
"prediction_model = keras.models.Model(\n new_model.get_layer(name=\"image\").input, new_model.get_layer(name=\"dense2\").output\n)\ndef decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search.\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][\n :, :max_len\n ]\n \n # Iterate over the results and get back the text.\n output_text = []\n for res in results:\n res = tf.gather(res, tf.where(tf.math.not_equal(res, -1)))\n# print(res)\n res = tf.strings.reduce_join(num_to_char(res)).numpy().decode(\"utf-8\")\n output_text.append(res)\n with open('/Users/vishwas/Desktop/bd/model.txt', 'w') as f:\n f.write(str(output_text))\n return output_text\n\n\n# Let's check results on some test samples.\nfor batch in test_ds.take(1):\n batch_images = batch[\"image\"]\n _, ax = plt.subplots(4, 4, figsize=(15, 8))\n\n preds = prediction_model.predict(batch_images)\n pred_texts = decode_batch_predictions(preds)\n\n for i in range(16):\n img = batch_images[i]\n img = tf.image.flip_left_right(img)\n img = tf.transpose(img, perm=[1, 0, 2])\n img = (img * 255.0).numpy().clip(0, 255).astype(np.uint8)\n img = img[:, :, 0]\n\n title = f\"Prediction: {pred_texts[i]}\"\n ax[i // 4, i % 4].imshow(img, cmap=\"gray\")\n ax[i // 4, i % 4].set_title(title)\n ax[i // 4, i % 4].axis(\"off\")\n\nplt.show()",
"WARNING:tensorflow:6 out of the last 11 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7fa4e5cff9e0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\ntf.Tensor(\n[[77]\n [58]\n [20]\n [56]\n [27]\n [56]\n [62]], shape=(7, 1), dtype=int64)\ntf.Tensor(\n[[33]\n [56]\n [27]\n [56]], shape=(4, 1), dtype=int64)\ntf.Tensor([[69]], shape=(1, 1), dtype=int64)\ntf.Tensor(\n[[20]\n [70]\n [56]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[60]\n [20]], shape=(2, 1), dtype=int64)\ntf.Tensor([[31]], shape=(1, 1), dtype=int64)\ntf.Tensor(\n[[77]\n [58]\n [11]\n [56]], shape=(4, 1), dtype=int64)\ntf.Tensor(\n[[45]\n [70]\n [56]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[11]\n [76]\n [59]\n [42]\n [20]\n [59]\n [76]\n [ 6]], shape=(8, 1), dtype=int64)\ntf.Tensor(\n[[42]\n [43]\n [43]\n [77]\n [58]\n [43]\n [20]\n [56]\n [62]], shape=(9, 1), dtype=int64)\ntf.Tensor(\n[[77]\n [63]\n [63]\n [59]\n [43]\n [43]\n [ 6]\n [21]\n [42]], shape=(9, 1), dtype=int64)\ntf.Tensor(\n[[20]\n [76]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[61]\n [20]\n [56]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[78]\n [77]\n [31]\n [20]\n [56]\n [62]], shape=(6, 1), dtype=int64)\ntf.Tensor([[69]], shape=(1, 1), dtype=int64)\ntf.Tensor(\n[[18]\n [27]\n [56]\n [56]\n [ 6]], shape=(5, 1), dtype=int64)\ntf.Tensor(\n[[59]\n [11]\n [59]\n [77]\n [20]\n [59]\n [76]\n [ 6]], shape=(8, 1), dtype=int64)\ntf.Tensor([[1]], shape=(1, 1), 
dtype=int64)\ntf.Tensor(\n[[20]\n [70]\n [56]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[59]\n [ 6]\n [59]\n [20]\n [77]\n [39]], shape=(6, 1), dtype=int64)\ntf.Tensor(\n[[70]\n [56]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[78]\n [56]\n [56]\n [58]\n [42]], shape=(5, 1), dtype=int64)\ntf.Tensor(\n[[20]\n [76]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[70]\n [59]\n [ 6]\n [ 9]], shape=(4, 1), dtype=int64)\ntf.Tensor([[69]], shape=(1, 1), dtype=int64)\ntf.Tensor(\n[[77]\n [ 6]\n [62]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[76]\n [63]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[76]\n [11]\n [56]\n [ 6]\n [56]\n [62]], shape=(6, 1), dtype=int64)\ntf.Tensor(\n[[55]\n [56]\n [77]\n [58]\n [77]\n [ 6]], shape=(6, 1), dtype=int64)\ntf.Tensor(\n[[76]\n [63]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[59]\n [42]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[ 9]\n [59]\n [20]\n [56]\n [77]\n [46]\n [56]\n [27]], shape=(8, 1), dtype=int64)\ntf.Tensor(\n[[42]\n [77]\n [78]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[20]\n [77]\n [58]\n [58]\n [56]\n [42]], shape=(6, 1), dtype=int64)\ntf.Tensor(\n[[70]\n [77]\n [46]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[78]\n [56]\n [43]\n [39]\n [62]\n [77]\n [31]\n [42]], shape=(8, 1), dtype=int64)\ntf.Tensor(\n[[12]\n [65]], shape=(2, 1), dtype=int64)\ntf.Tensor([[77]], shape=(1, 1), dtype=int64)\ntf.Tensor(\n[[20]\n [70]\n [56]\n [56]], shape=(4, 1), dtype=int64)\ntf.Tensor(\n[[59]\n [58]\n [58]\n [56]\n [13]], shape=(5, 1), dtype=int64)\ntf.Tensor(\n[[42]\n [76]\n [46]\n [56]], shape=(4, 1), dtype=int64)\ntf.Tensor(\n[[65]\n [76]\n [27]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[18]\n [76]\n [31]\n [ 6]\n [59]\n [42]], shape=(6, 1), dtype=int64)\ntf.Tensor(\n[[62]\n [56]\n [77]\n [58]\n [20]], shape=(5, 1), dtype=int64)\ntf.Tensor(\n[[78]\n [56]\n [78]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[20]\n [76]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[77]\n [20]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[59]\n [42]], shape=(2, 
1), dtype=int64)\ntf.Tensor(\n[[46]\n [56]\n [ 6]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[78]\n [76]\n [20]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[58]\n [59]\n [ 6]\n [56]], shape=(4, 1), dtype=int64)\ntf.Tensor(\n[[58]\n [ 6]\n [62]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[78]\n [ 6]\n [59]\n [20]\n [56]\n [62]], shape=(6, 1), dtype=int64)\ntf.Tensor(\n[[70]\n [56]\n [59]\n [77]\n [70]], shape=(5, 1), dtype=int64)\ntf.Tensor(\n[[76]\n [63]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[12]\n [27]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[70]\n [56]\n [77]\n [62]], shape=(4, 1), dtype=int64)\ntf.Tensor(\n[[46]\n [31]\n [43]\n [70]], shape=(4, 1), dtype=int64)\ntf.Tensor([[69]], shape=(1, 1), dtype=int64)\ntf.Tensor(\n[[20]\n [76]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[76]\n [63]], shape=(2, 1), dtype=int64)\ntf.Tensor(\n[[20]\n [70]\n [56]], shape=(3, 1), dtype=int64)\ntf.Tensor(\n[[78]\n [70]\n [56]\n [ 6]], shape=(4, 1), dtype=int64)\ntf.Tensor(\n[[77]\n [58]\n [58]\n [ 6]\n [ 9]\n [70]], shape=(6, 1), dtype=int64)\n"
],
[
"loaded_model = load_model('my_model.h5', custom_objects={'CTCLayer': CTCLayer})\n# load weights into new model\nloaded_model.load_weights(\"model_num.h5\")\nprint(\"Loaded model from disk\")\n\n# loaded_model.save('model_num.hdf5')\n\n# loaded_model=load_model('model_num.hdf5')",
"Loaded model from disk\n"
],
[
"# %%capture cap --no-stderr\nc=model.weights",
"_____no_output_____"
],
[
"c=model.weights\nwith open('model.txt', 'w') as f:\n f.write(str(c))",
"_____no_output_____"
],
[
"prediction_model = keras.models.Model(\n model.get_layer(name=\"image\").input, model.get_layer(name=\"dense2\").output\n)\nmodel_json = prediction_model.to_json()\n\n\nwith open(\"model_num.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n# serialize weights to HDF5\nprediction_model.save_weights(\"model_num.h5\")\n\nloaded_model = load_model('my_model.h5', custom_objects={'CTCLayer': CTCLayer})\n# load weights into new model\nloaded_model.load_weights(\"model_num.h5\")\nprint(\"Loaded model from disk\")\n\n# loaded_model.save('model_num.hdf5')\n# loaded_model=load_model('model_num.hdf5')\n\nloaded_model.summary()\ndef decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search.\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][\n :, :max_len\n ]\n # Iterate over the results and get back the text.\n output_text = []\n for res in results:\n res = tf.gather(res, tf.where(tf.math.not_equal(res, -1)))\n res = tf.strings.reduce_join(num_to_char(res)).numpy().decode(\"utf-8\")\n output_text.append(res)\n return output_text\n\n\n# Let's check results on some test samples.\nfor batch in test_ds.take(1):\n batch_images = batch[\"image\"]\n _, ax = plt.subplots(4, 4, figsize=(15, 8))\n\n preds = loaded_model.predict(batch_images)\n pred_texts = decode_batch_predictions(preds)\n\n for i in range(16):\n img = batch_images[i]\n img = tf.image.flip_left_right(img)\n img = tf.transpose(img, perm=[1, 0, 2])\n img = (img * 255.).numpy().clip(0, 255).astype(np.uint8)\n img = img[:, :, 0]\n\n title = f\"Prediction: {pred_texts[i]}\"\n ax[i // 4, i % 4].imshow(img, cmap=\"gray\")\n ax[i // 4, i % 4].set_title(title)\n ax[i // 4, i % 4].axis(\"off\")\n\nplt.show()",
"Loaded model from disk\nModel: \"handwriting_recognizer\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nimage (InputLayer) [(None, 128, 32, 1)] 0 \n__________________________________________________________________________________________________\nConv1 (Conv2D) (None, 128, 32, 32) 320 image[0][0] \n__________________________________________________________________________________________________\npool1 (MaxPooling2D) (None, 64, 16, 32) 0 Conv1[0][0] \n__________________________________________________________________________________________________\nConv2 (Conv2D) (None, 64, 16, 64) 18496 pool1[0][0] \n__________________________________________________________________________________________________\npool2 (MaxPooling2D) (None, 32, 8, 64) 0 Conv2[0][0] \n__________________________________________________________________________________________________\nreshape (Reshape) (None, 32, 512) 0 pool2[0][0] \n__________________________________________________________________________________________________\ndense1 (Dense) (None, 32, 64) 32832 reshape[0][0] \n__________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 32, 64) 0 dense1[0][0] \n__________________________________________________________________________________________________\nbidirectional_2 (Bidirectional) (None, 32, 256) 197632 dropout_1[0][0] \n__________________________________________________________________________________________________\nbidirectional_3 (Bidirectional) (None, 32, 128) 164352 bidirectional_2[0][0] \n__________________________________________________________________________________________________\nlabel (InputLayer) [(None, None)] 0 
\n__________________________________________________________________________________________________\ndense2 (Dense) (None, 32, 81) 10449 bidirectional_3[0][0] \n__________________________________________________________________________________________________\nctc_loss (CTCLayer) (None, 32, 81) 0 label[0][0] \n dense2[0][0] \n==================================================================================================\nTotal params: 424,081\nTrainable params: 424,081\nNon-trainable params: 0\n__________________________________________________________________________________________________\n"
],
[
"model.save('my_model.h5')",
"_____no_output_____"
],
[
"model.get_weights()",
"_____no_output_____"
],
[
"from tensorflow import keras\nload_model = keras.models.load_model('my_model.h5')",
"_____no_output_____"
],
[
"from keras.models import load_model\n# Assuming your model includes instance of an \"AttentionLayer\" class\nl_m = load_model('my_model.h5', custom_objects={'CTCLayer': CTCLayer})",
"_____no_output_____"
],
[
"model.save('model.tf')\n# new_model = tf.keras.models.load_model('model.tf')",
"_____no_output_____"
],
[
"new_model = tf.keras.models.load_model('model.tf')",
"_____no_output_____"
],
[
"prediction_model = keras.models.Model(\n model.get_layer(name=\"image\").input, model.get_layer(name=\"dense2\").output\n)\nprediction_model.summary()",
"_____no_output_____"
],
[
"prediction_model.save(\"prediction_model.tf\")",
"_____no_output_____"
],
[
"prediction_model.save(\"prediction_model.h5\")",
"_____no_output_____"
],
[
"load_prediction_model = keras.models.load_model('prediction_model.h5')",
"_____no_output_____"
],
[
"load_1model = keras.models.load_model('my_model.h5')",
"_____no_output_____"
],
[
"model.save_weights('my_model_weights.h5')",
"_____no_output_____"
],
[
"json_string = model.to_json()\nopen('my_model_architecture.json', 'w').write(json_string)",
"_____no_output_____"
],
[
"from keras.models import Sequential,model_from_json\nmodel1 = model_from_json(open('my_model_architecture.json').read())\n\nmodel1.load_weights('my_model_weights.h5')",
"_____no_output_____"
],
[
"model.save('model.hdf5')",
"_____no_output_____"
],
[
"new_model.summary()",
"_____no_output_____"
],
[
"model_json = model.to_json()\nwith open(\"model_final.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model_final.h5\")\n\nprint(\"Saved model to disk\")",
"_____no_output_____"
],
[
"son_file = open('model_final.json', 'r')\n# loaded_model_json = json_file.read()\n# json_file.close()\n# loaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_modelxx = build_model()\n# loaded_modelxx.summary()\nloaded_modelxx.load_weights(\"model_final.h5\")\nprint(\"Loaded model from disk\")",
"_____no_output_____"
],
[
"model.save_weights(\"model_final.hdf5\")",
"_____no_output_____"
],
[
"model.save(\"m\")",
"_____no_output_____"
],
[
"prediction_model = keras.models.Model(\n model.get_layer(name=\"image\").input, model.get_layer(name=\"dense2\").output\n)\nprediction_model.summary()\n",
"_____no_output_____"
],
[
"prediction_model.save_weights(\"prediction_model.h5\")\nprediction_model.save_weights(\"prediction_model.hdf5\")",
"_____no_output_____"
],
[
"def decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search.\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][\n :, :max_len\n ]\n # Iterate over the results and get back the text.\n output_text = []\n for res in results:\n res = tf.gather(res, tf.where(tf.math.not_equal(res, -1)))\n res = tf.strings.reduce_join(num_to_char(res)).numpy().decode(\"utf-8\")\n output_text.append(res)\n return output_text\n\n\n# Let's check results on some test samples.\nfor batch in test_ds.take(1):\n batch_images = batch[\"image\"]\n _, ax = plt.subplots(4, 4, figsize=(15, 8))\n\n preds = prediction_model.predict(batch_images)\n pred_texts = decode_batch_predictions(preds)\n\n for i in range(16):\n img = batch_images[i]\n img = tf.image.flip_left_right(img)\n img = tf.transpose(img, perm=[1, 0, 2])\n img = (img * 255.).numpy().clip(0, 255).astype(np.uint8)\n img = img[:, :, 0]\n\n title = f\"Prediction: {pred_texts[i]}\"\n ax[i // 4, i % 4].imshow(img, cmap=\"gray\")\n ax[i // 4, i % 4].set_title(title)\n ax[i // 4, i % 4].axis(\"off\")\n\nplt.show()",
"_____no_output_____"
],
[
"ok_final = build_model()\n# loaded_modelxx.summary()\nok_final.load_weights(\"model_final.h5\")\nprint(\"Loaded model from disk\")\nok_final_prediction_model = keras.models.Model(\n ok_final.get_layer(name=\"image\").input, ok_final.get_layer(name=\"dense2\").output\n)\n\n\ndef decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search.\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][\n :, :max_len\n ]\n # Iterate over the results and get back the text.\n output_text = []\n for res in results:\n res = tf.gather(res, tf.where(tf.math.not_equal(res, -1)))\n res = tf.strings.reduce_join(num_to_char(res)).numpy().decode(\"utf-8\")\n output_text.append(res)\n return output_text\n\n\n# Let's check results on some test samples.\nfor batch in test_ds.take(1):\n batch_images = batch[\"image\"]\n _, ax = plt.subplots(4, 4, figsize=(15, 8))\n\n preds = ok_final_prediction_model.predict(batch_images)\n pred_texts = decode_batch_predictions(preds)\n\n for i in range(16):\n img = batch_images[i]\n img = tf.image.flip_left_right(img)\n img = tf.transpose(img, perm=[1, 0, 2])\n img = (img * 255.).numpy().clip(0, 255).astype(np.uint8)\n img = img[:, :, 0]\n\n title = f\"Prediction: {pred_texts[i]}\"\n ax[i // 4, i % 4].imshow(img, cmap=\"gray\")\n ax[i // 4, i % 4].set_title(title)\n ax[i // 4, i % 4].axis(\"off\")\n\nplt.show()",
"_____no_output_____"
],
[
"# modle.save_weigh",
"_____no_output_____"
],
[
"ok_final = build_model()\n# loaded_modelxx.summary()\nok_final.load_weights(\"model_final.h5\")\nprint(\"Loaded model from disk\")\nok_final_prediction_model = keras.models.Model(\n ok_final.get_layer(name=\"image\").input, ok_final.get_layer(name=\"dense2\").output\n)\n\n\ndef decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search.\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][\n :, :max_len\n ]\n # Iterate over the results and get back the text.\n output_text = []\n for res in results:\n res = tf.gather(res, tf.where(tf.math.not_equal(res, -1)))\n res = tf.strings.reduce_join(num_to_char(res)).numpy().decode(\"utf-8\")\n output_text.append(res)\n return output_text\n\n\n# Let's check results on some test samples.\nfor batch in test_ds.take(1):\n batch_images = batch[\"image\"]\n _, ax = plt.subplots(4, 4, figsize=(15, 8))\n\n preds = ok_final_prediction_model.predict(batch_images)\n pred_texts = decode_batch_predictions(preds)\n\n for i in range(16):\n img = batch_images[i]\n img = tf.image.flip_left_right(img)\n img = tf.transpose(img, perm=[1, 0, 2])\n img = (img * 255.).numpy().clip(0, 255).astype(np.uint8)\n img = img[:, :, 0]\n\n title = f\"Prediction: {pred_texts[i]}\"\n ax[i // 4, i % 4].imshow(img, cmap=\"gray\")\n ax[i // 4, i % 4].set_title(title)\n ax[i // 4, i % 4].axis(\"off\")\n\nplt.show()",
"_____no_output_____"
],
[
"model_json = model.to_json()\nwith open(\"model_last.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model_last.h5\")\nprint(\"Saved model to disk\")",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec65e5a607df7a577c241f466b48bf84174cb91a | 4,797 | ipynb | Jupyter Notebook | f-denser/f-denser.ipynb | Md-Ferdous/fast-denser3 | 9c60144c5c68a26d933d7517a57ed03db9b95b6d | [
"Apache-2.0"
]
| 9 | 2019-11-19T12:18:15.000Z | 2021-04-21T17:15:37.000Z | f-denser/f-denser.ipynb | Md-Ferdous/fast-denser3 | 9c60144c5c68a26d933d7517a57ed03db9b95b6d | [
"Apache-2.0"
]
| 1 | 2022-02-09T23:35:00.000Z | 2022-02-09T23:35:00.000Z | f-denser/f-denser.ipynb | Md-Ferdous/fast-denser3 | 9c60144c5c68a26d933d7517a57ed03db9b95b6d | [
"Apache-2.0"
]
| 6 | 2019-11-19T12:21:00.000Z | 2022-03-23T21:58:06.000Z | 29.072727 | 143 | 0.536585 | [
[
[
"import fast_denser",
"Using TensorFlow backend.\n"
],
[
"fast_denser.search(0, 'fashion-mnist', '../example/config.json', '../example/cnn.grammar')",
"[0] Creating the initial population\n[0] Performing generation: 0\n[0] Best fitness of generation 0: 0.651143\n[0] Best overall fitness: 0.651143\n[0] Performing generation: 1\n[0] Best fitness of generation 1: 0.678286\n[0] Best overall fitness: 0.678286\n[0] Performing generation: 2\n[0] Best fitness of generation 2: 0.690571\n[0] Best overall fitness: 0.690571\n[0] Performing generation: 3\n[0] Best fitness of generation 3: 0.694000\n[0] Best overall fitness: 0.694000\n[0] Performing generation: 4\n[0] Best fitness of generation 4: 0.693714\n[0] Best overall fitness: 0.694000\n[0] Best test accuracy: 0.691300\n"
],
[
"from pickle import load\n\nwith open('experiments/run_0/evaluator.pkl', 'rb') as f_data:\n evaluator = load(f_data)\n x_test = evaluator.dataset['x_test']\n y_test = evaluator.dataset['y_test']",
"_____no_output_____"
],
[
"from keras.models import load_model\nfrom keras.preprocessing.image import ImageDataGenerator\n\nmodel = load_model('experiments/run_0/best.h5')\ndatagen_test = ImageDataGenerator(rescale=1/255.0)\n\nmodel.summary()",
"Model: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 32, 32, 3) 0 \n_________________________________________________________________\naverage_pooling2d_1 (Average (None, 10, 10, 3) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 4, 4, 209) 10241 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 4, 4, 209) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 3344) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 33450 \n=================================================================\nTotal params: 43,691\nTrainable params: 43,691\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"import numpy as np\nfrom sklearn.metrics import accuracy_score\n\ny_pred_test = model.predict_generator(datagen_test.flow(x_test, batch_size=100, shuffle=False), steps=x_test.shape[0]//100, verbose=1)\ny_pred_labels = np.argmax(y_pred_test, axis=1)\n\naccuracy_score(y_test, y_pred_labels)",
"100/100 [==============================] - 0s 4ms/step\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec65efa7d40e10e629207e1095f30a1f48e3ca12 | 10,098 | ipynb | Jupyter Notebook | day2_1_dict.ipynb | YuelinS/bootcamp | 0136b4bb3f01449f61ed77040285a311bbeb2fda | [
"CC-BY-4.0",
"MIT"
]
| null | null | null | day2_1_dict.ipynb | YuelinS/bootcamp | 0136b4bb3f01449f61ed77040285a311bbeb2fda | [
"CC-BY-4.0",
"MIT"
]
| null | null | null | day2_1_dict.ipynb | YuelinS/bootcamp | 0136b4bb3f01449f61ed77040285a311bbeb2fda | [
"CC-BY-4.0",
"MIT"
]
| null | null | null | 21.0375 | 512 | 0.453357 | [
[
[
"my_dict={'a':6,\n 'b':7,\n 'c':27.6} # mixed types / key:thing itself & item\nmy_dict",
"_____no_output_____"
],
[
"my_dict['b']",
"_____no_output_____"
],
[
"# 2nd way to construct a dict\ndict(a=6,\n b=7,\n c=27.6)",
"_____no_output_____"
],
[
"# 3rd way to construct a dict, can be used after zip\ndict((('a',6),\n ('b',7),\n ('c',27.6)))\n",
"_____no_output_____"
],
[
"dict(1=6,\n b=7,\n c=27.6)",
"_____no_output_____"
],
[
"my_dict={4:6,\n 3:7,\n 'c':27.6} # mixed types / key:thing itself & item\nmy_dict",
"_____no_output_____"
],
[
"my_dict={(4,'a tuple'):6,\n 'b':7,\n 'c':27.6} # mixed types / key & item:thing itself, can be anything\nmy_dict",
"_____no_output_____"
],
[
"my_dict={[4,'a list']:6,\n 'b':7,\n 'c':27.6} # mixed types / key:thing itself & item\nmy_dict",
"_____no_output_____"
],
[
"### keys are not, but items are mutable\nmy_dict['b']='is this mutable?'\nmy_dict",
"_____no_output_____"
],
[
"# add key\nmy_dict['d']='adding'\nmy_dict",
"_____no_output_____"
],
[
"'b' in my_dict, 6 in my_dict",
"_____no_output_____"
],
[
"for key in my_dict:\n print(key)",
"(4, 'a tuple')\nb\nc\nd\n"
],
[
"for item in my_dict.items():\n print(key,item)",
"d ((4, 'a tuple'), 6)\nd ('b', 'is this mutable?')\nd ('c', 27.6)\nd ('d', 'adding')\n"
],
[
"for key,item in my_dict.items(): #iterator?\n #item='new item' #not working, why? maybe key and item is bounded\n my_dict[key]='new item'\nmy_dict",
"_____no_output_____"
],
[
"my_dict.values()",
"_____no_output_____"
],
[
"my_dict.keys()",
"_____no_output_____"
],
[
"my_dict.items()",
"_____no_output_____"
],
[
"# don't count on the ordre of a dict",
"_____no_output_____"
],
[
"def almost_right (a,b,c,report=False, greeting=None):\n '''triangle'''\n if greeting is not None:\n print(greeting)\n if abs(a**2+b**2-c**2)<1e-12:\n if report:\n print(\"It's right, baby!\")\n return True ## return also end the function defined, so no need to break\n if report: \n print(\"It's not right, but OK!\")\n return False",
"_____no_output_____"
],
[
"my_legs=(5,12,13)\nalmost_right (*my_legs) # wow, * breaks up a tuple automately",
"_____no_output_____"
],
[
"my_legs=(5,12,13)\nmy_legs_dict={'report':True,'greeting':'Good morning!'}\nalmost_right (*my_legs,**my_legs_dict)",
"Good morning!\nIt's right, baby!\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec65f0a681f16389aeccb9de504f47d562a8770a | 423,386 | ipynb | Jupyter Notebook | WeatherPy/WeatherPyMRD.ipynb | Davis1776/2020-01-python-api-challenge | 96519cb793829586f73329e6ec15dd8b84e7d805 | [
"ADSL"
]
| null | null | null | WeatherPy/WeatherPyMRD.ipynb | Davis1776/2020-01-python-api-challenge | 96519cb793829586f73329e6ec15dd8b84e7d805 | [
"ADSL"
]
| null | null | null | WeatherPy/WeatherPyMRD.ipynb | Davis1776/2020-01-python-api-challenge | 96519cb793829586f73329e6ec15dd8b84e7d805 | [
"ADSL"
]
| null | null | null | 268.987294 | 50,124 | 0.911931 | [
[
[
"# WeatherPy\n----\n\n#### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport time\nfrom scipy.stats import linregress\n\n# Import API key\nfrom config import weather_api_key\n\n# Incorporated citipy to determine city based on latitude and longitude\nfrom citipy import citipy\n\n# Output File (CSV)\noutput_data_file = \"cities.csv\"\n\n# Range of latitudes and longitudes\nlat_range = (-90, 90)\nlng_range = (-180, 180)",
"_____no_output_____"
]
],
[
[
"## Generate Cities List",
"_____no_output_____"
]
],
[
[
"# List for holding lat_lngs and cities\nlat_lngs = []\ncities = []\n\n# Create a set of random lat and lng combinations\nlats = np.random.uniform(low=-90.000, high=90.000, size=1500)\nlngs = np.random.uniform(low=-180.000, high=180.000, size=1500)\nlat_lngs = zip(lats, lngs)\n\n# Identify nearest city for each lat, lng combination\nfor lat_lng in lat_lngs:\n city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name\n \n # If the city is unique, then add it to a our cities list\n if city not in cities:\n cities.append(city)\n\n# Print the city count to confirm sufficient count\nlen(cities)",
"_____no_output_____"
]
],
[
[
"### Perform API Calls\n* Perform a weather check on each city using a series of successive API calls.\n* Include a print log of each city as it'sbeing processed (with the city number and city name).\n",
"_____no_output_____"
]
],
[
[
"# url for Open Weather Map API\nurl = \"http://api.openweathermap.org/data/2.5/weather?\"\nparams = {\n \"appid\":weather_api_key,\n \"units\":\"imperial\" \n}\n\n# city container list\ncity_data = []",
"_____no_output_____"
],
[
"# Print Log of Each City\nprint(\"---------------------------------\")\nprint(\" \")\nprint(\" START: CITY WEATHER CHECK \")\nprint(\" \")\nprint(\"---------------------------------\")\n\n# counter list\nset_counter = 1\nrecord_counter = 1\n\nfor city in cities:\n params[\"q\"] = city\n response = requests.get(url, params=params)\n # when looping through, skip if city isn't found\n if response.ok == False:\n print(\" City not found. Skipping...\")\n time.sleep(1)\n pass\n else: \n response_json = response.json()\n weather_dict = {\n \"City\":response_json[\"name\"], \n \"Country\":response_json[\"sys\"][\"country\"],\n \"Date\":response_json[\"dt\"],\n \"Latitude\":response_json[\"coord\"][\"lat\"],\n \"Longitude\":response_json[\"coord\"][\"lon\"],\n \"Cloudiness\":response_json[\"clouds\"][\"all\"],\n \"Humidity\":response_json[\"main\"][\"humidity\"],\n \"Max Temp\":response_json[\"main\"][\"temp_max\"],\n \"Wind Speed\":response_json[\"wind\"][\"speed\"]\n }\n city_data.append(weather_dict)\n print(f\"Processing Record {record_counter} of Set {set_counter} | {weather_dict['City']}\")\n record_counter += 1\n\n #utilize the counter\n if record_counter % 50 == 0:\n set_counter += 1\n record_counter = 1\n\nprint(\"---------------------------------\")\nprint(\" \")\nprint(\" END: CITY WEATHER CHECK \")\nprint(\" \")\nprint(\"---------------------------------\")",
"---------------------------------\n \n START: CITY WEATHER CHECK \n \n---------------------------------\nProcessing Record 1 of Set 1 | Wagar\nProcessing Record 2 of Set 1 | Busselton\n City not found. Skipping...\n City not found. Skipping...\nProcessing Record 3 of Set 1 | Russell\nProcessing Record 4 of Set 1 | Kathu\nProcessing Record 5 of Set 1 | Palu\n City not found. Skipping...\nProcessing Record 6 of Set 1 | Mar del Plata\nProcessing Record 7 of Set 1 | Vaini\nProcessing Record 8 of Set 1 | Jamestown\nProcessing Record 9 of Set 1 | Burns Lake\nProcessing Record 10 of Set 1 | Butaritari\nProcessing Record 11 of Set 1 | George Town\nProcessing Record 12 of Set 1 | High Level\nProcessing Record 13 of Set 1 | Cape Town\nProcessing Record 14 of Set 1 | Mooresville\nProcessing Record 15 of Set 1 | Vyshkov\nProcessing Record 16 of Set 1 | Hermanus\nProcessing Record 17 of Set 1 | Yar-Sale\nProcessing Record 18 of Set 1 | Hobart\nProcessing Record 19 of Set 1 | Khatanga\nProcessing Record 20 of Set 1 | Valladolid\n City not found. 
Skipping...\nProcessing Record 21 of Set 1 | Loreto\nProcessing Record 22 of Set 1 | Shimoda\nProcessing Record 23 of Set 1 | Bilibino\nProcessing Record 24 of Set 1 | Thompson\nProcessing Record 25 of Set 1 | Bandarbeyla\nProcessing Record 26 of Set 1 | Tavda\nProcessing Record 27 of Set 1 | Payson\nProcessing Record 28 of Set 1 | Husavik\nProcessing Record 29 of Set 1 | Dunedin\nProcessing Record 30 of Set 1 | Faanui\nProcessing Record 31 of Set 1 | Arraial do Cabo\nProcessing Record 32 of Set 1 | Udachny\nProcessing Record 33 of Set 1 | Mataura\nProcessing Record 34 of Set 1 | Severo-Kuril'sk\nProcessing Record 35 of Set 1 | Dikson\nProcessing Record 36 of Set 1 | Ulaanbaatar\nProcessing Record 37 of Set 1 | Castro\nProcessing Record 38 of Set 1 | Kirovsk\nProcessing Record 39 of Set 1 | Trebon\nProcessing Record 40 of Set 1 | Rikitea\nProcessing Record 41 of Set 1 | Aykhal\nProcessing Record 42 of Set 1 | Pereira Barreto\nProcessing Record 43 of Set 1 | Morant Bay\nProcessing Record 44 of Set 1 | Tuktoyaktuk\nProcessing Record 45 of Set 1 | Upernavik\nProcessing Record 46 of Set 1 | Lae\nProcessing Record 47 of Set 1 | Cam Ranh\nProcessing Record 48 of Set 1 | Matara\nProcessing Record 49 of Set 1 | Hithadhoo\nProcessing Record 1 of Set 2 | New Norfolk\nProcessing Record 2 of Set 2 | Port Hedland\nProcessing Record 3 of Set 2 | Pathalgaon\nProcessing Record 4 of Set 2 | Kapaa\n City not found. 
Skipping...\nProcessing Record 5 of Set 2 | Maningrida\nProcessing Record 6 of Set 2 | Bluff\nProcessing Record 7 of Set 2 | Padang\nProcessing Record 8 of Set 2 | George\nProcessing Record 9 of Set 2 | Grenville\nProcessing Record 10 of Set 2 | Tongren\nProcessing Record 11 of Set 2 | Oranjestad\nProcessing Record 12 of Set 2 | Atuona\nProcessing Record 13 of Set 2 | Turka\nProcessing Record 14 of Set 2 | Yellowknife\nProcessing Record 15 of Set 2 | Changde\nProcessing Record 16 of Set 2 | Punta Arenas\nProcessing Record 17 of Set 2 | Caravelas\nProcessing Record 18 of Set 2 | Bara\nProcessing Record 19 of Set 2 | Saint-Pierre\nProcessing Record 20 of Set 2 | Katsuura\nProcessing Record 21 of Set 2 | Kaitangata\nProcessing Record 22 of Set 2 | Leningradskiy\n City not found. Skipping...\nProcessing Record 23 of Set 2 | Saskylakh\nProcessing Record 24 of Set 2 | Paragominas\nProcessing Record 25 of Set 2 | Cidreira\nProcessing Record 26 of Set 2 | Srandakan\n City not found. Skipping...\nProcessing Record 27 of Set 2 | Toamasina\nProcessing Record 28 of Set 2 | Port Augusta\nProcessing Record 29 of Set 2 | Nampula\nProcessing Record 30 of Set 2 | Nanortalik\nProcessing Record 31 of Set 2 | Damghan\nProcessing Record 32 of Set 2 | Vila Franca do Campo\nProcessing Record 33 of Set 2 | Avarua\nProcessing Record 34 of Set 2 | Barrow\nProcessing Record 35 of Set 2 | Hofn\nProcessing Record 36 of Set 2 | Okha\nProcessing Record 37 of Set 2 | Puerto Ayora\nProcessing Record 38 of Set 2 | Berlevag\nProcessing Record 39 of Set 2 | Acarau\nProcessing Record 40 of Set 2 | Healesville\nProcessing Record 41 of Set 2 | Cherskiy\nProcessing Record 42 of Set 2 | Saint-Philippe\nProcessing Record 43 of Set 2 | East London\nProcessing Record 44 of Set 2 | Ushuaia\nProcessing Record 45 of Set 2 | Port Hardy\nProcessing Record 46 of Set 2 | Pinawa\nProcessing Record 47 of Set 2 | Skovorodino\nProcessing Record 48 of Set 2 | Bubaque\nProcessing Record 49 of Set 2 | Hearst\nProcessing 
Record 1 of Set 3 | Kavieng\nProcessing Record 2 of Set 3 | Portsmouth\nProcessing Record 3 of Set 3 | San Patricio\nProcessing Record 4 of Set 3 | Rio Gallegos\nProcessing Record 5 of Set 3 | Shieli\nProcessing Record 6 of Set 3 | Klyuchi\nProcessing Record 7 of Set 3 | Kochevo\nProcessing Record 8 of Set 3 | Grand Gaube\nProcessing Record 9 of Set 3 | Pevek\nProcessing Record 10 of Set 3 | Tiksi\nProcessing Record 11 of Set 3 | Ossora\nProcessing Record 12 of Set 3 | Ponta do Sol\n City not found. Skipping...\nProcessing Record 13 of Set 3 | Klaksvik\n City not found. Skipping...\n City not found. Skipping...\nProcessing Record 14 of Set 3 | Vestmannaeyjar\nProcessing Record 15 of Set 3 | Nyurba\nProcessing Record 16 of Set 3 | Albany\nProcessing Record 17 of Set 3 | Trinidad\nProcessing Record 18 of Set 3 | Nouadhibou\nProcessing Record 19 of Set 3 | Codrington\nProcessing Record 20 of Set 3 | Arrondissement de Vierzon\nProcessing Record 21 of Set 3 | Richards Bay\nProcessing Record 22 of Set 3 | Ebnat-Kappel\nProcessing Record 23 of Set 3 | Port Macquarie\nProcessing Record 24 of Set 3 | Vuktyl\nProcessing Record 25 of Set 3 | Isfana\nProcessing Record 26 of Set 3 | Along\nProcessing Record 27 of Set 3 | Ranong\nProcessing Record 28 of Set 3 | Margate\nProcessing Record 29 of Set 3 | Souillac\nProcessing Record 30 of Set 3 | Longyearbyen\nProcessing Record 31 of Set 3 | Lichuan\nProcessing Record 32 of Set 3 | Sai Buri\nProcessing Record 33 of Set 3 | Estevan\nProcessing Record 34 of Set 3 | Aubenas\nProcessing Record 35 of Set 3 | Kruisfontein\nProcessing Record 36 of Set 3 | Bredasdorp\n City not found. Skipping...\n City not found. 
Skipping...\nProcessing Record 37 of Set 3 | Zunheboto\nProcessing Record 38 of Set 3 | Bariloche\nProcessing Record 39 of Set 3 | Auki\nProcessing Record 40 of Set 3 | Olafsvik\nProcessing Record 41 of Set 3 | Mahebourg\nProcessing Record 42 of Set 3 | Camabatela\nProcessing Record 43 of Set 3 | Hasaki\nProcessing Record 44 of Set 3 | Ribeira Grande\n City not found. Skipping...\nProcessing Record 45 of Set 3 | Talavera de la Reina\nProcessing Record 46 of Set 3 | Sao Joao da Barra\nProcessing Record 47 of Set 3 | Nizwa\nProcessing Record 48 of Set 3 | Tasiilaq\nProcessing Record 49 of Set 3 | Torbay\n City not found. Skipping...\nProcessing Record 1 of Set 4 | Port Alfred\n City not found. Skipping...\nProcessing Record 2 of Set 4 | Albury\nProcessing Record 3 of Set 4 | Beringovskiy\nProcessing Record 4 of Set 4 | Chippewa Falls\nProcessing Record 5 of Set 4 | Donskoye\nProcessing Record 6 of Set 4 | Pacific Grove\nProcessing Record 7 of Set 4 | Kang\nProcessing Record 8 of Set 4 | Karratha\nProcessing Record 9 of Set 4 | Pontecorvo\nProcessing Record 10 of Set 4 | Bethel\nProcessing Record 11 of Set 4 | Aksu\nProcessing Record 12 of Set 4 | Emba\nProcessing Record 13 of Set 4 | Kodiak\nProcessing Record 14 of Set 4 | Aden\nProcessing Record 15 of Set 4 | Ringkobing\nProcessing Record 16 of Set 4 | Maltahohe\nProcessing Record 17 of Set 4 | Pierre\nProcessing Record 18 of Set 4 | Mount Gambier\nProcessing Record 19 of Set 4 | High Prairie\nProcessing Record 20 of Set 4 | Gravdal\nProcessing Record 21 of Set 4 | Angoche\nProcessing Record 22 of Set 4 | Ahipara\nProcessing Record 23 of Set 4 | Sinnamary\nProcessing Record 24 of Set 4 | Veraval\nProcessing Record 25 of Set 4 | Lebu\nProcessing Record 26 of Set 4 | Moron\nProcessing Record 27 of Set 4 | Kakonko\nProcessing Record 28 of Set 4 | Aasiaat\nProcessing Record 29 of Set 4 | Port Elizabeth\nProcessing Record 30 of Set 4 | Port Lincoln\nProcessing Record 31 of Set 4 | Narsaq\nProcessing Record 32 of Set 4 | 
Carnarvon\nProcessing Record 33 of Set 4 | Ginda\nProcessing Record 34 of Set 4 | Corinth\nProcessing Record 35 of Set 4 | Suamico\nProcessing Record 36 of Set 4 | Kumeny\nProcessing Record 37 of Set 4 | Bereda\nProcessing Record 38 of Set 4 | Guerrero Negro\n City not found. Skipping...\n"
]
],
[
[
"### Convert Raw Data to DataFrame\n* Export the city data into a .csv.\n* Display the DataFrame",
"_____no_output_____"
]
],
[
[
"# Dataframe\ncity_pd = pd.DataFrame(city_data)\n\nlat = city_pd[\"Latitude\"]\nmaxtemp = city_pd[\"Max Temp\"]\nhum = city_pd[\"Humidity\"]\nwinds = city_pd[\"Wind Speed\"]\nclouds = city_pd[\"Cloudiness\"]\n\n# Display Dataframe\ncity_pd.to_csv(output_data_file, index_label=\"City_ID\")\ncity_pd.head()",
"_____no_output_____"
]
],
[
[
"### Plotting the Data\n* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.\n* Save the plotted figures as .pngs.",
"_____no_output_____"
],
[
"#### Latitude vs. Temperature Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(lat,\n maxtemp,\n marker=\"d\",\n facecolor=\"dodgerblue\",\n edgecolor=\"slategrey\",\n s= 25,\n alpha=0.99)\nplt.title(\"City Latitude vs. Max Temperature\", fontsize=15);\nplt.xlabel(\"Latitude\", fontsize=13);\nplt.ylabel(\"Max Temperature (F)\", fontsize=13);\nplt.grid();\nplt.xlim(-90, 90);\nplt.ylim(-40, 100);\nplt.figsize=(20,20);\nplt.savefig(\"Plot_Latitude_v_Temp.png\");\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Latitude vs. Humidity Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(lat,\n hum,\n marker=\"d\",\n facecolor=\"dodgerblue\",\n edgecolor=\"slategrey\",\n s= 25,\n alpha=0.99)\nplt.title(\"City Latitude vs. Humidity\", fontsize=15);\nplt.xlabel(\"Latitude\", fontsize=13);\nplt.ylabel(\"Humidity (%)\", fontsize=13);\nplt.grid();\nplt.xlim(-90, 90);\nplt.ylim(-0, 105);\nplt.figsize=(20,20);\nplt.savefig(\"Plot_Latitude_v_Humidity.png\");\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Latitude vs. Cloudiness Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(lat,\n clouds,\n marker=\"d\",\n facecolor=\"dodgerblue\",\n edgecolor=\"slategrey\",\n s= 25, \n alpha=0.99)\nplt.title(\"City Latitude vs. Cloudiness\", fontsize=15);\nplt.xlabel(\"Latitude\",fontsize=13);\nplt.ylabel(\"Cloudinesss (%)\",fontsize=13);\nplt.grid();\nplt.xlim(-90, 90);\nplt.ylim(-5, 105);\nplt.figsize=(20,20);\nplt.savefig(\"Plot_Latitude_v_Cloudiness.png\");\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Latitude vs. Wind Speed Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(lat,\n winds,\n marker=\"d\",\n facecolor=\"dodgerblue\",\n edgecolor=\"slategrey\",\n s= 25, \n alpha=0.99)\nplt.title(\"City Latitude vs. Wind Speed\", fontsize=15);\nplt.xlabel(\"Latitude\", fontsize=13);\nplt.ylabel(\"Wind Speed (MPH)\", fontsize=13);\nplt.grid();\nplt.xlim(-90, 90);\nplt.ylim(-5, 75);\nplt.figsize=(20,20);\nplt.savefig(\"Plot_Latitude_v_WindSpeed.png\");\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Linear Regression",
"_____no_output_____"
]
],
[
[
"# OPTIONAL: Create a function to create Linear Regression plots\ndef linear_reg_plot(x_values,y_values):\n (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n regress_values = x_values * slope + intercept\n line_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n plt.scatter(x_values,y_values)\n plt.plot(x_values,regress_values,\"r-\")\n plt.annotate(line_eq,(x_values.median(),y_values.median()),fontsize=15,color=\"red\")\n plt.xlabel(\"Latitude\")\n print(f\"The r-squared is: {rvalue}\")\n plt.show()",
"_____no_output_____"
],
[
"# Create Northern and Southern Hemisphere DataFrames\nNorthern_hemisphere = city_pd.loc[city_pd[\"Latitude\"]>0]\nSouthern_hemisphere = city_pd.loc[city_pd[\"Latitude\"]<0]",
"_____no_output_____"
]
],
[
[
"#### Northern Hemisphere - Max Temp vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"x_values = Northern_hemisphere[\"Latitude\"]\ny_values = Northern_hemisphere[\"Max Temp\"]\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.scatter(x_values,y_values,\n marker=\"d\",\n facecolors=\"darkorchid\",\n edgecolors=\"darkorchid\",\n s=25,\n alpha=0.99)\nplt.plot(x_values,regress_values,\"seagreen\")\n\nplt.xlim(-5, 90);\nplt.ylim(-45, 100);\n\nplt.annotate(line_eq,(0,-20),fontsize=15,color=\"grey\")\n\nplt.title(\"Northern Hemisphere - Max Temp vs. Latitude\", fontsize=15, color=\"firebrick\")\nplt.ylabel(\"Max Temperature (F)\", fontsize=13)\nplt.xlabel(\"Latitude\", fontsize=13)\n\nplt.savefig(\"N_Hemisphere_Max_Temp_v_Latitude.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"plt.style.use('fivethirtyeight')\nx_values = Southern_hemisphere[\"Latitude\"]\ny_values = Southern_hemisphere[\"Max Temp\"]\nplt.ylabel(\"Max Temp (F)\", color=\"black\")\nplt.show(linear_reg_plot(x_values,y_values))",
"The r-squared is: 0.49961187693877596\n"
]
],
[
[
"#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"x_values = Northern_hemisphere[\"Latitude\"]\ny_values = Northern_hemisphere[\"Humidity\"]\nplt.ylabel(\"Humidity (%)\")\nplt.show(linear_reg_plot(x_values,y_values))",
"The r-squared is: 0.35261120411107827\n"
]
],
[
[
"#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"x_values = Southern_hemisphere[\"Latitude\"]\ny_values = Southern_hemisphere[\"Humidity\"]\nplt.ylim(0, y_values.max()+100)\nplt.ylabel(\"Humidity (%)\")\nplt.show(linear_reg_plot(x_values,y_values))",
"The r-squared is: 0.41781289823022977\n"
]
],
[
[
"#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"x_values = Northern_hemisphere[\"Latitude\"]\ny_values = Northern_hemisphere[\"Cloudiness\"]\nplt.ylabel(\"Cloudiness\")\nplt.show(linear_reg_plot(x_values,y_values))",
"The r-squared is: 0.32929410771643386\n"
]
],
[
[
"#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"x_values = Southern_hemisphere[\"Latitude\"]\ny_values = Southern_hemisphere[\"Cloudiness\"]\nplt.ylabel(\"Cloudiness\")\nplt.show(linear_reg_plot(x_values,y_values))",
"The r-squared is: 0.19887907495630727\n"
]
],
[
[
"#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"x_values = Northern_hemisphere[\"Latitude\"]\ny_values = Northern_hemisphere[\"Wind Speed\"]\nplt.ylabel(\"Wind Speed\")\nplt.show(linear_reg_plot(x_values,y_values))",
"The r-squared is: 0.030478722949290743\n"
]
],
[
[
"#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"plt.style.use('Solarize_Light2')\nx_values = Southern_hemisphere[\"Latitude\"]\ny_values = Southern_hemisphere[\"Wind Speed\"]\nplt.ylabel(\"Wind Speed\")\nplt.show(linear_reg_plot(x_values,y_values))",
"The r-squared is: -0.19965184984196033\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec65f747f68155e182bd9060c1b3a13d8ff44a2b | 61,359 | ipynb | Jupyter Notebook | optuna_vis.ipynb | German-NLP-Group/xlsr | 2f0c4ced0c18b3f69d478c9a320afb1ab9f04fff | [
"BSD-2-Clause"
]
| 9 | 2021-04-05T05:44:56.000Z | 2021-12-28T09:08:57.000Z | optuna_vis.ipynb | German-NLP-Group/xlsr | 2f0c4ced0c18b3f69d478c9a320afb1ab9f04fff | [
"BSD-2-Clause"
]
| null | null | null | optuna_vis.ipynb | German-NLP-Group/xlsr | 2f0c4ced0c18b3f69d478c9a320afb1ab9f04fff | [
"BSD-2-Clause"
]
| null | null | null | 61.114542 | 1,296 | 0.384393 | [
[
[
"import optuna\nimport pandas as pd\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.set_option('display.max_colwidth', None)",
"_____no_output_____"
],
[
"# the target is 0.8662709406419864\n# base model: # de 0.8078971446757308 en 0.8349525108990091 cross 0.798345566174531\n\nstudy_name = \"xlsr_de_en_cross_stsb_08\"\n#study_name = \"xlsr_de_en_cross_stsb_08_do_01\" # both dropouts\n#study_name = \"xlsr_de_en_cross_stsb_08_do_02\" # hidden_dropout_prob only\n#study_name = \"xlsr_de_en_cross_stsb_08_xnli_02\" # add xnli\n\nstorage='sqlite:///optuna.db'",
"_____no_output_____"
],
[
"study = optuna.create_study(study_name=study_name, \n storage=storage,\n load_if_exists=True, direction='maximize')\n\ndf = study.trials_dataframe()\n\ndf_gt_0 = df[df['value'] > 0.0]\nprint('trials with value > 0.0:', len(df_gt_0))\n\ndf.sort_values('value', ascending=False, na_position='first')\n",
"\u001b[32m[I 2021-04-05 08:26:13,354]\u001b[0m Using an existing study with name 'xlsr_de_en_cross_stsb_08' instead of creating a new one.\u001b[0m\n"
],
[
"best_trial = study.best_trial\nprint(best_trial)\nprint('##############')\nprint(best_trial.params)",
"FrozenTrial(number=23, values=[0.8658006814494316], datetime_start=datetime.datetime(2021, 3, 31, 19, 33, 57, 112480), datetime_complete=datetime.datetime(2021, 3, 31, 22, 0, 14, 232114), params={'eps': 1.1684086303875153e-07, 'lr': 1.6422738926110895e-05, 'num_epochs': 3, 'train_batch_size': 27, 'warmup_steps_mul': 0.3956450975375144, 'weight_decay': 0.004635304680893249}, distributions={'eps': UniformDistribution(high=1e-05, low=1e-07), 'lr': UniformDistribution(high=0.0002, low=2e-06), 'num_epochs': IntUniformDistribution(high=3, low=1, step=1), 'train_batch_size': IntUniformDistribution(high=50, low=4, step=1), 'warmup_steps_mul': UniformDistribution(high=0.5, low=0.1), 'weight_decay': UniformDistribution(high=0.1, low=0.001)}, user_attrs={'results': '[0.8711774249675952, 0.8697599686435442, 0.8534852561836358, 0.877517183780333, 0.8696654710533329, 0.8599504506305392, 0.846673763648894, 0.872195151066224, 0.8692012549791207, 0.8683808895410967]'}, system_attrs={}, intermediate_values={0: 0.8711774249675952, 1: 0.8697599686435442, 2: 0.8534852561836358, 3: 0.877517183780333, 4: 0.8696654710533329, 5: 0.8599504506305392, 6: 0.846673763648894, 7: 0.872195151066224, 8: 0.8692012549791207, 9: 0.8683808895410967}, trial_id=406, state=TrialState.COMPLETE, value=None)\n##############\n{'eps': 1.1684086303875153e-07, 'lr': 1.6422738926110895e-05, 'num_epochs': 3, 'train_batch_size': 27, 'warmup_steps_mul': 0.3956450975375144, 'weight_decay': 0.004635304680893249}\n"
],
[
"trials = [t for t in study.get_trials() if t.value is not None]\ntrials = sorted(trials, key=lambda trial: trial.value, reverse=True)\ntrials = trials[:5]\nfor t in trials:\n print(t.params)",
"{'eps': 1.1684086303875153e-07, 'lr': 1.6422738926110895e-05, 'num_epochs': 3, 'train_batch_size': 27, 'warmup_steps_mul': 0.3956450975375144, 'weight_decay': 0.004635304680893249}\n{'eps': 7.447165206861166e-07, 'lr': 2.5204500922230505e-05, 'num_epochs': 2, 'train_batch_size': 21, 'warmup_steps_mul': 0.3974489556555445, 'weight_decay': 0.008587621185204689}\n{'eps': 4.462251033010287e-06, 'lr': 1.026343323298136e-05, 'num_epochs': 2, 'train_batch_size': 8, 'warmup_steps_mul': 0.1609010732760181, 'weight_decay': 0.04794438776350409}\n{'eps': 3.177318344118139e-07, 'lr': 1.8705145169605904e-05, 'num_epochs': 3, 'train_batch_size': 32, 'warmup_steps_mul': 0.3033765868008159, 'weight_decay': 0.01438045647847093}\n{'eps': 1.83682031546129e-06, 'lr': 2.4837128093399775e-05, 'num_epochs': 2, 'train_batch_size': 8, 'warmup_steps_mul': 0.2604129072434368, 'weight_decay': 0.06789899582936298}\n"
],
[
"fig = optuna.visualization.plot_slice(study)\n#fig.write_image(f\"./plots/{study_name}.png\") # save to image\nfig.write_html(f\"./plots/{study_name}_slice.html\") # save to html",
"_____no_output_____"
],
[
"fig = optuna.visualization.plot_param_importances(study)\n#fig.write_image(f\"./plots/{study_name}.png\") # save to image\nfig.write_html(f\"./plots/{study_name}_param_importances.html\") # save to html",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec66042d22c823425a3a54e7d0c5a30005bbd78d | 32,264 | ipynb | Jupyter Notebook | ch5_pandas_data_connect_final.ipynb | jaeyoon89/doit_pandas_study | e39280ffe0aafeccd8fd1025d56ecf9407d6dc59 | [
"MIT"
]
| null | null | null | ch5_pandas_data_connect_final.ipynb | jaeyoon89/doit_pandas_study | e39280ffe0aafeccd8fd1025d56ecf9407d6dc59 | [
"MIT"
]
| null | null | null | ch5_pandas_data_connect_final.ipynb | jaeyoon89/doit_pandas_study | e39280ffe0aafeccd8fd1025d56ecf9407d6dc59 | [
"MIT"
]
| null | null | null | 25.8112 | 273 | 0.465875 | [
[
[
"# 데이터 연결하기\n\n## -분석하기 좋은 데이터\n\n- 분석하기 좋은 데이터란??\n\n분석하기 좋은 데이터란 데이터 집합을 분석하기 아주 좋은 상태로 만들어 놓은 것을 말한다. 데이터 분석 단계에서 데이터 정리는 아주 중요하다. \n\n",
"_____no_output_____"
],
[
"- 깔끔한 데이터의 조건\n\n- 1) 데이터 분석 목적에 맞는 데이터를 모아 새로운 표를 만들어야 한다.\n- 2) 측정한 값은 행(row)을 구성해야 한다.\n- 3) 변수는 열(column)로 구성해야 한다.\n\n\n- 깔끔한 데이터는 데이터 연결부터!!\n\n예를 들어 주식 데이터를 분석하는 과정에서 '기업 정보' 가 있는 데이터 집합과 '주식 가격'이 있는 데이터 집합이 있을 때 '첨단 산업 기업의 주식 가격에 대한 데이터'를 보려면 어떻게 해야 할까? 일단 '기업 정보' 에서 첨단 기술을 가진 기업을 찾고, 이 기업들의 '주식 가격'을 찾아야 한다. 그럼 다음 찾아낸 2개의 데이터를 연결하면 된다. 이렇게 데이터 집합은 연관성이 깊은 값끼리 모여 있기 때문에 데이터 연결을 통해 필요한 데이터를 만드는 과정이 반드시 필요하다.",
"_____no_output_____"
],
[
"## -데이터 연결 기초",
"_____no_output_____"
],
[
"1) concat 메서드로 데이터 연결하기\n\n데이터를 연결하려면 concat 메서드를 사용하면 된다. 다음 예제를 통해 concat 메서드의 사용법을 익혀보자.",
"_____no_output_____"
]
],
[
[
"import pandas",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"df1 = pd.read_csv('C:/Users/이재윤/Downloads/doit_pandas-master/doit_pandas-master/data/concat_1.csv')\ndf2 = pd.read_csv('C:/Users/이재윤/Downloads/doit_pandas-master/doit_pandas-master/data/concat_2.csv')\ndf3 = pd.read_csv('C:/Users/이재윤/Downloads/doit_pandas-master/doit_pandas-master/data/concat_3.csv')",
"_____no_output_____"
]
],
[
[
"2) concat 메서드에 연결하려는 데이터프레임을 리스트에 담아 전달하면 연결한 데이터 프레임을 반환한다. concat 메서드는 데이터프레임을 연결할 때 위에서 아래 방향으로 연결합니다. 그리고 df1,2,3 은 열의 이름이 모두 A,B,C,D로 같다 그래서 데이터 프레임을 연결한 다음에도 열이 그대로 유지된다.",
"_____no_output_____"
]
],
[
[
"row_concat = pd.concat([df1,df2,df3])\nprint(row_concat)",
" A B C D\n0 a0 b0 c0 d0\n1 a1 b1 c1 d1\n2 a2 b2 c2 d2\n3 a3 b3 c3 d3\n0 a4 b4 c4 d4\n1 a5 b5 c5 d5\n2 a6 b6 c6 d6\n3 a7 b7 c7 d7\n0 a8 b8 c8 d8\n1 a9 b9 c9 d9\n2 a10 b10 c10 d10\n3 a11 b11 c11 d11\n"
]
],
[
[
"3) 연결한 데이터 프레임에서 행 데이터를 추출해 보자. concat 메서드는 전달받은 리스트의 요소 순서대로 데이터를 연결합니다.\nconcat 메서드는 전달받은 리스트의 요소의 순서대로 데이터를 연결한다. 그래서 기존 데이터프레임에 있던 인덱스도 그대로 유지된다.\n다음은 데이터프레임에서 네 번째 행을 추출한 것이다.",
"_____no_output_____"
]
],
[
[
"print(row_concat.iloc[3,])",
"A a3\nB b3\nC c3\nD d3\nName: 3, dtype: object\n"
]
],
[
[
"4) 데이터프레임에 시리즈 연결하기\n\n이번엔 데이터 프레임에 시리즈를 추가해 보자. 먼저 리스트를 시리즈로 변환한다.",
"_____no_output_____"
]
],
[
[
"new_row_series = pd.Series(['n1','n2','n3','n4'])",
"_____no_output_____"
]
],
[
[
"5) concat 메서드로 데이터프레임과 시리즈를 연결해 보자. 시리즈가 새로운 행으로 추가되는 것이아니고 새로운 열로 추가된다.\n그래서 NaN 이라는 값도 많이 생겼다. 이 NaN을 누락값이라고 부르겠다.",
"_____no_output_____"
]
],
[
[
"print(pd.concat([df1, new_row_series]))",
" A B C D 0\n0 a0 b0 c0 d0 NaN\n1 a1 b1 c1 d1 NaN\n2 a2 b2 c2 d2 NaN\n3 a3 b3 c3 d3 NaN\n0 NaN NaN NaN NaN n1\n1 NaN NaN NaN NaN n2\n2 NaN NaN NaN NaN n3\n3 NaN NaN NaN NaN n4\n"
]
],
[
[
"### -행이 1개라도 반드시 데이터프레임에 담아 연결해야한다.\n\n시리즈를 데이터프레임에 새로운 행으로 연결하려고 하면 제대로 되지 않는다. 그 이유는 시리즈에 열 이름이 없기 때문이다.\n그래서 시리즈를 새로운 열로 간주하여 0이라는 이름의 열로 추가한 것이다.\n\n- 행 1개로 구성된 데이터프레임 생성하여 연결하기\n\n1) 시리즈는 행이 1개인 데이터프레임이라고 생각해도 된다. 다음은 1개의 행을 가지는 데이터프레임을 생성하여 df1에 연결한 것이다.",
"_____no_output_____"
]
],
[
[
"new_row_df = pd.DataFrame([['n1','n2','n3','n4']], columns=['A','B','C','D'])\nprint(new_row_df)",
" A B C D\n0 n1 n2 n3 n4\n"
],
[
"print(pd.concat([df1, new_row_df]))",
" A B C D\n0 a0 b0 c0 d0\n1 a1 b1 c1 d1\n2 a2 b2 c2 d2\n3 a3 b3 c3 d3\n0 n1 n2 n3 n4\n"
]
],
[
[
"2) concat 메서드는 한 번에 2개 이상의 데이터프레임을 연결할 수 있는 메서드이다.만약 연결할 데이터프레임이 1개라면 append 메서드를 사용해도 된다.",
"_____no_output_____"
]
],
[
[
"print(df1.append(new_row_df))",
" A B C D\n0 a0 b0 c0 d0\n1 a1 b1 c1 d1\n2 a2 b2 c2 d2\n3 a3 b3 c3 d3\n0 n1 n2 n3 n4\n"
]
],
[
[
"3) append 메서드와 딕셔너리를 사용하면 더욱 간편하게 행을 연결할 수 있다. 이때 ignore_index를 True로 설정하면 데이터를 연결한 다음 데이터프레임의 인덱스를 0부터 다시 지정한다.",
"_____no_output_____"
]
],
[
[
"data_dict = {'A' : 'n1', 'B' : 'n2', 'C' : 'n3', 'D' : 'n4' }\nprint(df1.append(data_dict, ignore_index=True))",
" A B C D\n0 a0 b0 c0 d0\n1 a1 b1 c1 d1\n2 a2 b2 c2 d2\n3 a3 b3 c3 d3\n4 n1 n2 n3 n4\n"
]
],
[
[
"### -다양한 방법으로 데이터 연결하기\n\n판다스는 데이터를 연결하는 다양한 방법을 제공한다. 다음 예제를 살펴보자.\n\n- 다양한 방법으로 데이터 연결하기\n\n1) ignore_index 인자 사용하기\n\n바로 앞에서 실습한 ignore_index 를 True로 지정하면 데이터를 연결한 다음 데이터프레임의 인덱스를 0부터 다시 지정한다.",
"_____no_output_____"
]
],
[
[
"row_concat_i = pd.concat([df1, df2, df3], ignore_index=True)\nprint(row_concat_i)",
" A B C D\n0 a0 b0 c0 d0\n1 a1 b1 c1 d1\n2 a2 b2 c2 d2\n3 a3 b3 c3 d3\n4 a4 b4 c4 d4\n5 a5 b5 c5 d5\n6 a6 b6 c6 d6\n7 a7 b7 c7 d7\n8 a8 b8 c8 d8\n9 a9 b9 c9 d9\n10 a10 b10 c10 d10\n11 a11 b11 c11 d11\n"
]
],
[
[
"2) 열 방향으로 데이터 연결하기\n\n만약 행 방향이 아니라 열 방향으로 데이터를 연결하려면 concat 메서드의 axis 인자를 1로 지정하면 된다. ",
"_____no_output_____"
]
],
[
[
"col_concat = pd.concat([df1, df2, df3], axis=1)\nprint(col_concat)",
" A B C D A B C D A B C D\n0 a0 b0 c0 d0 a4 b4 c4 d4 a8 b8 c8 d8\n1 a1 b1 c1 d1 a5 b5 c5 d5 a9 b9 c9 d9\n2 a2 b2 c2 d2 a6 b6 c6 d6 a10 b10 c10 d10\n3 a3 b3 c3 d3 a7 b7 c7 d7 a11 b11 c11 d11\n"
]
],
[
[
"3) 만약 같은 열 이름이 있는 데이터프레임에서 열 이름으로 데이터를 추출하면 해당 열 이름의 데이터를 모두 추출한다.",
"_____no_output_____"
]
],
[
[
"print(col_concat['A'])",
" A A A\n0 a0 a4 a8\n1 a1 a5 a9\n2 a2 a6 a10\n3 a3 a7 a11\n"
]
],
[
[
"4) 다음과 같이 입력하면 간편하게 새로운 열을 추가할 수도 있다.",
"_____no_output_____"
]
],
[
[
"col_concat['new_col_list'] = ['n1', 'n2', 'n3', 'n4']\nprint(col_concat)",
" A B C D A B C D A B C D new_col_list\n0 a0 b0 c0 d0 a4 b4 c4 d4 a8 b8 c8 d8 n1\n1 a1 b1 c1 d1 a5 b5 c5 d5 a9 b9 c9 d9 n2\n2 a2 b2 c2 d2 a6 b6 c6 d6 a10 b10 c10 d10 n3\n3 a3 b3 c3 d3 a7 b7 c7 d7 a11 b11 c11 d11 n4\n"
]
],
[
[
"5) 위 과정 2에서 데이터프레임의 열 이름을 유지한 채 연결했기 때문에 열 이름이 중복 되었다. 다음은 ignore_index를 True로 지정하여 열 이름을 다시 지정한 것이다.",
"_____no_output_____"
]
],
[
[
"print(pd.concat([df1,df2,df3], axis=1, ignore_index=True))",
" 0 1 2 3 4 5 6 7 8 9 10 11\n0 a0 b0 c0 d0 a4 b4 c4 d4 a8 b8 c8 d8\n1 a1 b1 c1 d1 a5 b5 c5 d5 a9 b9 c9 d9\n2 a2 b2 c2 d2 a6 b6 c6 d6 a10 b10 c10 d10\n3 a3 b3 c3 d3 a7 b7 c7 d7 a11 b11 c11 d11\n"
]
],
[
[
"6) 공통 열과 공통 인덱스만 연결하기\n\n만약 열 이름의 일부가 서로 다른 데이터 프레임을 연결하면 어떻게 될까? 앞에서 사용한 df1,df2,df3의 열 이름을 다시 지정해보자.",
"_____no_output_____"
]
],
[
[
"df1.columns = ['A', 'B', 'C', 'D']\ndf2.columns = ['E', 'F', 'G', 'H']\ndf1.columns = ['A', 'C', 'F', 'H']\nprint(df1)\nprint(type(df1))",
" A C F H\n0 a0 b0 c0 d0\n1 a1 b1 c1 d1\n2 a2 b2 c2 d2\n3 a3 b3 c3 d3\n<class 'pandas.core.frame.DataFrame'>\n"
],
[
"print(df2)\nprint(type(df2))",
" E F G H\n0 a4 b4 c4 d4\n1 a5 b5 c5 d5\n2 a6 b6 c6 d6\n3 a7 b7 c7 d7\n<class 'pandas.core.frame.DataFrame'>\n"
],
[
"print(df3)\nprint(type(df3))",
" A B C D\n0 a8 b8 c8 d8\n1 a9 b9 c9 d9\n2 a10 b10 c10 d10\n3 a11 b11 c11 d11\n<class 'pandas.core.frame.DataFrame'>\n"
]
],
[
[
"7) 새롭게 열 이름을 부여한 데이터프레임 3개를 concat 메서드로 연결해 보겠다. 아래와 같이 열 이름이 정렬되어 연결되었다. 그리고 데이터 프레임에 없는 데이터는 누락값으로 처리되었다. 누락값 없이 데이터를 연결하는 방법은 없을까?",
"_____no_output_____"
]
],
[
[
"row_concat = pd.concat([df1,df2,df3])\nprint(row_concat)",
" A C F H E G B D\n0 a0 b0 c0 d0 NaN NaN NaN NaN\n1 a1 b1 c1 d1 NaN NaN NaN NaN\n2 a2 b2 c2 d2 NaN NaN NaN NaN\n3 a3 b3 c3 d3 NaN NaN NaN NaN\n0 NaN NaN b4 d4 a4 c4 NaN NaN\n1 NaN NaN b5 d5 a5 c5 NaN NaN\n2 NaN NaN b6 d6 a6 c6 NaN NaN\n3 NaN NaN b7 d7 a7 c7 NaN NaN\n0 a8 c8 NaN NaN NaN NaN b8 d8\n1 a9 c9 NaN NaN NaN NaN b9 d9\n2 a10 c10 NaN NaN NaN NaN b10 d10\n3 a11 c11 NaN NaN NaN NaN b11 d11\n"
]
],
[
[
"8) 데이터프레임의 공통 열만 골라 연결하면 누락값이 생기지 않을 것이다. 공통 열만 골라서 연결하려면 join 인자를 inner로 지정해야 한다. 하지만 df1,df2,df3은 공통 열이 없다. 따라서 세 데이터프레임의 공통 열을 연결한 결괏값으로 Empty DataFrame이 출력된다.",
"_____no_output_____"
]
],
[
[
"print(pd.concat([df1,df2,df3], join='inner'))",
"Empty DataFrame\nColumns: []\nIndex: [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]\n"
]
],
[
[
"9) df1,df3의 공통열만 골라 연결해 보자. 그러면 공통 열인 A 와 C 만 연결된다.",
"_____no_output_____"
]
],
[
[
"print(pd.concat([df1,df3], ignore_index=False, join='inner'))",
" A C\n0 a0 b0\n1 a1 b1\n2 a2 b2\n3 a3 b3\n0 a8 c8\n1 a9 c9\n2 a10 c10\n3 a11 c11\n"
]
],
[
[
"10) 이번엔 데이터프레임을 행 방향으로 연결해 보자. df1,df2,df3의 인덱스를 다시 지정하자.",
"_____no_output_____"
]
],
[
[
"df1.index = [0,1,2,3]\ndf2.index = [4,5,6,7]\ndf3.index = [0,2,5,7]\n\nprint(df1)",
" A C F H\n0 a0 b0 c0 d0\n1 a1 b1 c1 d1\n2 a2 b2 c2 d2\n3 a3 b3 c3 d3\n"
],
[
"print(df2)",
" E F G H\n4 a4 b4 c4 d4\n5 a5 b5 c5 d5\n6 a6 b6 c6 d6\n7 a7 b7 c7 d7\n"
],
[
"print(df3)",
" A B C D\n0 a8 b8 c8 d8\n2 a9 b9 c9 d9\n5 a10 b10 c10 d10\n7 a11 b11 c11 d11\n"
]
],
[
[
"11) concat 메서드로 df1,df2,df3을 행 방향으로 연결하면 과정 2 와 비슷한 결과가 출력된다.",
"_____no_output_____"
]
],
[
[
"col_concat = pd.concat([df1,df2,df3], axis=1)\nprint(col_concat)",
" A C F H E F G H A B C D\n0 a0 b0 c0 d0 NaN NaN NaN NaN a8 b8 c8 d8\n1 a1 b1 c1 d1 NaN NaN NaN NaN NaN NaN NaN NaN\n2 a2 b2 c2 d2 NaN NaN NaN NaN a9 b9 c9 d9\n3 a3 b3 c3 d3 NaN NaN NaN NaN NaN NaN NaN NaN\n4 NaN NaN NaN NaN a4 b4 c4 d4 NaN NaN NaN NaN\n5 NaN NaN NaN NaN a5 b5 c5 d5 a10 b10 c10 d10\n6 NaN NaN NaN NaN a6 b6 c6 d6 NaN NaN NaN NaN\n7 NaN NaN NaN NaN a7 b7 c7 d7 a11 b11 c11 d11\n"
]
],
[
[
"12) 과정 9와 비슷한 방법으로 df1, df3의 공통 행만 연결해 보자. 그러면 공통 행인 0과 2만 출력된다.",
"_____no_output_____"
]
],
[
[
"print(pd.concat([df1,df3], axis=1, join='inner'))",
" A C F H A B C D\n0 a0 b0 c0 d0 a8 b8 c8 d8\n2 a2 b2 c2 d2 a9 b9 c9 d9\n"
]
],
[
[
"### - 알아두면 좋은 지식\n\n여기까지 배운것이 데이터베이스의 주요 개념 중 하나인 내부조인과 외부조인이다. 간략히 개념을 설명하겠다.\n\n-내부조인\n\n둘 이상의 데이터프레임에서 조건에 맞는 행을 연결하는 것이다.\n\n-외부조인\n\n두 데이터프레임 중 어떤 데이터프레임을 기준으로 할 것인지에 따라 왼쪽 외부 조인고 오른쪽 외부 조인, 완전 외부 조인으로 나뉜다.\n왼쪽 외부 조인은 데이터프레임을 연결할 때 왼쪽 데이터프레임을 모두 포함하여 연결하는 것이고 오른쪽 외부 조인은 데이터프레임을 연결할 때 오른쪽 데이터프레임을 모두 포함하여 연결하는 것이다. 완전 외부 조인은 왼쪽과 오른쪽 데이터프레임을 모두 포함하여 연결한다.",
"_____no_output_____"
],
[
"## 데이터 연결 마무리\n\n판다스는 데이터 연결 전용 메서드인 merge를 제공한다. 아래의 예제를 실습하며 따라해보자.\n\n1) 다음은 특정 위치의 날씨 정보에 필요한 데이터 집합을 모두 불러온 것이다. person은 관측한 사람의 이름, site는 관측 위치, visited는 관측 날짜, survey는 날씨 정보입니다.",
"_____no_output_____"
]
],
[
[
"person = pd.read_csv('C:/Users/이재윤/Downloads/doit_pandas-master/doit_pandas-master/data/survey_person.csv')\nsite = pd.read_csv('C:/Users/이재윤/Downloads/doit_pandas-master/doit_pandas-master/data/survey_site.csv')\nsurvey = pd.read_csv('C:/Users/이재윤/Downloads/doit_pandas-master/doit_pandas-master/data/survey_survey.csv')\nvisited = pd.read_csv('C:/Users/이재윤/Downloads/doit_pandas-master/doit_pandas-master/data/survey_visited.csv')",
"_____no_output_____"
],
[
"print(person)",
" ident personal family\n0 dyer William Dyer\n1 pb Frank Pabodie\n2 lake Anderson Lake\n3 roe Valentina Roerich\n4 danforth Frank Danforth\n"
],
[
"print(site)",
" name lat long\n0 DR-1 -49.85 -128.57\n1 DR-3 -47.15 -126.72\n2 MSK-4 -48.87 -123.40\n"
],
[
"print(visited)",
" ident site dated\n0 619 DR-1 1927-02-08\n1 622 DR-1 1927-02-10\n2 734 DR-3 1939-01-07\n3 735 DR-3 1930-01-12\n4 751 DR-3 1930-02-26\n5 752 DR-3 NaN\n6 837 MSK-4 1932-01-14\n7 844 DR-1 1932-03-22\n"
],
[
"print(survey)",
" taken person quant reading\n0 619 dyer rad 9.82\n1 619 dyer sal 0.13\n2 622 dyer rad 7.80\n3 622 dyer sal 0.09\n4 734 pb rad 8.41\n5 734 lake sal 0.05\n6 734 pb temp -21.50\n7 735 pb rad 7.22\n8 735 NaN sal 0.06\n9 735 NaN temp -26.00\n10 751 pb rad 4.35\n11 751 pb temp -18.50\n12 751 lake sal 0.10\n13 752 lake rad 2.19\n14 752 lake sal 0.09\n15 752 lake temp -16.00\n16 752 roe sal 41.60\n17 837 lake rad 1.46\n18 837 lake sal 0.21\n19 837 roe sal 22.50\n20 844 roe rad 11.25\n"
]
],
[
[
"2) visited 데이터프레임의 일부 데이터만 떼어 따라해보자.\n",
"_____no_output_____"
]
],
[
[
"visited_subset = visited.loc[[0,2,6], ]",
"_____no_output_____"
]
],
[
[
"3) merge 메서드는 기본적으로 내주 조인을 실행하며 메서드를 사용한 데이터프레임을 왼쪽으로 지정하고 첫 번째 인잣값으로 지정한 데이터프레임을 오른쪽으로 지정한다. left_on, right_on 인자는 값이 일치해야 할 왼쪽과 오른쪽 데이터프레임의 열을 지정한다. 즉, 왼쪽 데이터프레임의 열과 오른쪽 데이터프레임의 열의 값이 일치하면 왼쪽 데이터프레임을 기준으로 연결한다.",
"_____no_output_____"
]
],
[
[
"o2o_merge = site.merge(visited_subset, left_on='name', right_on='site')\nprint(o2o_merge)",
" name lat long ident site dated\n0 DR-1 -49.85 -128.57 619 DR-1 1927-02-08\n1 DR-3 -47.15 -126.72 734 DR-3 1939-01-07\n2 MSK-4 -48.87 -123.40 837 MSK-4 1932-01-14\n"
]
],
[
[
"4) 다음은 site,vistied 데이터 프레임을 이용하여 데이터를 연결한 것이다.",
"_____no_output_____"
]
],
[
[
"m2o_merge = site.merge(visited, left_on='name', right_on='site')\nprint(m2o_merge)",
" name lat long ident site dated\n0 DR-1 -49.85 -128.57 619 DR-1 1927-02-08\n1 DR-1 -49.85 -128.57 622 DR-1 1927-02-10\n2 DR-1 -49.85 -128.57 844 DR-1 1932-03-22\n3 DR-3 -47.15 -126.72 734 DR-3 1939-01-07\n4 DR-3 -47.15 -126.72 735 DR-3 1930-01-12\n5 DR-3 -47.15 -126.72 751 DR-3 1930-02-26\n6 DR-3 -47.15 -126.72 752 DR-3 NaN\n7 MSK-4 -48.87 -123.40 837 MSK-4 1932-01-14\n"
]
],
[
[
"5) 다른 데이터프레임도 연결해 보자. 다음은 person, survey 데이터프레임과 visited,survey 데이터프레임을 merge 메서드로 연결한 것이다.",
"_____no_output_____"
]
],
[
[
"ps = person.merge(survey, left_on='ident', right_on='person')\nvs = visited.merge(survey, left_on='ident', right_on='taken')\n\nprint(ps)",
" ident personal family taken person quant reading\n0 dyer William Dyer 619 dyer rad 9.82\n1 dyer William Dyer 619 dyer sal 0.13\n2 dyer William Dyer 622 dyer rad 7.80\n3 dyer William Dyer 622 dyer sal 0.09\n4 pb Frank Pabodie 734 pb rad 8.41\n5 pb Frank Pabodie 734 pb temp -21.50\n6 pb Frank Pabodie 735 pb rad 7.22\n7 pb Frank Pabodie 751 pb rad 4.35\n8 pb Frank Pabodie 751 pb temp -18.50\n9 lake Anderson Lake 734 lake sal 0.05\n10 lake Anderson Lake 751 lake sal 0.10\n11 lake Anderson Lake 752 lake rad 2.19\n12 lake Anderson Lake 752 lake sal 0.09\n13 lake Anderson Lake 752 lake temp -16.00\n14 lake Anderson Lake 837 lake rad 1.46\n15 lake Anderson Lake 837 lake sal 0.21\n16 roe Valentina Roerich 752 roe sal 41.60\n17 roe Valentina Roerich 837 roe sal 22.50\n18 roe Valentina Roerich 844 roe rad 11.25\n"
],
[
"print(vs)",
" ident site dated taken person quant reading\n0 619 DR-1 1927-02-08 619 dyer rad 9.82\n1 619 DR-1 1927-02-08 619 dyer sal 0.13\n2 622 DR-1 1927-02-10 622 dyer rad 7.80\n3 622 DR-1 1927-02-10 622 dyer sal 0.09\n4 734 DR-3 1939-01-07 734 pb rad 8.41\n5 734 DR-3 1939-01-07 734 lake sal 0.05\n6 734 DR-3 1939-01-07 734 pb temp -21.50\n7 735 DR-3 1930-01-12 735 pb rad 7.22\n8 735 DR-3 1930-01-12 735 NaN sal 0.06\n9 735 DR-3 1930-01-12 735 NaN temp -26.00\n10 751 DR-3 1930-02-26 751 pb rad 4.35\n11 751 DR-3 1930-02-26 751 pb temp -18.50\n12 751 DR-3 1930-02-26 751 lake sal 0.10\n13 752 DR-3 NaN 752 lake rad 2.19\n14 752 DR-3 NaN 752 lake sal 0.09\n15 752 DR-3 NaN 752 lake temp -16.00\n16 752 DR-3 NaN 752 roe sal 41.60\n17 837 MSK-4 1932-01-14 837 lake rad 1.46\n18 837 MSK-4 1932-01-14 837 lake sal 0.21\n19 837 MSK-4 1932-01-14 837 roe sal 22.50\n20 844 DR-1 1932-03-22 844 roe rad 11.25\n"
]
],
[
[
"6) left_on,right_on에 전달하는 값이 여러 개라도 상관이 없다. 다음과 같이 여러개의 열 이름을 리스트에 담아 전달해도 된다. 다음은 ps 데이터프레임의 ident, taken, quant, reading 열의 값과 vs 데이터프레임의 person, ident, quant, reading 열의 값을 이용하여 ps와 vs 데이터프레임을 서로 연결한 것이다.",
"_____no_output_____"
]
],
[
[
"ps_vs = ps.merge(vs, left_on=['ident', 'taken', 'quant', 'reading'], right_on=['person', 'ident', 'quant', 'reading'])",
"_____no_output_____"
]
],
[
[
"7) 과정 6에서 연결한 ps_vs 데이터프레임의 첫 번째 행을 살펴보면 양쪽 데이터프레임에 있었던 중복된 열 이름에 접미사 _x, _y 가 추가 되었다. _x는 왼쪽 데이터프레임의 열을 의미하고 _y 는 오른쪽 데이터프레임의 열을 의미한다.",
"_____no_output_____"
]
],
[
[
"print(ps_vs.loc[0, ])",
"ident_x dyer\npersonal William\nfamily Dyer\ntaken_x 619\nperson_x dyer\nquant rad\nreading 9.82\nident_y 619\nsite DR-1\ndated 1927-02-08\ntaken_y 619\nperson_y dyer\nName: 0, dtype: object\n"
]
],
[
[
"이번에는 데이터를 연결하는 다양한 방법을 알아보았다. 특히 누락값과 중복값을 해결하기 위한 여러가지 방법에 대해 알아보았다. 시작부에 얘기했던 깔끔한 데이터의 조건 중 하나인 '데이터 분석 목적에 맞는 데이터를 모아 새로운 표를 만들어야 합니다.' 는 바로 누락값이나 중복값이 없는 상태로 데이터가 잘 연결되어 있어야 한다는 것이다.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec661c359c99274a5f49d237d9fd3018c6d93287 | 21,599 | ipynb | Jupyter Notebook | CNN/Fashion_MNIST.ipynb | Nova1323/Machine-Learning | 77acd97bc211bf37b014f67c9f7bd14d839e12b7 | [
"Apache-2.0"
]
| 6 | 2020-07-10T16:06:56.000Z | 2020-11-07T19:15:43.000Z | CNN/Fashion_MNIST.ipynb | Nova1323/Tensorflow | 77acd97bc211bf37b014f67c9f7bd14d839e12b7 | [
"Apache-2.0"
]
| null | null | null | CNN/Fashion_MNIST.ipynb | Nova1323/Tensorflow | 77acd97bc211bf37b014f67c9f7bd14d839e12b7 | [
"Apache-2.0"
]
| null | null | null | 56.690289 | 5,650 | 0.619103 | [
[
[
"# Fashion MNIST \nMachine Learning and Neural Networks make complex problems like Computer Vision easy to solve.\n\nFashion MNIST is a dataset containg photos of different items of clothing like shoes, shits, pants etc.\nA picture is first converted to a size of 28 by 28 pixels. This image is then converted into greyscale. We do this as this reduces a lot of data that is to be feed into the neural network.\n\nWe will be training a neural network to successfully classify these objects.",
"_____no_output_____"
],
[
"## Importing Libraries",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras",
"_____no_output_____"
]
],
[
[
"## Loading Data\n\nTo load the Fashion MNIST data we will be using a TensorFlow function call.\n\nThe second line splits the dataset into two parts\n- The training dataset\n- The test dataset\n\n\nThe MNIST dataset contains a total of 70,000 images. the dataset is split in such a manner that 60,000 images are in the training set and the rest 10,000 images are in the test set",
"_____no_output_____"
]
],
[
[
"fashion_mnist = keras.datasets.fashion_mnist\n(X_train, y_train),(X_test, y_test) = fashion_mnist.load_data()",
"_____no_output_____"
]
],
[
[
"## Visualizing data\n\nVisualizing data is always considered a good practice in Machine Learning. To visualize this data we run the code below.\n\nMatplotlib is a library in python which helps in visulaising data. The numbers are the values of the pixels which range from 0 to 255. 0 being White and 255 being Black[link text](https://)",
"_____no_output_____"
]
],
[
[
"import numpy as np\nnp.set_printoptions(linewidth=200) # sets the linewidth\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(7,7)) # changes the figures size\nplt.axis('off') #hides axis\nplt.imshow(X_train[0]) #change 0 to any number to view the image at that index\nprint(X_train[0]) #change 0 to any number to view the data at that index",
"[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 13 73 0 0 1 4 0 0 0 0 1 1 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 3 0 36 136 127 62 54 0 0 0 1 3 4 0 0 3]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 6 0 102 204 176 134 144 123 23 0 0 0 0 12 10 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 155 236 207 178 107 156 161 109 64 23 77 130 72 15]\n [ 0 0 0 0 0 0 0 0 0 0 0 1 0 69 207 223 218 216 216 163 127 121 122 146 141 88 172 66]\n [ 0 0 0 0 0 0 0 0 0 1 1 1 0 200 232 232 233 229 223 223 215 213 164 127 123 196 229 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 183 225 216 223 228 235 227 224 222 224 221 223 245 173 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 193 228 218 213 198 180 212 210 211 213 223 220 243 202 0]\n [ 0 0 0 0 0 0 0 0 0 1 3 0 12 219 220 212 218 192 169 227 208 218 224 212 226 197 209 52]\n [ 0 0 0 0 0 0 0 0 0 0 6 0 99 244 222 220 218 203 198 221 215 213 222 220 245 119 167 56]\n [ 0 0 0 0 0 0 0 0 0 4 0 0 55 236 228 230 228 240 232 213 218 223 234 217 217 209 92 0]\n [ 0 0 1 4 6 7 2 0 0 0 0 0 237 226 217 223 222 219 222 221 216 223 229 215 218 255 77 0]\n [ 0 3 0 0 0 0 0 0 0 62 145 204 228 207 213 221 218 208 211 218 224 223 219 215 224 244 159 0]\n [ 0 0 0 0 18 44 82 107 189 228 220 222 217 226 200 205 211 230 224 234 176 188 250 248 233 238 215 0]\n [ 0 57 187 208 224 221 224 208 204 214 208 209 200 159 245 193 206 223 255 255 221 234 221 211 220 232 246 0]\n [ 3 202 228 224 221 211 211 214 205 205 205 220 240 80 150 255 229 221 188 154 191 210 204 209 222 228 225 0]\n [ 98 233 198 210 222 229 229 234 249 220 194 215 217 241 65 73 106 117 168 219 221 215 217 223 223 224 229 29]\n [ 75 204 212 204 193 205 211 225 216 185 197 206 198 213 240 195 227 245 239 223 218 212 209 222 220 221 230 67]\n [ 48 203 183 194 213 197 185 190 194 192 202 214 219 221 220 236 225 216 199 206 186 181 177 172 181 205 206 115]\n [ 0 122 219 193 179 171 183 196 204 210 
213 207 211 210 200 196 194 191 195 191 198 192 176 156 167 177 210 92]\n [ 0 0 74 189 212 191 175 172 175 181 185 188 189 188 193 198 204 209 210 210 211 188 188 194 192 216 170 0]\n [ 2 0 0 0 66 200 222 237 239 242 246 243 244 221 220 193 191 179 182 182 181 176 166 168 99 58 0 0]\n [ 0 0 0 0 0 0 0 40 61 44 72 41 35 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]]\n"
]
],
[
[
"## Normalizing Data\n\nA neural network or a Machine Learning model in general works better if the data is normalized. We divide the values of the pixels by 255 to get the range between 0 and 1.",
"_____no_output_____"
]
],
[
[
"X_train = X_train / 255\nX_test = X_test / 255",
"_____no_output_____"
]
],
[
[
"## Building the Neural Network\n\nNow adding the neural network\n\nThe neural layer will consist of three layers\n1. Input layer\n2. Hidden layer\n3. Output layer\n\nThe method **Sequential** defines the sequence of the layers\n\nThe Input layer in your network should be the same shape as your data.\nThe input layer will expect an input of a 28 by 28 matrix _(defined by the input_shape attribute)_ as the images are of this size. The **Flatten** method then converts this matrix into a simple linear array avoiding the use of 28 layers of 28 neurons which would be infeasible.\n\nThe hidden layer consists of **128** neurons in which the classification takes place. The **Dense** method is used to add a layer in network the first attribute assigns the number of neuron present in a layer. The activation type of this layer is **Relu**. \n\nRelu effectively means \"If X>0 return X, else return 0\" -- so what it does it it only passes values 0 or greater to the next layer in the network.\n\nThe output layer will contain **10** neurons (set by the Dense method) each depicting the 10 classes in the dataset. The number of neurons in the output layer should always be equal to the number of classes in the problem. _(Except when there are only two classes then the result will be 0 or 1 depicting the first class and second class respectively.)_ The activation function of this layer is **Softmax**. \n\nSoftmax takes a set of values, and effectively picks the biggest one, so, for example, if the output of the last layer looks like, \n\n[0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05]\n\nit saves you from fishing through it looking for the biggest value, and turns it into.\n\n[0,0,0,0,1,0,0,0,0]\n\nThe index of 1 is the class identified by the network.",
"_____no_output_____"
]
],
[
[
"model = keras.Sequential([keras.layers.Flatten(input_shape=(28,28)),\n keras.layers.Dense(128, activation=tf.nn.relu), #change 128 to increase the number of neurons in the hidden layer\n keras.layers.Dense(10,activation=tf.nn.softmax)\n ])",
"_____no_output_____"
]
],
[
[
"Changing the number of neurons in the hidden layer will affect the time taken to run and the accuracy of the network.\n- A higher number will give you a better accuracy but will consume more time\n\nThere isn't a significant impact effects of additional layers (hidden layer 2, 3 etc.)in the network because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary.",
"_____no_output_____"
],
[
"## Training the Neural Network\n\nNow to set the optimizer and loss function of the neural network we use the following code. This is done by the **compile** method. The metrics attribute gives us an idea of how good our prediction is.\n\nAfter this the data is fit in the network using the **fit** method. The first attribute are the features of the data, the scond attribute are the labels of the data. Epochs is the number of time this model runs optimizing itself with the help of the optimizer and loss functions.",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer = tf.optimizers.Adam(),\n loss = 'sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(X_train, y_train, epochs=5)",
"Epoch 1/5\n1875/1875 [==============================] - 3s 2ms/step - loss: 0.5043 - accuracy: 0.8232\nEpoch 2/5\n1875/1875 [==============================] - 3s 2ms/step - loss: 0.3803 - accuracy: 0.8629\nEpoch 3/5\n1875/1875 [==============================] - 3s 2ms/step - loss: 0.3444 - accuracy: 0.8752\nEpoch 4/5\n1875/1875 [==============================] - 3s 2ms/step - loss: 0.3175 - accuracy: 0.8842\nEpoch 5/5\n1875/1875 [==============================] - 3s 2ms/step - loss: 0.2985 - accuracy: 0.8899\n"
]
],
[
[
"## Evaluating the Neural Network\n\nNow after fitting this data we test it on data which it hasn't seen ie the data which is not in the training set. We use the **evaluate** method for this.",
"_____no_output_____"
]
],
[
[
"model.evaluate(X_test, y_test)",
"313/313 [==============================] - 0s 1ms/step - loss: 0.3645 - accuracy: 0.8706\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6627cb052dfe932fed262061868bde9382fda6 | 212,183 | ipynb | Jupyter Notebook | statistic/01_Populacao_amostra.ipynb | galvaowesley/DataScience_Learning | e46f0c6ed69f35cb90a90fff0b3b40b63c249cb7 | [
"Apache-2.0"
]
| null | null | null | statistic/01_Populacao_amostra.ipynb | galvaowesley/DataScience_Learning | e46f0c6ed69f35cb90a90fff0b3b40b63c249cb7 | [
"Apache-2.0"
]
| null | null | null | statistic/01_Populacao_amostra.ipynb | galvaowesley/DataScience_Learning | e46f0c6ed69f35cb90a90fff0b3b40b63c249cb7 | [
"Apache-2.0"
]
| null | null | null | 62.55395 | 32,709 | 0.608168 | [
[
[
"<a href=\"https://colab.research.google.com/github/galvaowesley/data-science-study/blob/master/statistic/01_Populacao_amostra.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"\n<center> <h1>Estatística para Ciência de Dados e Machine Learning</h1> </center>\n\n<center> <h1> População e amostra </h1> </center>\n\n[Curso da IA Academy](https://iaexpert.academy/courses/estatistica-para-ciencia-de-dados-e-machine-learning/)\n\nAutor do notebook: Wesley Galvão\n\nMarço de 2022\n\n\n",
"_____no_output_____"
],
[
"# Amostragem\n\n\n\n",
"_____no_output_____"
],
[
"## O que é?\nÉ um método que permite obter informações sobre a população com base nas estatísticas de um recorte ou subconjunto da população, sem a necessidade de \"olhar\" para todos os indvíduos, mas para uma **amostra**. \n\nSendo assim, para investigar as estatísticas de uma população, a fim de diminuir os custos com tempo, recurso e processamento que uma tarefa do tipo censo gera, usa-se a amostragem. \n\nVale lembrar que a amostra precisa ser aleatória e representativa, de modo a diminuir eventuais viéses. ",
"_____no_output_____"
],
[
"\n\nFonte: https://www.analyticsvidhya.com/blog/2019/09/data-scientists-guide-8-types-of-sampling-techniques/",
"_____no_output_____"
],
[
"## Etapas envolvidads na amostragem",
"_____no_output_____"
],
[
"\nFonte: https://www.analyticsvidhya.com/blog/2019/09/data-scientists-guide-8-types-of-sampling-techniques/",
"_____no_output_____"
],
[
"## Métodos de amostragem",
"_____no_output_____"
],
[
"\nFonte: https://www.analyticsvidhya.com/blog/2019/09/data-scientists-guide-8-types-of-sampling-techniques/",
"_____no_output_____"
],
[
"**Amostragem Probabilística**\n\nNeste método, cada elemento da população tem a mesma chance de ser selecionado, o que proporciona um subconjunto da população que seja representativo. \n\n**Amostra não probabilística**\n\nNeste método, os elementos não têm a mesma chance de serem selecionados, o que resulta numa deficiência de representatividade e generalização para a população. ",
"_____no_output_____"
],
[
"# Técnicas de amostragem probabilística",
"_____no_output_____"
],
[
"## Carregamento de bibliotecas e dados",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport random\nimport numpy as np\nimport gdown \n",
"_____no_output_____"
]
],
[
[
"### Base de dados Census\n\nBase de dados do Census dos EUA",
"_____no_output_____"
]
],
[
[
"# Download da base de dados census\n!gdown 1HIdDZhMCgnGL3kqUvFk2PNZ96OGoWV3K",
"Downloading...\nFrom: https://drive.google.com/uc?id=1HIdDZhMCgnGL3kqUvFk2PNZ96OGoWV3K\nTo: g:\\My Drive\\Study\\DataScience\\Cursos\\Estatística para Ciência de Dados e ML\\Notebooks\\census.csv\n\n 0%| | 0.00/4.01M [00:00<?, ?B/s]\n 39%|███▉ | 1.57M/4.01M [00:00<00:00, 10.8MB/s]\n 79%|███████▊ | 3.15M/4.01M [00:00<00:00, 10.9MB/s]\n100%|██████████| 4.01M/4.01M [00:00<00:00, 11.5MB/s]\n"
],
[
"# Importa a base de dados com DataFrame\ndataset = pd.read_csv('census.csv')\ndataset.head()",
"_____no_output_____"
],
[
"# Tamanho da base de dados\nprint(f'Linhas: {dataset.shape[0]} \\nColunas: {dataset.shape[1]}')",
"Linhas: 32561 \nColunas: 15\n"
]
],
[
[
"## Amostragem aleatória simples",
"_____no_output_____"
]
],
[
[
"# Amostras aleatórias e diferentes a cada rodada\namostra_aleatoria_simples = dataset.sample(n = 5, replace = True)\namostra_aleatoria_simples.head()",
"_____no_output_____"
],
[
"# Amostras aleatórias e com valores iguais a cada rodada\namostra_aleatoria_simples = dataset.sample(n = 5, random_state = 42, replace = True)\namostra_aleatoria_simples.head()",
"_____no_output_____"
],
[
"# Tamanho da amostra aleatória simpes\nprint(f'Linhas: {amostra_aleatoria_simples.shape[0]} \\nColunas: {amostra_aleatoria_simples.shape[1]}')",
"Linhas: 5 \nColunas: 15\n"
]
],
[
[
"## Amostragem sistemática",
"_____no_output_____"
],
[
"Vamos recordar o tamanho do nosso dataset Census. ",
"_____no_output_____"
]
],
[
[
"# Tamanho da base de dados\nprint(f'Linhas: {dataset.shape[0]} \\nColunas: {dataset.shape[1]}')",
"Linhas: 32561 \nColunas: 15\n"
]
],
[
[
"Temos que a nossa população referente ao dataset é de 32561 indivíduos. \n\nPara uma amostragem sistemática, precisamos de uma amostra de 100 indivíduos. Para tal, primeiramente devemos encontrar o passo de amostragem. Isto é, o número que representa o intervalo de seleção aleatória de um indivíduo e os seus subsequentes. \n\n---\n\n",
"_____no_output_____"
]
],
[
[
"tam_populacao = len(dataset)\ntam_populacao",
"_____no_output_____"
],
[
"# Calcular o passo de amostragem\npasso = tam_populacao // 100\nprint('Passo de amostragem: ', passo)",
"Passo de amostragem: 325\n"
]
],
[
[
"Temos então que o passo de amostragem é o valor inteiro 325. Contudo, devemos também encontrar o primeiro indivíduo da amostragem. \n\nO índice do indivíduo, ou o valor que representa a posição do indivíduo na população, deve ser entre 0 e 325, o qual também será selecionado aleatoriamente. ",
"_____no_output_____"
]
],
[
[
"# Seleção do primeiro indivíduo\nrandom.seed(2) # Semente de randomização para manter o mesmo valor aleatório a cada rodada\nprimeiro_individuo = random.randint(0, passo) # Sorteia um valor aleatório entre 0 e 325\nprimeiro_individuo",
"_____no_output_____"
]
],
[
[
"Dado que o primeiro indivíduo é o de número (ou índice) 28, os outros 99 indivíduos serão selecionados de 325 a 325 a partir de 28.\n\nAssim: ",
"_____no_output_____"
]
],
[
[
"# primeiro indivíduo\nprimeiro_individuo # 28",
"_____no_output_____"
],
[
"# Segundo indivíduo\nsegundo_individuo = primeiro_individuo + passo # 28 + 325\nsegundo_individuo",
"_____no_output_____"
],
[
"# Terceiro indivíduo\nterceiro_individuo = segundo_individuo + passo # 353 + 325\nterceiro_individuo",
"_____no_output_____"
]
],
[
[
"... e assim por diante. \n\nPodemos fazer isso de forma automática com a função `np.arange()` da biblioteca Numpy. Para rodar a função, pricisamos do número ou variável que representa o 1º elemento, o tamanho da população e o passo de amostragem. \n\nA função irá retornar um array de números que representam os índices dos indivíduos amostrados sistematicamente da população Census.",
"_____no_output_____"
]
],
[
[
"np.arange(primeiro_individuo, tam_populacao, passo)",
"_____no_output_____"
]
],
[
[
"Note que os primeiros números são os mesmos que havíamos calculados manualmente. ",
"_____no_output_____"
],
[
"### Função amostragem sistemática",
"_____no_output_____"
]
],
[
[
"def amostra_sistematica(dataset, tam_amostra):\n \"\"\"\n Função que recebe um dataset e um valor do tamanho da amostra desejada, calcula\n a amostra pelo método sistemático e retorna o dataframe amostrado. \n \"\"\"\n tam_populacao = len(dataset)\n passo = tam_populacao // tam_amostra\n random.seed(2) \n primeiro_individuo = random.randint(0, passo)\n indices = np.arange(primeiro_individuo, tam_populacao, step = passo)\n amostra = dataset.iloc[indices]\n\n return amostra",
"_____no_output_____"
],
[
"df_amostra_sistematica = amostra_sistematica(dataset, 100)\ndf_amostra_sistematica.head()",
"_____no_output_____"
]
],
[
[
"## Amostragem por grupos\n",
"_____no_output_____"
],
[
"Com o nosso dataset, podemos dividir a população em 10 grupos de tamanho fixo. Para tal, precisamos saber o tamanho do grupo.",
"_____no_output_____"
]
],
[
[
"# Tamanho grupo\ntam_grupo = len(dataset) // 10\ntam_grupo",
"_____no_output_____"
]
],
[
[
"Iremos agora percorrer todas as linhas do dataset para atribuir cada indivíduo a um grupo.",
"_____no_output_____"
]
],
[
[
"# lista \ngrupos = []\n# Teremos 10 grupos, de IDs de 0 a 9.\nid_grupo = 0\n# Contador de grupos\ncontagem = 0\n\nfor _ in dataset.iterrows():\n if contagem > tam_grupo:\n id_grupo += 1\n contagem = 0\n grupos.append(id_grupo)\n contagem += 1\n\n",
"_____no_output_____"
]
],
[
[
"Podemos agora validar quantos grupos únicos existem e a quantidade de indivíduos por grupo. ",
"_____no_output_____"
]
],
[
[
"np.unique(grupos, return_counts=True)",
"_____no_output_____"
]
],
[
[
"É importante verificar também se o tamanho da lista de grupos é o mesmo tamanho do dataset da população. ",
"_____no_output_____"
]
],
[
[
"# Tamanho da lista de grupos\nprint(f'Shape grupos: {np.shape(grupos)}. Shape dataset: {np.shape(dataset)}')",
"Shape grupos: (32561,). Shape dataset: (32561, 15)\n"
]
],
[
[
"Vamos criar agora uma nova coluna no dataset para armazenar o número do grupo por indivíduo.",
"_____no_output_____"
]
],
[
[
"dataset['grupo'] = grupos\ndataset.head()",
"_____no_output_____"
]
],
[
[
"Agora, precisamos selecionar aleatoriamente um dos grupos Para isso, vamos utilizar a função `random.randint()` da biblioteca `random`.",
"_____no_output_____"
]
],
[
[
"# Semente de randomização para manter o mesmo valor aleatório a cada rodada\nrandom.seed(7)\ngrupo = random.randint(0, 9)\nprint(grupo)",
"5\n"
]
],
[
[
"Sabendo que o grupo escolhido é 5, vamos selecionar {{5}} os indivíduos do dataset que pertençam ao grupo 5. ",
"_____no_output_____"
]
],
[
[
"# Filtro por grupo\ndf_agrupamento = dataset[dataset['grupo'] == 5]\ndf_agrupamento.head()",
"_____no_output_____"
],
[
"# Verificando a quantidade de indivíduos\ndf_agrupamento['grupo'].value_counts()",
"_____no_output_____"
]
],
[
[
"### Função amostragem por agrupamento",
"_____no_output_____"
],
[
"Faremos agora uma função para realizar a amostragem por agrupamento.",
"_____no_output_____"
]
],
[
[
"def amostragem_agrupamento(dataset, numero_grupos, random_state = False):\n \"\"\"\n Função que recebe um dataset, um valor inteiro do número de grupos desejado e um valor booleano\n para a ativação da semente de randomização. Retorna um dataframe com a amostra agrupada.\n \"\"\"\n # Tamanho grupo\n tam_grupo = len(dataset) // numero_grupos\n # lista \n grupos = []\n # Teremos 10 grupos, de IDs de 0 a 9.\n id_grupo = 0\n # Contador de grupos\n contagem = 0\n\n for _ in dataset.iterrows():\n if contagem > tam_grupo:\n id_grupo += 1\n contagem = 0\n grupos.append(id_grupo)\n contagem += 1\n # Nova coluna para os grupos\n dataset['grupo'] = grupos\n\n # Semente de randomização para manter o mesmo valor aleatório a cada rodada\n if random_state == True: \n random.seed(7)\n # Seleciona o grupo aleatoriamente\n grupo = random.randint(0, numero_grupos)\n # Filtra um subset pelo grupo selecionado\n df_agrupamento = dataset[dataset['grupo'] == grupo]\n\n return df_agrupamento\n ",
"_____no_output_____"
],
[
"df_agrupamento = amostragem_agrupamento(dataset, 100, random_state = False)\ndf_agrupamento.shape, df_agrupamento['grupo'].value_counts()",
"_____no_output_____"
],
[
"df_agrupamento.head()",
"_____no_output_____"
]
],
[
[
"## Amostragem estratificada proporcional\n\nÉ uma estratégia de divisão e conquista, pois a população é dividida em grupos chamados _estratos_. Os estratos são escolhidos basedos em características da população, como sexo, edução, categoria, etc.\n\nApós agrupados, os indivíduos são selecionados aleatoriamente para formar uma amostra. É importante que a amostra preserve a proporção de indivíduos por estrato da população.\n\nUsamos esse tipo de amostragem quando queremos representação de todos os subgrupos da população. Contudo, necessita de um conhecimento prévio das características da população. ",
"_____no_output_____"
]
],
[
[
"# Importação da biblioteca para estratificação\nfrom sklearn.model_selection import StratifiedShuffleSplit",
"_____no_output_____"
]
],
[
[
"Vamos explorar o estrato rerefente à renda anual dos indivíduos, o atributo `income`.",
"_____no_output_____"
]
],
[
[
"def sumario_categoria(dataset, coluna):\n \"\"\"\n Função que recebe um dataset e uma coluna e retorna um dataframe com o sumário dessa coluna.\n \"\"\"\n # Valor absoluto por nível da categoria\n c = dataset[coluna].value_counts(dropna=False)\n # Valor relativo por nível da categoria\n p = dataset[coluna].value_counts(dropna=False, normalize=True).mul(100).round(2)\n # concatena\n sumario = pd.concat([c,p], axis=1, keys=['counts', '%'])\n\n return sumario",
"_____no_output_____"
],
[
"# Sumário da estrato income para a população\nsumario_categoria(dataset, 'income')",
"_____no_output_____"
]
],
[
[
"Temos então que 24.08% dos indivíduos ganham mais que 50 mil dólares por ano. O complemento, 75.92%, ganha menos ou igual a 50 mil dólares por ano.",
"_____no_output_____"
],
[
"Vamos agora amostrar de forma estratificada 10% do nosso dataset.",
"_____no_output_____"
]
],
[
[
"# Criação do objeto de estratificação com 10% da base de dados\nsplit = StratifiedShuffleSplit(test_size=0.1, random_state=7)\n\nfor x, y in split.split(dataset, dataset.income):\n # População - amostra estratificada\n df_pop_menos_amostra = dataset.iloc[x]\n # Amostra estratificada\n df_amostra = dataset.iloc[y]",
"_____no_output_____"
]
],
[
[
"Agora veja que o tamanho da amostra corresponde a aproximadamente 10% da população. ",
"_____no_output_____"
]
],
[
[
"# Quantidade de linhas do dataset VS quantidade de linhas da amostra\nprint(f'QTD pupulação: {dataset.shape[0]}\\nQTD amostra: {df_amostra.shape[0]}')",
"QTD pupulação: 32561\nQTD amostra: 3257\n"
],
[
"df_amostra.head()",
"_____no_output_____"
]
],
[
[
"Podemos também obter uma amostra com um tamanhno específico, ao invés de uma amostra baseado num percentual. Para tal, façamos: ",
"_____no_output_____"
]
],
[
[
"tamanho_amostra = 100\ntam_populacao = len(dataset)\n\npercent_amostra = tamanho_amostra / tam_populacao\n\nsplit = StratifiedShuffleSplit(test_size=percent_amostra, random_state=7)\n\nfor x, y in split.split(dataset, dataset.income):\n # População - amostra estratificada\n df_pop_menos_amostra = dataset.iloc[x]\n # Amostra estratificada\n df_amostra = dataset.iloc[y]",
"_____no_output_____"
],
[
"# Tamanho da amostra\nprint(f'QTD amostra: {df_amostra.shape[0]}')",
"QTD amostra: 100\n"
],
[
"df_amostra.head()",
"_____no_output_____"
]
],
[
[
"Para validarmos que a proporção de indivíduos por nível de renda da amostra é próxima ao da população, façamos: ",
"_____no_output_____"
]
],
[
[
"# Sumário da estrato income para a amostra\nsumario_categoria(df_amostra, 'income')",
"_____no_output_____"
]
],
[
[
"Veja que a proporção se manteve muito próxima. ",
"_____no_output_____"
],
[
"### Função amostragem estratificada proporcional\n\nPara finalizar, podemos criar uma função que realize a amostragem estratificada proporcional, dado um dataset como população, o estrato referente à característica de amostragem e o tamanho da amostra.",
"_____no_output_____"
]
],
[
[
"def amostragem_estratificada(dataset, estrato, tamanho_amostra):\n \"\"\"\n Função que recebe um dataset, um estrato e um tamanho de amostra. \n Retorna um dataframe com a amostra estratificada.\n \"\"\"\n\n tam_populacao = len(dataset)\n percent_amostra = tamanho_amostra / tam_populacao\n\n # Criação do objeto de estratificação\n split = StratifiedShuffleSplit(test_size=percent_amostra, random_state=7)\n\n for x, y in split.split(dataset, dataset[estrato]):\n # População - amostra estratificada\n df_pop_menos_amostra = dataset.iloc[x]\n # Amostra estratificada\n df_amostra = dataset.iloc[y] \n\n return df_amostra",
"_____no_output_____"
]
],
[
[
"Vamos amostrar 1500 indivíduos de acordo com o estrato de status de relacionamento, `relationship`. Primeiro, vamos conhecer a quantidade de indivíduos por nível de relacionamento. ",
"_____no_output_____"
]
],
[
[
"# Sumário da estrato relationship para a amostra\nsumario_categoria(dataset, 'relationship')",
"_____no_output_____"
]
],
[
[
"Fazendo a amostragem, temos:",
"_____no_output_____"
]
],
[
[
"df_amostra_estratificada = amostragem_estratificada(dataset, 'relationship', 1500)",
"_____no_output_____"
],
[
"# Tamanho da amostra\nprint(f'QTD amostra: {df_amostra_estratificada.shape[0]}')\n# Sumário da estrato relationship para a amostra\nsumario_categoria(df_amostra_estratificada, 'relationship')",
"QTD amostra: 1500\n"
]
],
[
[
"## Amostragem de reservatório",
"_____no_output_____"
]
],
[
[
"def amostragem_reservatorio(dataset, amostra):\n stream = []\n for i in range(len(dataset)):\n stream.append(i)\n i = 0\n tamanho = len(dataset)\n\n reservatorio = [0] * amostra\n for i in range(amostra):\n reservatorio[i] = stream[i]\n \n while i < tamanho:\n j = random.randrange(i + 1)\n if j < amostra:\n reservatorio[j] = stream[i]\n i += 1\n\n return dataset.iloc[reservatorio]",
"_____no_output_____"
],
[
"df_amostragem_reservatorio = amostragem_reservatorio(dataset, 1500)",
"_____no_output_____"
],
[
"df_amostragem_reservatorio.head()",
"_____no_output_____"
]
],
[
[
"---\n\n# Referências\n\n1. [Gangwal, R. (2019) What is sampling, Analytics Vidhya. Available at: https://www.analyticsvidhya.com/blog/2019/09/data-scientists-guide-8-types-of-sampling-techniques/ (Accessed: March 27, 2022).](https://www.analyticsvidhya.com/blog/2019/09/data-scientists-guide-8-types-of-sampling-techniques/)\n \n2. [Diez, D. M., Barr, C. D. and Çetinkaya-Rundel, M. (2013) OpenIntro statistics. 4th ed. Marston Gate: Printed in Great Britain by Amazon.co.uk.](https://leanpub.com/os)\n \n1. [Amostragem: Teoria e Prática Usando R, Github.io. Available at: https://amostragemcomr.github.io/livro/index.html (Accessed: March 27, 2022).](https://amostragemcomr.github.io/livro/estrat.html)\n\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
]
|
ec663c7563bbbb2730e2103a719c0530f6c32363 | 122,382 | ipynb | Jupyter Notebook | Exploring_Ebay_Cars_Sales_Data.ipynb | Kathan-Vakharia/Exploring-Ebay-Car-Sales-Data | 68c62feaf3eed785051d021e775ed1091815a1f0 | [
"MIT"
]
| null | null | null | Exploring_Ebay_Cars_Sales_Data.ipynb | Kathan-Vakharia/Exploring-Ebay-Car-Sales-Data | 68c62feaf3eed785051d021e775ed1091815a1f0 | [
"MIT"
]
| null | null | null | Exploring_Ebay_Cars_Sales_Data.ipynb | Kathan-Vakharia/Exploring-Ebay-Car-Sales-Data | 68c62feaf3eed785051d021e775ed1091815a1f0 | [
"MIT"
]
| null | null | null | 39.837891 | 19,094 | 0.4597 | [
[
[
"<a href=\"https://colab.research.google.com/github/Kathan-Vakharia/Exploring-Ebay-Car-Sales-Data/blob/main/Exploring_Ebay_Cars_Sales_Data.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"# Abstract\n\nIn this project, we'll explore data set of used cars from [eBay Kleinanzeigen](https://www.ebay-kleinanzeigen.de/), a classifieds section of the German eBay website.The dataset can found [here](https://data.world/data-society/used-cars-data). But I have used a modified(dirtier) version of the dataset provided by Dataquest. The main aim of this project is to apply **Data Cleaning** and **Analyse** various car listings using `pandas` library of python.",
"_____no_output_____"
],
[
"## Data Dictionary\n<ul>\n<li><code>dateCrawled</code> - When this ad was first crawled. All field-values are taken from this date.</li>\n<li><code>name</code> - Name of the car.</li>\n<li><code>seller</code> - Whether the seller is private or a dealer.</li>\n<li><code>offerType</code> - The type of listing</li>\n<li><code>price</code> - The price on the ad to sell the car.</li>\n<li><code>abtest</code> - Whether the listing is included in an <a href=\"https://www.leanplum.com/blog/test-group-vs-control-group/\">A/B test</a>.</li>\n<li><code>vehicleType</code> - The vehicle Type.</li>\n<li><code>yearOfRegistration</code> - The year in which the car was first registered.</li>\n<li><code>gearbox</code> - The transmission type.</li>\n<li><code>powerPS</code> - The power of the car in PS(german acronym for horsepower).</li>\n<li><code>model</code> - The car model name.</li>\n<li><code>odometer</code> - How many kilometers the car has driven.</li>\n<li><code>monthOfRegistration</code> - The month in which the car was first registered.</li>\n<li><code>fuelType</code> - What type of fuel the car uses.</li>\n<li><code>brand</code> - The brand of the car.</li>\n<li><code>notRepairedDamage</code> - If the car has a damage which is not yet repaired.</li>\n<li><code>dateCreated</code> - The date on which the eBay listing was created.</li>\n<li><code>nrOfPictures</code> - The number of pictures in the ad.</li>\n<li><code>postalCode</code> - The postal code for the location of the vehicle.</li>\n<li><code>lastSeenOnline</code> - When the crawler saw this ad last online.</li>\n</ul>",
"_____no_output_____"
],
[
"# Import Libaries",
"_____no_output_____"
]
],
[
[
"# importing libraries\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"# Read Data",
"_____no_output_____"
]
],
[
[
"url = \"https://raw.githubusercontent.com/Kathan-Vakharia/Exploring-Ebay-Car-Sales-Data/main/autos.csv\"\nautos = pd.read_csv(url, encoding=\"Latin-1\")",
"_____no_output_____"
],
[
"autos.head(n=6)",
"_____no_output_____"
]
],
[
[
"# Examining Columns",
"_____no_output_____"
]
],
[
[
"autos.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 50000 entries, 0 to 49999\nData columns (total 20 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 dateCrawled 50000 non-null object\n 1 name 50000 non-null object\n 2 seller 50000 non-null object\n 3 offerType 50000 non-null object\n 4 price 50000 non-null object\n 5 abtest 50000 non-null object\n 6 vehicleType 44905 non-null object\n 7 yearOfRegistration 50000 non-null int64 \n 8 gearbox 47320 non-null object\n 9 powerPS 50000 non-null int64 \n 10 model 47242 non-null object\n 11 odometer 50000 non-null object\n 12 monthOfRegistration 50000 non-null int64 \n 13 fuelType 45518 non-null object\n 14 brand 50000 non-null object\n 15 notRepairedDamage 40171 non-null object\n 16 dateCreated 50000 non-null object\n 17 nrOfPictures 50000 non-null int64 \n 18 postalCode 50000 non-null int64 \n 19 lastSeen 50000 non-null object\ndtypes: int64(5), object(15)\nmemory usage: 7.6+ MB\n"
]
],
[
[
"- There are **50000** rows and **20** columns in our dataset. And most of the columns are `strings`.\n- Even the dates are represented as strings.\n",
"_____no_output_____"
]
],
[
[
"#Which columns contain null values?\nnull_cols = autos.isna().any()\nnull_cols[null_cols]",
"_____no_output_____"
]
],
[
[
"- Thus, following columns contains `null` values:\n - `vehicleType`\n - `gearbox`\n - `model`\n - `fuelType`\n - `notRepairedDamage`",
"_____no_output_____"
]
],
[
[
"# How many % of data in each column is null\nper_null = autos.isna().sum()/autos.shape[0] * 100\nper_null[per_null != 0]",
"_____no_output_____"
]
],
[
[
"- No column contains more than **~20%** `null` values.",
"_____no_output_____"
],
[
"# Cleaning Column Names\nWe can see below that column names are in **camelCase** but in python, **snake_case** is preferred. \n\n- Let's convert column names to **snake_case** and **reword** some other column names so they are more descriptive.",
"_____no_output_____"
]
],
[
[
"autos.columns",
"_____no_output_____"
],
[
"new_cols = ['date_crawled', 'name', 'seller', 'offer_type', 'price', 'abtest',\n 'vehicle_type', 'registration_year', 'gearbox', 'power_ps', 'model',\n 'odometer', 'registration_month', 'fuel_type', 'brand',\n 'unrepaired_damage', 'ad_created', 'num_pictures', 'postal_code',\n 'last_seen']\nautos.columns = new_cols\nautos.head()",
"_____no_output_____"
]
],
[
[
"# Initial Exploration and Cleaning\n\nInitially we will look for:\n\n- Text/Numeric columns where all or almost all values are the same. These can often be dropped as they don't have useful information for analysis.\n- Examples of numeric data stored as text which can be cleaned and converted.",
"_____no_output_____"
]
],
[
[
"#summary stats for numerical columns\nautos.describe()",
"_____no_output_____"
]
],
[
[
"Noteworthy Observations:\n- `registration_year`'s minimum value is **1000** and maximum value is **9999** which is not possible.\n- `power_ps`'s minimum value is **0** maximun value is **~17K** horse power which again is not possible till date. [ref](https://autowise.com/highest-horsepower-cars/)\n- `registration_month`'s minimum value is **0** and maximum value is **12**, which is again not possible as there are only **12** months in a year\n- There seems to be something wrong with `num_pictures` column as all of its statistics(except count) is zero. \n\nFor now, let's examine `num_pictures` column more,",
"_____no_output_____"
]
],
[
[
"autos[\"num_pictures\"].value_counts()",
"_____no_output_____"
]
],
[
[
"It turns out that all the values in `num_pictures` column are zeros. Thus, this column is of no use to us for data analysis and we'll happily drop it. But before we do that, let's see if have some useless text column that needs to be dropped?",
"_____no_output_____"
]
],
[
[
"#summary stats for text columns\nautos.describe(include=['O'])",
"_____no_output_____"
]
],
[
[
"Noteworthy observations:\n- `seller` and `offer_type` column have 49,999 out of 50,000 values to be **privat** and **Angebot** respectively. This columns are not gonna help us in our analysis since they contain same values in almost all the entries, so we have to drop them.\n\nLet's drop, `seller`, `offer_type`, `num_pictures` columns before we go any further.",
"_____no_output_____"
]
],
[
[
"autos.drop(columns=[\"seller\", \"offer_type\", \"num_pictures\"], inplace=True)\nautos.head()",
"_____no_output_____"
]
],
[
[
"Moreover, `price` and `odometer` columns are stored as text since their values are numbers concatenated with some special characters. \nWe need to remove those special characters, and convert this columns into numeric ones.",
"_____no_output_____"
],
[
"1. Converting `price` column from text to numeric\n",
"_____no_output_____"
]
],
[
[
"#find what to remove\nautos[\"price\"].value_counts()",
"_____no_output_____"
],
[
"autos[\"price\"] = (autos[\"price\"]\n .str.replace(\"$\", \"\")\n .str.replace(\",\", \"\")\n .astype(int) \n )\nautos[\"price\"].head()",
"_____no_output_____"
]
],
[
[
"2. Converting `odometer` from text to numeric.",
"_____no_output_____"
]
],
[
[
"#find what to remove\nautos[\"odometer\"].value_counts()",
"_____no_output_____"
],
[
"autos[\"odometer\"] = (autos[\"odometer\"]\n .str.replace(\",\", \"\")\n .str.replace(\"km\",\"\")\n .astype(int)\n )\nautos[\"odometer\"].head()",
"_____no_output_____"
]
],
[
[
"let's rename the `odometer` column to `odometer_km` to make it look more descriptive.",
"_____no_output_____"
]
],
[
[
"autos.rename(columns={\"odometer\":\"odometer_km\"}, inplace=True)\nautos[\"odometer_km\"].head()",
"_____no_output_____"
]
],
[
[
"# Exploring `odometer` and `price` columns",
"_____no_output_____"
]
],
[
[
"print(\"Unique Odometer Readings:\", autos[\"odometer_km\"].unique().shape[0])\nautos[[\"odometer_km\"]].describe()",
"Unique Odometer Readings: 13\n"
],
[
"autos[\"odometer_km\"].value_counts()",
"_____no_output_____"
]
],
[
[
"Observations:\n- The values are rounded, it means the seller had to choose from pre-set options for this field. Moreover, there are more vehicles with high mileage.",
"_____no_output_____"
]
],
[
[
"print(\"Unique prices: \", autos[\"price\"].unique().shape[0])\nautos[[\"price\"]].describe()",
"Unique prices: 2357\n"
],
[
"#top prices\nautos[\"price\"].value_counts().sort_index(ascending=False).head(15)",
"_____no_output_____"
]
],
[
[
"- Some of the cars are really heavily priced. Some of them are even priced over $\\$1$ million.\n <!-- Although cars can be priced over $\\$1$ million but these are special cars and it is less likely that such cars will be on sites like ebay. [Ref](https://financesonline.com/10-most-expensive-items-ever-sold-on-ebay-top-spot-bought-for-168m/) -->\n",
"_____no_output_____"
]
],
[
[
"#least prices\nautos[\"price\"].value_counts().sort_index().head(15)",
"_____no_output_____"
]
],
[
[
"- There are **1421** entries of cars with a price tag of $\\$0$ which is obviously not valid. And since ebay is an auction site, it completely legitimate to have items starting at $\\$1$.\n\n- We will drop all the cars prices less than $\\$0$ and more than $\\$350,000$ because it seems prices after that is increasing very fast.\n\n - $345000 \\rightarrow 350,000 \\rightarrow 999990$ \n",
"_____no_output_____"
]
],
[
[
"#including prices in range[1, 350000]\nautos= autos[autos[\"price\"].between(1, 350000)]\n#confirming the change\nautos[\"price\"].describe()",
"_____no_output_____"
]
],
[
[
"# Exploring Date Columns\n\nFollowing are the date columns,\n\n- `date_crawled`\n- `registration_year`\n- `registration_month`\n- `ad_created`\n- `last_seen`\n\n`registration_year` and `registration_month` are stored as **numeric** values\n\n`date_crawled`, `ad_created` and `last_seen` are stored as **string** values.",
"_____no_output_____"
]
],
[
[
"#columns which should be treated as date\ndate_cols = {\"strings\": [\"date_crawled\", \"ad_created\",\"last_seen\"],\n \"numeric\":[\"registration_year\",\"registration_month\"]\n }\n\n#getting sense of how date is stored as strings\nautos[date_cols[\"strings\"]][0:5]",
"_____no_output_____"
]
],
[
[
"- It seems like dates in these columns are stored in following format, **YYYY-MM-DD HH:MM:SS**",
"_____no_output_____"
]
],
[
[
"#percentage distribution of date_crawled\nprint(\"Unique Entries:\", autos[\"date_crawled\"].str[:10].unique().shape[0])\n(autos[\"date_crawled\"]\n .str[:10]\n .value_counts(normalize=True, dropna=False)\n .sort_index()\n)*100",
"Unique Entries: 34\n"
]
],
[
[
"- We can see that the site was crawled almost everyday in one month period in $March$ and $April$ of year $2016$ by the crawler.\n\n- Also the distribution is fairly uniform.",
"_____no_output_____"
]
],
[
[
"#percentage distribution of last_seen: The ad was removed after this date\n#presumably because the car was sold\nprint(\"Unique Entries:\", autos[\"last_seen\"].str[:10].unique().shape[0])\n(autos[\"last_seen\"]\n .str[:10]\n .value_counts(normalize=True, dropna=False)\n .sort_index()\n)*100",
"Unique Entries: 34\n"
]
],
[
[
"The last three days contain a disproportionate amount of `last_seen` values. Given that these are **6-10 times** the values from the previous days, it's unlikely that there was a massive spike in sales, and more likely that these values are to do with the crawling period ending and don't indicate car sales.\n\nNote: It is not possible for the crawler to see all the ads everyday!",
"_____no_output_____"
]
],
[
[
"#percentage distribution of ad_created\nprint(\"Unique Entries:\", autos[\"ad_created\"].str[:10].unique().shape[0])\n(autos[\"ad_created\"]\n .str[:10]\n .value_counts(normalize=True, dropna=False)\n .sort_index()\n)*100",
"Unique Entries: 76\n"
],
[
"(autos[\"ad_created\"]\n .str[:10]\n .value_counts(normalize=True, dropna=False)\n .sort_index()\n)*100",
"_____no_output_____"
],
[
"#distribution of registration_year\nautos[\"registration_year\"].describe()",
"_____no_output_____"
]
],
[
[
"- `registration_year`'s minimum value is **1000** (way before cars were invented) and maximum value is **9999** (many years into the future) which is not possible.",
"_____no_output_____"
],
[
"# Dealing with Incorrect `registration_year` Data\n\nThe automobile was first invented and perfected in Germany and France in the late 1800s. [ref](https://www.history.com/topics/inventions/automobiles)\n\nSo Realistically all the `registration_year` values less than $1800$ are not possible. Moreover, the crawler crawled the site in $2016$ so the values after $2016$ are also not possible.\n\nLet's see how many percentage of values are not in the realistic range of $[1800, 2016]$ and if we can drop them.",
"_____no_output_____"
]
],
[
[
"np.logical_not(autos[\"registration_year\"].between(1800, 2016)).sum()/autos.shape[0] * 100",
"_____no_output_____"
]
],
[
[
"- There are ~4% of non-realistic values so we'll happily drop them.",
"_____no_output_____"
]
],
[
[
"#keep only entries whose `registration_year` is in realistic range\nautos = autos.loc[autos[\"registration_year\"].between(1800, 2016)]\n#check if column was updated\nautos[\"registration_year\"].describe()",
"_____no_output_____"
],
[
"#distribution of modified `registration_year` col\nprint(\"Unique Values:\", autos[\"registration_year\"].unique().shape[0])\n(autos[\"registration_year\"].value_counts(normalize=True, dropna=False)*100).head(20)",
"Unique Values: 79\n"
]
],
[
[
"It seems like majority of the vehicles were first registered in past $20-30$ years itself.",
"_____no_output_____"
]
],
[
[
"#how many % vehicles are in top 20\n(autos[\"registration_year\"].value_counts(normalize=True, dropna=False)*100).head(20).sum()",
"_____no_output_____"
]
],
[
[
"# Exploring Price By Brand\n",
"_____no_output_____"
]
],
[
[
"#unique brands\nautos[\"brand\"].unique()",
"_____no_output_____"
],
[
"#percentage distribution of \"brand\"\n(autos[\"brand\"]\n .value_counts(normalize=True)\n .sort_values(ascending=False)*100)",
"_____no_output_____"
]
],
[
[
"- **Volkswagen** has by far the most listings followed by **BMW** and **Opel** . \n\n- It is evident that **German** manufacturers dominate the listings since top **five** spots are held by them.\n\n- Alot of brands don't have significant amount of listings. \n\nSo we'll limit our analyis to brands that have more than $5\\%(2500)$ listings.",
"_____no_output_____"
]
],
[
[
"brand_counts = (autos[\"brand\"]\n .value_counts(normalize=True)\n .sort_values(ascending=False)\n )\ncommon_brands = brand_counts[brand_counts.gt(0.05)].index\nprint(common_brands)",
"Index(['volkswagen', 'bmw', 'opel', 'mercedes_benz', 'audi', 'ford'], dtype='object')\n"
]
],
[
[
"- Let's calculate **average** price by most common brand.",
"_____no_output_____"
]
],
[
[
"#Using groupby\nautos.groupby(\"brand\")[\"price\"].mean().loc[common_brands].round()",
"_____no_output_____"
],
[
"#using for loop\nbrand_mean_prices = {}\nfor brand in common_brands:\n avg_price = autos.loc[autos[\"brand\"] == brand, \"price\"].mean()\n brand_mean_prices[brand] = round(avg_price)\n\nbrand_mean_prices",
"_____no_output_____"
]
],
[
[
"- **audi**, **bmw**, **mercedes_benz** are more expensive while **opel** and **ford** are less expensive.\n- **volkswagen** lies in between. That might also explain the reason for its poplularity: it may give best of both worlds!",
"_____no_output_____"
],
[
"# Exploring Mileage\nLet's see if milege has anything to do with the average prices.",
"_____no_output_____"
]
],
[
[
"#using groupby\nautos.groupby(\"brand\")[[\"price\", \"odometer_km\"]].mean().loc[common_brands].round()",
"_____no_output_____"
],
[
"#using for loop\nbrand_mean_mileage = {}\nfor brand in common_brands:\n avg_mileage = autos.loc[autos[\"brand\"] == brand, \"odometer_km\"].mean()\n brand_mean_mileage[brand] = round(avg_mileage)\n\nbrand_mean_mileage",
"_____no_output_____"
],
[
"brand_info = pd.DataFrame(data = {\"avg_price\":brand_mean_prices, \"avg_mileage\":brand_mean_mileage})\nbrand_info",
"_____no_output_____"
]
],
[
[
"- The range of car mileages does not vary as much as the prices do by brand.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
]
|
ec663d0aa8f596f68280b8610caa9189d90c702b | 62,678 | ipynb | Jupyter Notebook | Unsupervised Learning/K-Means Clustering/K_Means_Clustering.ipynb | Jawwad-Fida/Machine-Learning-Algorithms | c326cd83850b771b979b8dfcbca6a54c508b035a | [
"MIT"
]
| 1 | 2021-07-07T07:44:20.000Z | 2021-07-07T07:44:20.000Z | Unsupervised Learning/K-Means Clustering/K_Means_Clustering.ipynb | Jawwad-Fida/Machine-Learning-Algorithms | c326cd83850b771b979b8dfcbca6a54c508b035a | [
"MIT"
]
| null | null | null | Unsupervised Learning/K-Means Clustering/K_Means_Clustering.ipynb | Jawwad-Fida/Machine-Learning-Algorithms | c326cd83850b771b979b8dfcbca6a54c508b035a | [
"MIT"
]
| null | null | null | 180.109195 | 32,442 | 0.881139 | [
[
[
"## Importing the libraries",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Data Preprocessing",
"_____no_output_____"
],
[
"### Importing the dataset\n\nEach row correponds to a customer\n\nFeature: spending score (evaluation metric) --> measure how much each customer spends\n\nWe will identify some patterns within the customer base",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv(\"Mall_Customers.csv\")\n\n# Unsupervised learning has only X (features)\n\n# feature - customer id is not needed for our model (exclude it)\n\n# Note:\n# To visualize our clusters, we will need 2 features from our dataset (One axis per feature). (2 features --> 2D plot), so\n# for the time being, we will not consider other features other than 2 chosen.\n\n# Features chosen -- Annual Income (index 3), Spending Score (index 4)\nX = data.iloc[:, [3, 4]].values\n\n# Take all rows, of column index 3 and 4",
"_____no_output_____"
]
],
[
[
"Also, since there is no y, we won't be splitting our dataset into Train and Test set\n\nX[0] --> Annual Income\n\nX[1] --> Spending Score",
"_____no_output_____"
],
[
"## Using the elbow method to find the optimal number of clusters, K",
"_____no_output_____"
]
],
[
[
"# Implement elbow method by running K-Means algorithm several times with different values of K\n\nfrom sklearn.cluster import KMeans\nwcss = []\n\n# loop will run K-Means algorithm with different values of K (from 1 to 10)\n# we will calculate WCSS for each cluster each time the algorithm runs\n\n# Graph ==> y - WCSS for each cluster , x - no. of clusters K\n\nfor i in range(1,11):\n # Create object of KMeans Class (Model)\n kmeans = KMeans(n_clusters=i, init = 'k-means++', random_state=42)\n # k-means++ --> an initiliazation trick to prevent the model from falling into the Random Initialization trap\n # random_state = 42 --> results will be reproducable\n\n # Train the model\n kmeans.fit(X)\n\n # calculate WCSS\n wcss.append(kmeans.inertia_) # inertia_ is an attribute of KMeans class\n\n# Plot the graph of the elbow method\nplt.figure(figsize=(8, 6))\nplt.plot(range(1,11),wcss)\nplt.title(\"The Elbow Method\")\nplt.xlabel(\"Number of Clusters, K\")\nplt.ylabel(\"WCSS\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"From the graph, we can see that the optimal value of K = 5\n\n(Distortion of graph decreases after K=5, i.e. graph slowly becomes flat)",
"_____no_output_____"
],
[
"## Training the K-Means model on the dataset",
"_____no_output_____"
]
],
[
[
"K = 5\n\nkmeans = KMeans(n_clusters=K, init = 'k-means++', random_state=42)\n\n# Build the dependent variable - values are numbers of clusters\n\n# 1 -- cluster 1, 2 -- cluster 2, 3 -- cluster 3 .....\n# each cluster represents a different set(group) of customers\n# customers will be grouped due to their similar qualities\n\n# fit_predict() --> trains model, and also returns y(dependent variable)\ny_kmeans = kmeans.fit_predict(X) # just like y_pred",
"_____no_output_____"
],
[
"print(y_kmeans)\n# all the clusters that each customer belongs to\n\n# first customer --> belongs to cluster 3, second customer --> cluster 0, third customer --> cluster 3, .......",
"[3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3\n 0 3 0 3 0 3 1 3 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 2 4 2 1 2 4 2 4 2 1 2 4 2 4 2 4 2 4 2 1 2 4 2 4 2\n 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4\n 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2]\n"
]
],
[
[
"## Visualising the clusters\n\nwith their centroids",
"_____no_output_____"
]
],
[
[
"# Scatter plot each cluster separately\nplt.figure(figsize=(8, 6))\n\n# 1) Plot the Clusters\n\n# X --> Annual income, X[0]. y --> Spending Score, X[1]\n# X[y_kmeans == 0, 0] --> select all customers from Annual Income that belong to Cluster 0\n# X[y_kmeans == 0, 1] --> select all customers from Spending Score that belong to Cluster 0\n\nplt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s=100, c = 'red', label='Cluster 0') \n\nplt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s=100, c = 'blue', label='Cluster 1') \nplt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s=100, c = 'green', label='Cluster 2') \nplt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s=100, c = 'cyan', label='Cluster 3') \nplt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s=100, c = 'magenta', label='Cluster 4') \n\n# 2) Plot the Centroids of each cluster\n\n# cluster_centers is an attribute of KMeans class. It is a 2D array where rows=different centroids, column = co-ordinates of centroids\n\n# cluster_centers[:, 0] --> take all rows, then first column of X - X[0] (x co-ordinate of centroid)\n# cluster_centers[:, 1] --> take all rows, then second column of X - X[1] (y co-ordinate of centroid)\n\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='yellow', label='Centroids')\n\nplt.title(\"Clusters of Customers\")\nplt.xlabel(\"Annual Income (k$)\")\nplt.ylabel(\"Spending Score (1-100)\")\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Conclusion",
"_____no_output_____"
],
[
"Cluster 0 --> customers have low annual income, and high spending score\n\nCluster 4 --> Customers have high annual income, and has low spending score",
"_____no_output_____"
],
[
"### Some Marketing/Business Strategies\n\nTarget customers in cluster 2 because they have high income, and they spend a lot. Thus, more profit can be generated for the business. So we can target them with new deals and offers\n\nFor new deals and offers, do not target(or pressurize) customers in cluster 0. These customers show loyalty to the business and must be retained.\n\nWe can target some customers in cluster 4 in order to attract them to the business. \n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
ec663dd7f100d61d4295f8542af1a464f1114c9c | 21,751 | ipynb | Jupyter Notebook | IBM Professional Certificates/Python for Data Science and AI/5-3.2-API_2.ipynb | Bezhuang/LearnCS | d82cc691e1854454576c769a090f4cfb5f91c47a | [
"MIT"
]
| 1 | 2021-10-03T05:14:25.000Z | 2021-10-03T05:14:25.000Z | IBM Professional Certificates/Python for Data Science and AI/5-3.2-API_2.ipynb | Bezhuang/LearnCS | d82cc691e1854454576c769a090f4cfb5f91c47a | [
"MIT"
]
| null | null | null | IBM Professional Certificates/Python for Data Science and AI/5-3.2-API_2.ipynb | Bezhuang/LearnCS | d82cc691e1854454576c769a090f4cfb5f91c47a | [
"MIT"
]
| 1 | 2021-05-25T03:04:08.000Z | 2021-05-25T03:04:08.000Z | 31.523188 | 575 | 0.606133 | [
[
[
"<center>\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png\" width=\"300\" alt=\"cognitiveclass.ai logo\" />\n</center>\n\n# Watson Speech to Text Translator\n\nEstimated time needed: **25** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n- Create Speech to Text Translator\n",
"_____no_output_____"
],
[
"### Introduction\n\n<p>In this notebook, you will learn to convert an audio file of an English speaker to text using a Speech to Text API. Then you will translate the English version to a Spanish version using a Language Translator API. <b>Note:</b> You must obtain the API keys and enpoints to complete the lab.</p>\n",
"_____no_output_____"
],
[
"<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<h2>Table of Contents</h2>\n<ul>\n <li><a href=\"#ref0\">Speech To Text</a></li>\n <li><a href=\"#ref1\">Language Translator</a></li>\n <li><a href=\"#ref2\">Exercise</a></li>\n</ul>\n</div>\n",
"_____no_output_____"
]
],
[
[
"#you will need the following library \n!pip install PyJWT==1.7.1 ibm_watson wget",
"Collecting PyJWT==1.7.1\n Downloading https://files.pythonhosted.org/packages/87/8b/6a9f14b5f781697e51259d81657e6048fd31a113229cf346880bb7545565/PyJWT-1.7.1-py2.py3-none-any.whl\nCollecting ibm_watson\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9b/88/395d7d52df29f321ae1150cf9b5a71cef8611570230502597c427bc1e9d9/ibm-watson-5.1.0.tar.gz (382kB)\n\u001b[K |████████████████████████████████| 389kB 6.0MB/s eta 0:00:01\n\u001b[?25hCollecting wget\n Downloading https://files.pythonhosted.org/packages/47/6a/62e288da7bcda82b935ff0c6cfe542970f04e29c756b0e147251b2fb251f/wget-3.2.zip\nRequirement already satisfied: requests<3.0,>=2.0 in /home/jupyterlab/conda/envs/python/lib/python3.6/site-packages (from ibm_watson) (2.25.0)\nRequirement already satisfied: python_dateutil>=2.5.3 in /home/jupyterlab/conda/envs/python/lib/python3.6/site-packages (from ibm_watson) (2.8.1)\nCollecting websocket-client==0.48.0 (from ibm_watson)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/8a/a1/72ef9aa26cfe1a75cee09fc1957e4723add9de098c15719416a1ee89386b/websocket_client-0.48.0-py2.py3-none-any.whl (198kB)\n\u001b[K |████████████████████████████████| 204kB 28.4MB/s eta 0:00:01\n\u001b[?25hCollecting ibm_cloud_sdk_core>=3.3.6 (from ibm_watson)\n Downloading https://files.pythonhosted.org/packages/49/de/1d21ed3af7f7c755829d813e9f33404e56bd988053221f9b3b9604e0f672/ibm-cloud-sdk-core-3.5.1.tar.gz\nRequirement already satisfied: chardet<4,>=3.0.2 in /home/jupyterlab/conda/envs/python/lib/python3.6/site-packages (from requests<3.0,>=2.0->ibm_watson) (3.0.4)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /home/jupyterlab/conda/envs/python/lib/python3.6/site-packages (from requests<3.0,>=2.0->ibm_watson) (1.25.11)\nRequirement already satisfied: certifi>=2017.4.17 in /home/jupyterlab/conda/envs/python/lib/python3.6/site-packages (from requests<3.0,>=2.0->ibm_watson) (2020.12.5)\nRequirement already satisfied: idna<3,>=2.5 in 
/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages (from requests<3.0,>=2.0->ibm_watson) (2.10)\nRequirement already satisfied: six>=1.5 in /home/jupyterlab/conda/envs/python/lib/python3.6/site-packages (from python_dateutil>=2.5.3->ibm_watson) (1.15.0)\nBuilding wheels for collected packages: ibm-watson, wget, ibm-cloud-sdk-core\n Building wheel for ibm-watson (setup.py) ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /home/jupyterlab/.cache/pip/wheels/49/6d/cf/1d91261b96363da78bf9b02699fd2262e6b5dad179500690c1\n Building wheel for wget (setup.py) ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /home/jupyterlab/.cache/pip/wheels/40/15/30/7d8f7cea2902b4db79e3fea550d7d7b85ecb27ef992b618f3f\n Building wheel for ibm-cloud-sdk-core (setup.py) ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /home/jupyterlab/.cache/pip/wheels/e1/e6/66/7494a76d8fdee07cd3b2017d92d6c1309e54130cbbdf17448f\nSuccessfully built ibm-watson wget ibm-cloud-sdk-core\n\u001b[31mERROR: ibm-cloud-sdk-core 3.5.1 has requirement PyJWT<3.0.0,>=2.0.1, but you'll have pyjwt 1.7.1 which is incompatible.\u001b[0m\nInstalling collected packages: PyJWT, websocket-client, ibm-cloud-sdk-core, ibm-watson, wget\nSuccessfully installed PyJWT-1.7.1 ibm-cloud-sdk-core-3.5.1 ibm-watson-5.1.0 websocket-client-0.48.0 wget-3.2\n"
]
],
[
[
"<h2 id=\"ref0\">Speech to Text</h2>\n",
"_____no_output_____"
],
[
"<p>First we import <code>SpeechToTextV1</code> from <code>ibm_watson</code>.For more information on the API, please click on this <a href=\"https://cloud.ibm.com/apidocs/speech-to-text?code=python\">link</a></p>\n",
"_____no_output_____"
]
],
[
[
"from ibm_watson import SpeechToTextV1 \nimport json\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator",
"_____no_output_____"
]
],
[
[
"<p>The service endpoint is based on the location of the service instance, we store the information in the variable URL. To find out which URL to use, view the service credentials and paste the url here.</p>\n",
"_____no_output_____"
]
],
[
[
"url_s2t = \"\"",
"_____no_output_____"
]
],
[
[
"<p>You require an API key, and you can obtain the key on the <a href=\"https://cloud.ibm.com/resources\">Dashboard </a>.</p>\n",
"_____no_output_____"
]
],
[
[
"iam_apikey_s2t = \"\"",
"_____no_output_____"
]
],
[
[
"<p>You create a <a href=\"http://watson-developer-cloud.github.io/python-sdk/v0.25.0/apis/watson_developer_cloud.speech_to_text_v1.html\">Speech To Text Adapter object</a> the parameters are the endpoint and API key.</p>\n",
"_____no_output_____"
]
],
[
[
"authenticator = IAMAuthenticator(iam_apikey_s2t)\ns2t = SpeechToTextV1(authenticator=authenticator)\ns2t.set_service_url(url_s2t)\ns2t",
"_____no_output_____"
]
],
[
[
"<p>Lets download the audio file that we will use to convert into text.</p>\n",
"_____no_output_____"
]
],
[
[
"!wget -O PolynomialRegressionandPipelines.mp3 https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/PolynomialRegressionandPipelines.mp3\n\n",
"--2021-02-03 14:17:00-- https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/PolynomialRegressionandPipelines.mp3\nResolving cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)... 169.63.118.104\nConnecting to cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)|169.63.118.104|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 4234179 (4.0M) [audio/mpeg]\nSaving to: ‘PolynomialRegressionandPipelines.mp3’\n\nPolynomialRegressio 100%[===================>] 4.04M 5.23MB/s in 0.8s \n\n2021-02-03 14:17:01 (5.23 MB/s) - ‘PolynomialRegressionandPipelines.mp3’ saved [4234179/4234179]\n\n"
]
],
[
[
"<p>We have the path of the wav file we would like to convert to text</p>\n",
"_____no_output_____"
]
],
[
[
"filename='PolynomialRegressionandPipelines.mp3'",
"_____no_output_____"
]
],
[
[
"<p>We create the file object <code>wav</code> with the wav file using <code>open</code> ; we set the <code>mode</code> to \"rb\" , this is similar to read mode, but it ensures the file is in binary mode.We use the method <code>recognize</code> to return the recognized text. The parameter audio is the file object <code>wav</code>, the parameter <code>content_type</code> is the format of the audio file.</p>\n",
"_____no_output_____"
]
],
[
[
"with open(filename, mode=\"rb\") as wav:\n response = s2t.recognize(audio=wav, content_type='audio/mp3')",
"_____no_output_____"
]
],
[
[
"<p>The attribute result contains a dictionary that includes the translation:</p>\n",
"_____no_output_____"
]
],
[
[
"response.result",
"_____no_output_____"
],
[
"from pandas.io.json import json_normalize\n\njson_normalize(response.result['results'],\"alternatives\")",
"_____no_output_____"
],
[
"response",
"_____no_output_____"
]
],
[
[
"<p>We can obtain the recognized text and assign it to the variable <code>recognized_text</code>:</p>\n",
"_____no_output_____"
]
],
[
[
"recognized_text=response.result['results'][0][\"alternatives\"][0][\"transcript\"]\ntype(recognized_text)",
"_____no_output_____"
]
],
[
[
"<h2 id=\"ref1\">Language Translator</h2>\n",
"_____no_output_____"
],
[
"<p>First we import <code>LanguageTranslatorV3</code> from ibm_watson. For more information on the API click <a href=\"https://cloud.ibm.com/apidocs/speech-to-text?code=python\"> here</a></p>\n",
"_____no_output_____"
]
],
[
[
"from ibm_watson import LanguageTranslatorV3",
"_____no_output_____"
]
],
[
[
"<p>The service endpoint is based on the location of the service instance, we store the information in the variable URL. To find out which URL to use, view the service credentials.</p>\n",
"_____no_output_____"
]
],
[
[
"url_lt='https://gateway.watsonplatform.net/language-translator/api'",
"_____no_output_____"
]
],
[
[
"<p>You require an API key, and you can obtain the key on the <a href=\"https://cloud.ibm.com/resources\">Dashboard</a>.</p>\n",
"_____no_output_____"
]
],
[
[
"apikey_lt=''",
"_____no_output_____"
]
],
[
[
"<p>API requests require a version parameter that takes a date in the format version=YYYY-MM-DD. This lab describes the current version of Language Translator, 2018-05-01</p>\n",
"_____no_output_____"
]
],
[
[
"version_lt='2018-05-01'",
"_____no_output_____"
]
],
[
[
"<p>we create a Language Translator object <code>language_translator</code>:</p>\n",
"_____no_output_____"
]
],
[
[
"authenticator = IAMAuthenticator(apikey_lt)\nlanguage_translator = LanguageTranslatorV3(version=version_lt,authenticator=authenticator)\nlanguage_translator.set_service_url(url_lt)\nlanguage_translator",
"_____no_output_____"
]
],
[
[
"<p>We can get a Lists the languages that the service can identify.\nThe method Returns the language code. For example English (en) to Spanis (es) and name of each language.</p>\n",
"_____no_output_____"
]
],
[
[
"from pandas.io.json import json_normalize\n\njson_normalize(language_translator.list_identifiable_languages().get_result(), \"languages\")",
"_____no_output_____"
]
],
[
[
"<p>We can use the method <code>translate</code> this will translate the text. The parameter text is the text. Model_id is the type of model we would like to use use we use list the language . In this case, we set it to 'en-es' or English to Spanish. We get a Detailed Response object translation_response</p>\n",
"_____no_output_____"
]
],
[
[
"translation_response = language_translator.translate(\\\n text=recognized_text, model_id='en-es')\ntranslation_response",
"_____no_output_____"
]
],
[
[
"<p>The result is a dictionary.</p>\n",
"_____no_output_____"
]
],
[
[
"translation=translation_response.get_result()\ntranslation",
"_____no_output_____"
]
],
[
[
"<p>We can obtain the actual translation as a string as follows:</p>\n",
"_____no_output_____"
]
],
[
[
"spanish_translation =translation['translations'][0]['translation']\nspanish_translation ",
"_____no_output_____"
]
],
[
[
"<p>We can translate back to English</p>\n",
"_____no_output_____"
]
],
[
[
"translation_new = language_translator.translate(text=spanish_translation ,model_id='es-en').get_result()",
"_____no_output_____"
]
],
[
[
"<p>We can obtain the actual translation as a string as follows:</p>\n",
"_____no_output_____"
]
],
[
[
"translation_eng=translation_new['translations'][0]['translation']\ntranslation_eng",
"_____no_output_____"
]
],
[
[
"<br>\n",
"_____no_output_____"
],
[
"<h2>Quiz</h2>\n",
"_____no_output_____"
],
[
"Translate to French.\n",
"_____no_output_____"
]
],
[
[
"# Write your code below and press Shift+Enter to execute\nFrench_translation=language_translator.translate(\n text=translation_eng , model_id='en-fr').get_result()\n\nFrench_translation['translations'][0]['translation']",
"_____no_output_____"
]
],
[
[
"<details><summary>Click here for the solution</summary>\n\n```python\nFrench_translation=language_translator.translate(\n text=translation_eng , model_id='en-fr').get_result()\n\nFrench_translation['translations'][0]['translation']\n\n```\n\n</details>\n",
"_____no_output_____"
],
[
"<h3>Language Translator</h3>\n",
"_____no_output_____"
],
[
" <a href=\"https://cloud.ibm.com/catalog/services/watson-studio\"><img src=\"https://ibm.box.com/shared/static/irypdxea2q4th88zu1o1tsd06dya10go.png\" width=\"750\" align=\"center\"></a>\n",
"_____no_output_____"
],
[
"<b>References</b>\n",
"_____no_output_____"
],
[
"[https://cloud.ibm.com/apidocs/speech-to-text?code=python](https://cloud.ibm.com/apidocs/speech-to-text?code=python&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork-19487395&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)\n",
"_____no_output_____"
],
[
"[https://cloud.ibm.com/apidocs/language-translator?code=python](https://cloud.ibm.com/apidocs/language-translator?code=python&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork-19487395&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)\n",
"_____no_output_____"
],
[
"<hr>\n",
"_____no_output_____"
],
[
"## Authors:\n\n [Joseph Santarcangelo](https://www.linkedin.com/in/joseph-s-50398b136?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork-19487395&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork-19487395&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) \n\nJoseph Santarcangelo has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.\n\n## Other Contributor(s)\n\n<a href=\"https://www.linkedin.com/in/fanjiang0619/\">Fan Jiang</a>\n\n## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n| ----------------- | ------- | ---------- | ---------------------------------- |\n| 2021-01-05 | 2.1 | Malika | Added a library |\n| 2020-08-26 | 2.0 | Lavanya | Moved lab to course repo in GitLab |\n| | | | |\n| | | | |\n\n<hr/>\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
ec6644822ce98054b7189db7d799e00d703e15f0 | 6,752 | ipynb | Jupyter Notebook | xgboost/MushroomClassification/mushroom_data_preparation.ipynb | srinivas02/AmazonSageMakerCourse | 98d77dfea6cf5bb3ddd23c44a2adfb8be29b10b3 | [
"Apache-2.0"
]
| 1 | 2021-07-03T04:35:45.000Z | 2021-07-03T04:35:45.000Z | xgboost/MushroomClassification/mushroom_data_preparation.ipynb | srinivas02/AmazonSageMakerCourse | 98d77dfea6cf5bb3ddd23c44a2adfb8be29b10b3 | [
"Apache-2.0"
]
| null | null | null | xgboost/MushroomClassification/mushroom_data_preparation.ipynb | srinivas02/AmazonSageMakerCourse | 98d77dfea6cf5bb3ddd23c44a2adfb8be29b10b3 | [
"Apache-2.0"
]
| 1 | 2020-08-17T17:58:31.000Z | 2020-08-17T17:58:31.000Z | 24.02847 | 110 | 0.529177 | [
[
[
"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing",
"_____no_output_____"
]
],
[
[
"<h2>Mushroom Classification Dataset - All Categorical Features</h2>\n<h4>Hands-on: Classification with AWS Machine Learning Service</h4>\nInput Features: 'cap-shape', 'cap-surface', 'cap-color', 'bruises',\n 'odor', 'gill-attachment', 'gill-spacing', 'gill-size', 'gill-color',\n 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring',\n 'stalk-surface-below-ring', 'stalk-color-above-ring',\n 'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number',\n 'ring-type', 'spore-print-color', 'population', 'habitat'<br>\nTarget Feature: 'class_edible'<br>\nObjective: Predict class for given input features<br>\n<h4>Data source: https://archive.ics.uci.edu/ml/datasets/mushroom</h4>",
"_____no_output_____"
]
],
[
[
"columns = ['class_edible', 'cap-shape', 'cap-surface', 'cap-color', 'bruises',\n 'odor', 'gill-attachment', 'gill-spacing', 'gill-size', 'gill-color',\n 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring',\n 'stalk-surface-below-ring', 'stalk-color-above-ring',\n 'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number',\n 'ring-type', 'spore-print-color', 'population', 'habitat']",
"_____no_output_____"
],
[
"df = pd.read_csv('mushroom_data_all.csv')",
"_____no_output_____"
],
[
"df['class_edible'].value_counts()",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"# https://stackoverflow.com/questions/24458645/label-encoding-across-multiple-columns-in-scikit-learn\nfrom collections import defaultdict\nd = defaultdict(preprocessing.LabelEncoder)",
"_____no_output_____"
],
[
"# Encoding the variable\ndf = df.apply(lambda x: d[x.name].fit_transform(x))",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"d.keys()",
"_____no_output_____"
],
[
"for key in d.keys():\n print(key, d[key].classes_)",
"_____no_output_____"
],
[
"df['class_edible'].value_counts()",
"_____no_output_____"
],
[
"df.to_csv('mushroom_encoded_all.csv'\n ,index=False)",
"_____no_output_____"
]
],
[
[
"## Training and Validation Set\n### Target Variable as first column followed by input features:\n'class_edible', 'cap-shape', 'cap-surface', 'cap-color', 'bruises',\n 'odor', 'gill-attachment', 'gill-spacing', 'gill-size', 'gill-color',\n 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring',\n 'stalk-surface-below-ring', 'stalk-color-above-ring',\n 'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number',\n 'ring-type', 'spore-print-color', 'population', 'habitat'\n### Training, Validation files do not have a column header",
"_____no_output_____"
]
],
[
[
"# Training = 70% of the data\n# Validation = 30% of the data\n# Randomize the datset\nnp.random.seed(5)\nl = list(df.index)\nnp.random.shuffle(l)\ndf = df.iloc[l]",
"_____no_output_____"
],
[
"rows = df.shape[0]\ntrain = int(.7 * rows)\ntest = int(.3 * rows)",
"_____no_output_____"
],
[
"rows, train, test",
"_____no_output_____"
],
[
"# Write Training Set\ndf[:train].to_csv('mushroom_train.csv'\n ,index=False,index_label='Row',header=False\n ,columns=columns)",
"_____no_output_____"
],
[
"# Write Validation Set\ndf[train:].to_csv('mushroom_validation.csv'\n ,index=False,index_label='Row',header=False\n ,columns=columns)",
"_____no_output_____"
],
[
"# Write Column List\nwith open('mushroom_train_column_list.txt','w') as f:\n f.write(','.join(columns))",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6652068fd7326ba524904cac1ffab0deae25a4 | 1,059 | ipynb | Jupyter Notebook | notebooks/index.ipynb | mmercan/study_python | 1170116d57aecf331b4d8d5dd78ac1c6abe5003a | [
"MIT"
]
| null | null | null | notebooks/index.ipynb | mmercan/study_python | 1170116d57aecf331b4d8d5dd78ac1c6abe5003a | [
"MIT"
]
| null | null | null | notebooks/index.ipynb | mmercan/study_python | 1170116d57aecf331b4d8d5dd78ac1c6abe5003a | [
"MIT"
]
| null | null | null | 16.045455 | 45 | 0.459868 | [
[
[
"### Hello\n* 1\n* 2\n\n\n[Basic notebook](./python_basics.ipynb)",
"_____no_output_____"
]
],
[
[
"a= 3\nb =5\na+b",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
]
]
|
ec6653d45d8a9995a563a36190d77656b9444c13 | 692,957 | ipynb | Jupyter Notebook | ipython/10_Stochastics.ipynb | adenzhang/pythonforfinance | a6ec765c58ea761f880cf2871cf158a31f02d2f6 | [
"CNRI-Python"
]
| 15 | 2018-07-10T09:18:23.000Z | 2021-12-30T06:35:09.000Z | legacy/ipython/10_Stochastics.ipynb | madxro/py4fi | a28206939ca7c81794186c5baf2bdddd70e82820 | [
"CNRI-Python"
]
| null | null | null | legacy/ipython/10_Stochastics.ipynb | madxro/py4fi | a28206939ca7c81794186c5baf2bdddd70e82820 | [
"CNRI-Python"
]
| 13 | 2018-01-08T01:10:22.000Z | 2021-05-26T17:35:35.000Z | 263.682268 | 107,236 | 0.911704 | [
[
[
"<img src=\"http://hilpisch.com/tpq_logo.png\" alt=\"The Python Quants\" width=\"35%\" align=\"right\" border=\"0\"><br>",
"_____no_output_____"
],
[
"# Python for Finance",
"_____no_output_____"
],
[
"**Analyze Big Financial Data**\n\nO'Reilly (2014)\n\nYves Hilpisch",
"_____no_output_____"
],
[
"<img style=\"border:0px solid grey;\" src=\"http://hilpisch.com/python_for_finance.png\" alt=\"Python for Finance\" width=\"30%\" align=\"left\" border=\"0\">",
"_____no_output_____"
],
[
"**Buy the book ** |\n<a href='http://shop.oreilly.com/product/0636920032441.do' target='_blank'>O'Reilly</a> |\n<a href='http://www.amazon.com/Yves-Hilpisch/e/B00JCYHHJM' target='_blank'>Amazon</a>\n\n**All book codes & IPYNBs** |\n<a href=\"http://oreilly.quant-platform.com\">http://oreilly.quant-platform.com</a>\n\n**The Python Quants GmbH** | <a href='http://pythonquants.com' target='_blank'>www.pythonquants.com</a>\n\n**Contact us** | <a href='mailto:[email protected]'>[email protected]</a>",
"_____no_output_____"
],
[
"# Stochastics",
"_____no_output_____"
],
[
"## Random Numbers",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport numpy.random as npr\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"npr.rand(10)",
"_____no_output_____"
],
[
"npr.rand(5, 5)",
"_____no_output_____"
],
[
"a = 5.\nb = 10.\nnpr.rand(10) * (b - a) + a",
"_____no_output_____"
],
[
"npr.rand(5, 5) * (b - a) + a",
"_____no_output_____"
],
[
"sample_size = 500\nrn1 = npr.rand(sample_size, 3)\nrn2 = npr.randint(0, 10, sample_size)\nrn3 = npr.sample(size=sample_size)\na = [0, 25, 50, 75, 100]\nrn4 = npr.choice(a, size=sample_size)",
"_____no_output_____"
],
[
"fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2,\n figsize=(7, 7))\nax1.hist(rn1, bins=25, stacked=True)\nax1.set_title('rand')\nax1.set_ylabel('frequency')\nax1.grid(True)\nax2.hist(rn2, bins=25)\nax2.set_title('randint')\nax2.grid(True)\nax3.hist(rn3, bins=25)\nax3.set_title('sample')\nax3.set_ylabel('frequency')\nax3.grid(True)\nax4.hist(rn4, bins=25)\nax4.set_title('choice')\nax4.grid(True)\n# tag: rand_samples\n# title: Simple pseudo-random numbers\n# size: 70",
"_____no_output_____"
],
[
"sample_size = 500\nrn1 = npr.standard_normal(sample_size)\nrn2 = npr.normal(100, 20, sample_size)\nrn3 = npr.chisquare(df=0.5, size=sample_size)\nrn4 = npr.poisson(lam=1.0, size=sample_size)",
"_____no_output_____"
],
[
"fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(7, 7))\nax1.hist(rn1, bins=25)\nax1.set_title('standard normal')\nax1.set_ylabel('frequency')\nax1.grid(True)\nax2.hist(rn2, bins=25)\nax2.set_title('normal(100, 20)')\nax2.grid(True)\nax3.hist(rn3, bins=25)\nax3.set_title('chi square')\nax3.set_ylabel('frequency')\nax3.grid(True)\nax4.hist(rn4, bins=25)\nax4.set_title('Poisson')\nax4.grid(True)\n# tag: rand_distris\n# title: Pseudo-random numbers from different distributions\n# size: 70",
"_____no_output_____"
]
],
[
[
"## Simulation",
"_____no_output_____"
],
[
"### Random Variables",
"_____no_output_____"
]
],
[
[
"S0 = 100 # initial value\nr = 0.05 # constant short rate\nsigma = 0.25 # constant volatility\nT = 2.0 # in years\nI = 10000 # number of random draws\nST1 = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * npr.standard_normal(I))",
"_____no_output_____"
],
[
"plt.hist(ST1, bins=50)\nplt.xlabel('index level')\nplt.ylabel('frequency')\nplt.grid(True)\n# tag: gbm_T_sn\n# title: Simulated geometric Brownian motion (via +standard_normal+)\n# size: 60",
"_____no_output_____"
],
[
"ST2 = S0 * npr.lognormal((r - 0.5 * sigma ** 2) * T,\n sigma * np.sqrt(T), size=I)",
"_____no_output_____"
],
[
"plt.hist(ST2, bins=50)\nplt.xlabel('index level')\nplt.ylabel('frequency')\nplt.grid(True)\n# tag: gbm_T_ln\n# title: Simulated geometric Brownian motion (via +lognormal+)\n# size: 60",
"_____no_output_____"
],
[
"import scipy.stats as scs",
"_____no_output_____"
],
[
"def print_statistics(a1, a2):\n ''' Prints selected statistics.\n \n Parameters\n ==========\n a1, a2 : ndarray objects\n results object from simulation\n '''\n sta1 = scs.describe(a1)\n sta2 = scs.describe(a2)\n print \"%14s %14s %14s\" % \\\n ('statistic', 'data set 1', 'data set 2')\n print 45 * \"-\"\n print \"%14s %14.3f %14.3f\" % ('size', sta1[0], sta2[0])\n print \"%14s %14.3f %14.3f\" % ('min', sta1[1][0], sta2[1][0])\n print \"%14s %14.3f %14.3f\" % ('max', sta1[1][1], sta2[1][1])\n print \"%14s %14.3f %14.3f\" % ('mean', sta1[2], sta2[2])\n print \"%14s %14.3f %14.3f\" % ('std', np.sqrt(sta1[3]), np.sqrt(sta2[3]))\n print \"%14s %14.3f %14.3f\" % ('skew', sta1[4], sta2[4])\n print \"%14s %14.3f %14.3f\" % ('kurtosis', sta1[5], sta2[5])",
"_____no_output_____"
],
[
"print_statistics(ST1, ST2)",
" statistic data set 1 data set 2\n---------------------------------------------\n size 10000.000 10000.000\n min 27.002 25.828\n max 407.204 362.143\n mean 109.731 110.383\n std 39.714 39.655\n skew 1.070 1.040\n kurtosis 1.897 1.771\n"
]
],
[
[
"### Stochastic Processes",
"_____no_output_____"
],
[
"#### Geometric Brownian Motion",
"_____no_output_____"
]
],
[
[
"I = 10000\nM = 50\ndt = T / M\nS = np.zeros((M + 1, I))\nS[0] = S0\nfor t in range(1, M + 1):\n S[t] = S[t - 1] * np.exp((r - 0.5 * sigma ** 2) * dt \n + sigma * np.sqrt(dt) * npr.standard_normal(I))",
"_____no_output_____"
],
[
"plt.hist(S[-1], bins=50)\nplt.xlabel('index level')\nplt.ylabel('frequency')\nplt.grid(True)\n# tag: gbm_dt_hist\n# title: Simulated geometric Brownian motion at maturity\n# size: 60",
"_____no_output_____"
],
[
"print_statistics(S[-1], ST2)",
" statistic data set 1 data set 2\n---------------------------------------------\n size 10000.000 10000.000\n min 27.244 25.828\n max 402.432 362.143\n mean 110.672 110.383\n std 40.514 39.655\n skew 1.109 1.040\n kurtosis 2.058 1.771\n"
],
[
"plt.plot(S[:, :10], lw=1.5)\nplt.xlabel('time')\nplt.ylabel('index level')\nplt.grid(True)\n# tag: gbm_dt_paths\n# title: Simulated geometric Brownian motion paths\n# size: 60",
"_____no_output_____"
]
],
[
[
"#### Square-Root Diffusion",
"_____no_output_____"
]
],
[
[
"x0 = 0.05\nkappa = 3.0\ntheta = 0.02\nsigma = 0.1",
"_____no_output_____"
],
[
"I = 10000\nM = 50\ndt = T / M\ndef srd_euler():\n xh = np.zeros((M + 1, I))\n x1 = np.zeros_like(xh)\n xh[0] = x0\n x1[0] = x0\n for t in range(1, M + 1):\n xh[t] = (xh[t - 1]\n + kappa * (theta - np.maximum(xh[t - 1], 0)) * dt\n + sigma * np.sqrt(np.maximum(xh[t - 1], 0)) * np.sqrt(dt) \n * npr.standard_normal(I))\n x1 = np.maximum(xh, 0)\n return x1\nx1 = srd_euler()",
"_____no_output_____"
],
[
"plt.hist(x1[-1], bins=50)\nplt.xlabel('value')\nplt.ylabel('frequency')\nplt.grid(True)\n# tag: srd_hist_Euler\n# title: Simulated square-root diffusion at maturity (Euler scheme)\n# size: 60",
"_____no_output_____"
],
[
"plt.plot(x1[:, :10], lw=1.5)\nplt.xlabel('time')\nplt.ylabel('index level')\nplt.grid(True)\n# tag: srd_dt_Euler\n# title: Simulated square-root diffusion paths (Euler scheme)\n# size: 60",
"_____no_output_____"
],
[
"def srd_exact():\n x2 = np.zeros((M + 1, I))\n x2[0] = x0\n for t in range(1, M + 1):\n df = 4 * theta * kappa / sigma ** 2\n c = (sigma ** 2 * (1 - np.exp(-kappa * dt))) / (4 * kappa)\n nc = np.exp(-kappa * dt) / c * x2[t - 1] \n x2[t] = c * npr.noncentral_chisquare(df, nc, size=I)\n return x2\nx2 = srd_exact()",
"_____no_output_____"
],
[
"plt.hist(x2[-1], bins=50)\nplt.xlabel('value')\nplt.ylabel('frequency')\nplt.grid(True)\n# tag: srd_hist_exact\n# title: Simulated square-root diffusion at maturity (exact scheme)\n# size: 60",
"_____no_output_____"
],
[
"plt.plot(x2[:, :10], lw=1.5)\nplt.xlabel('time')\nplt.ylabel('index level')\nplt.grid(True)\n# tag: srd_dt_exact\n# title: Simulated square-root diffusion paths (exact scheme)\n# size: 60",
"_____no_output_____"
],
[
"print_statistics(x1[-1], x2[-1])",
" statistic data set 1 data set 2\n---------------------------------------------\n size 10000.000 10000.000\n min 0.003 0.004\n max 0.053 0.053\n mean 0.020 0.020\n std 0.006 0.006\n skew 0.537 0.625\n kurtosis 0.379 0.714\n"
],
[
"I = 250000\n%time x1 = srd_euler()",
"CPU times: user 662 ms, sys: 13 ms, total: 675 ms\nWall time: 675 ms\n"
],
[
"%time x2 = srd_exact()",
"CPU times: user 1.49 s, sys: 3 ms, total: 1.49 s\nWall time: 1.49 s\n"
],
[
"print_statistics(x1[-1], x2[-1])\nx1 = 0.0; x2 = 0.0",
" statistic data set 1 data set 2\n---------------------------------------------\n size 250000.000 250000.000\n min 0.003 0.004\n max 0.063 0.057\n mean 0.020 0.020\n std 0.006 0.006\n skew 0.563 0.587\n kurtosis 0.504 0.511\n"
]
],
[
[
"#### Stochastic Volatility",
"_____no_output_____"
]
],
[
[
"S0 = 100.\nr = 0.05\nv0 = 0.1\nkappa = 3.0\ntheta = 0.25\nsigma = 0.1\nrho = 0.6\nT = 1.0",
"_____no_output_____"
],
[
"corr_mat = np.zeros((2, 2))\ncorr_mat[0, :] = [1.0, rho]\ncorr_mat[1, :] = [rho, 1.0]\ncho_mat = np.linalg.cholesky(corr_mat)",
"_____no_output_____"
],
[
"cho_mat",
"_____no_output_____"
],
[
"M = 50\nI = 10000\nran_num = npr.standard_normal((2, M + 1, I))",
"_____no_output_____"
],
[
"dt = T / M\nv = np.zeros_like(ran_num[0])\nvh = np.zeros_like(v)\nv[0] = v0\nvh[0] = v0\nfor t in range(1, M + 1):\n ran = np.dot(cho_mat, ran_num[:, t, :])\n vh[t] = (vh[t - 1] + kappa * (theta - np.maximum(vh[t - 1], 0)) * dt\n + sigma * np.sqrt(np.maximum(vh[t - 1], 0)) * np.sqrt(dt) \n * ran[1])\nv = np.maximum(vh, 0)",
"_____no_output_____"
],
[
"S = np.zeros_like(ran_num[0])\nS[0] = S0\nfor t in range(1, M + 1):\n ran = np.dot(cho_mat, ran_num[:, t, :])\n S[t] = S[t - 1] * np.exp((r - 0.5 * v[t]) * dt +\n np.sqrt(v[t]) * ran[0] * np.sqrt(dt))",
"_____no_output_____"
],
[
"fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 5))\nax1.hist(S[-1], bins=50)\nax1.set_xlabel('index level')\nax1.set_ylabel('frequency')\nax1.grid(True)\nax2.hist(v[-1], bins=50)\nax2.set_xlabel('volatility')\nax2.grid(True)\n# tag: sv_hist\n# title: Simulated stochastic volatility model at maturity\n# size: 60",
"_____no_output_____"
],
[
"fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(7, 6))\nax1.plot(S[:, :10], lw=1.5)\nax1.set_ylabel('index level')\nax1.grid(True)\nax2.plot(v[:, :10], lw=1.5)\nax2.set_xlabel('time')\nax2.set_ylabel('volatility')\nax2.grid(True)\n# tag: sv_paths\n# title: Simulated stochastic volatility model paths\n# size: 60",
"_____no_output_____"
],
[
"print_statistics(S[-1], v[-1])",
" statistic data set 1 data set 2\n---------------------------------------------\n size 10000.000 10000.000\n min 20.136 0.176\n max 520.205 0.319\n mean 108.326 0.243\n std 52.848 0.020\n skew 1.756 0.178\n kurtosis 5.536 -0.006\n"
]
],
[
[
"#### Jump-Diffusion",
"_____no_output_____"
]
],
[
[
"S0 = 100.\nr = 0.05\nsigma = 0.2\nlamb = 0.75\nmu = -0.6\ndelta = 0.25\nT = 1.0",
"_____no_output_____"
],
[
"M = 50\nI = 10000\ndt = T / M\nrj = lamb * (np.exp(mu + 0.5 * delta ** 2) - 1)\nS = np.zeros((M + 1, I))\nS[0] = S0\nsn1 = npr.standard_normal((M + 1, I))\nsn2 = npr.standard_normal((M + 1, I))\npoi = npr.poisson(lamb * dt, (M + 1, I))\nfor t in range(1, M + 1, 1):\n S[t] = S[t - 1] * (np.exp((r - rj - 0.5 * sigma ** 2) * dt\n + sigma * np.sqrt(dt) * sn1[t])\n + (np.exp(mu + delta * sn2[t]) - 1)\n * poi[t])\n S[t] = np.maximum(S[t], 0)",
"_____no_output_____"
],
[
"plt.hist(S[-1], bins=50)\nplt.xlabel('value')\nplt.ylabel('frequency')\nplt.grid(True)\n# tag: jd_hist\n# title: Simulated jump diffusion at maturity\n# size: 60",
"_____no_output_____"
],
[
"plt.plot(S[:, :10], lw=1.5)\nplt.xlabel('time')\nplt.ylabel('index level')\nplt.grid(True)\n# tag: jd_paths\n# title: Simulated jump diffusion paths\n# size: 60",
"_____no_output_____"
]
],
[
[
"### Variance Reduction",
"_____no_output_____"
]
],
[
[
"print \"%15s %15s\" % ('Mean', 'Std. Deviation')\nprint 31 * \"-\"\nfor i in range(1, 31, 2):\n npr.seed(1000)\n sn = npr.standard_normal(i ** 2 * 10000)\n print \"%15.12f %15.12f\" % (sn.mean(), sn.std())",
" Mean Std. Deviation\n-------------------------------\n-0.011870394558 1.008752430725\n-0.002815667298 1.002729536352\n-0.003847776704 1.000594044165\n-0.003058113374 1.001086345326\n-0.001685126538 1.001630849589\n-0.001175212007 1.001347684642\n-0.000803969036 1.000159081432\n-0.000601970954 0.999506522127\n-0.000147787693 0.999571756099\n-0.000313035581 0.999646153704\n-0.000178447061 0.999677277878\n 0.000096501709 0.999684346792\n-0.000135677013 0.999823841902\n-0.000015726986 0.999906493379\n-0.000039368519 1.000063091949\n"
],
[
"i ** 2 * 10000",
"_____no_output_____"
],
[
"sn = npr.standard_normal(10000 / 2)\nsn = np.concatenate((sn, -sn))\nnp.shape(sn)",
"_____no_output_____"
],
[
"print \"%15s %15s\" % ('Mean', 'Std. Deviation')\nprint 31 * \"-\"\nfor i in range(1, 31, 2):\n npr.seed(1000)\n sn = npr.standard_normal(i ** 2 * 10000 / 2)\n sn = np.concatenate((sn, -sn))\n print \"%15.12f %15.12f\" % (sn.mean(), sn.std())",
" Mean Std. Deviation\n-------------------------------\n 0.000000000000 1.009653753942\n-0.000000000000 1.000413716783\n 0.000000000000 1.002925061201\n-0.000000000000 1.000755212673\n 0.000000000000 1.001636910076\n-0.000000000000 1.000726758438\n-0.000000000000 1.001621265149\n 0.000000000000 1.001203722778\n-0.000000000000 1.000556669784\n 0.000000000000 1.000113464185\n-0.000000000000 0.999435175324\n 0.000000000000 0.999356961431\n-0.000000000000 0.999641436845\n-0.000000000000 0.999642768905\n-0.000000000000 0.999638303451\n"
],
[
"sn = npr.standard_normal(10000)",
"_____no_output_____"
],
[
"sn.mean()",
"_____no_output_____"
],
[
"sn.std()",
"_____no_output_____"
],
[
"sn_new = (sn - sn.mean()) / sn.std()",
"_____no_output_____"
],
[
"sn_new.mean()",
"_____no_output_____"
],
[
"sn_new.std()",
"_____no_output_____"
],
[
"def gen_sn(M, I, anti_paths=True, mo_match=True):\n ''' Function to generate random numbers for simulation.\n \n Parameters\n ==========\n M : int\n number of time intervals for discretization\n I : int\n number of paths to be simulated\n anti_paths: boolean\n use of antithetic variates\n mo_math : boolean\n use of moment matching\n '''\n if anti_paths is True:\n sn = npr.standard_normal((M + 1, I / 2))\n sn = np.concatenate((sn, -sn), axis=1)\n else:\n sn = npr.standard_normal((M + 1, I))\n if mo_match is True:\n sn = (sn - sn.mean()) / sn.std()\n return sn",
"_____no_output_____"
]
],
[
[
"## Valuation",
"_____no_output_____"
],
[
"### European Options",
"_____no_output_____"
]
],
[
[
"S0 = 100.\nr = 0.05\nsigma = 0.25\nT = 1.0\nI = 50000\ndef gbm_mcs_stat(K):\n ''' Valuation of European call option in Black-Scholes-Merton\n by Monte Carlo simulation (of index level at maturity)\n \n Parameters\n ==========\n K : float\n (positive) strike price of the option\n \n Returns\n =======\n C0 : float\n estimated present value of European call option\n '''\n sn = gen_sn(1, I)\n # simulate index level at maturity\n ST = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * sn[1])\n # calculate payoff at maturity\n hT = np.maximum(ST - K, 0)\n # calculate MCS estimator\n C0 = np.exp(-r * T) * 1 / I * np.sum(hT)\n return C0",
"_____no_output_____"
],
[
"gbm_mcs_stat(K=105.)",
"_____no_output_____"
],
[
"M = 50\ndef gbm_mcs_dyna(K, option='call'):\n ''' Valuation of European options in Black-Scholes-Merton\n by Monte Carlo simulation (of index level paths)\n \n Parameters\n ==========\n K : float\n (positive) strike price of the option\n option : string\n type of the option to be valued ('call', 'put')\n \n Returns\n =======\n C0 : float\n estimated present value of European call option\n '''\n dt = T / M\n # simulation of index level paths\n S = np.zeros((M + 1, I))\n S[0] = S0\n sn = gen_sn(M, I)\n for t in range(1, M + 1):\n S[t] = S[t - 1] * np.exp((r - 0.5 * sigma ** 2) * dt \n + sigma * np.sqrt(dt) * sn[t])\n # case-based calculation of payoff\n if option == 'call':\n hT = np.maximum(S[-1] - K, 0)\n else:\n hT = np.maximum(K - S[-1], 0)\n # calculation of MCS estimator\n C0 = np.exp(-r * T) * 1 / I * np.sum(hT)\n return C0",
"_____no_output_____"
],
[
"gbm_mcs_dyna(K=110., option='call')",
"_____no_output_____"
],
[
"gbm_mcs_dyna(K=110., option='put')",
"_____no_output_____"
],
[
"from bsm_functions import bsm_call_value\nstat_res = []\ndyna_res = []\nanal_res = []\nk_list = np.arange(80., 120.1, 5.)\nnp.random.seed(200000)\nfor K in k_list:\n stat_res.append(gbm_mcs_stat(K))\n dyna_res.append(gbm_mcs_dyna(K))\n anal_res.append(bsm_call_value(S0, K, T, r, sigma))\nstat_res = np.array(stat_res)\ndyna_res = np.array(dyna_res)\nanal_res = np.array(anal_res)",
"_____no_output_____"
],
[
"fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))\nax1.plot(k_list, anal_res, 'b', label='analytical')\nax1.plot(k_list, stat_res, 'ro', label='static')\nax1.set_ylabel('European call option value')\nax1.grid(True)\nax1.legend(loc=0)\nax1.set_ylim(ymin=0)\nwi = 1.0\nax2.bar(k_list - wi / 2, (anal_res - stat_res) / anal_res * 100, wi)\nax2.set_xlabel('strike')\nax2.set_ylabel('difference in %')\nax2.set_xlim(left=75, right=125)\nax2.grid(True)\n# tag: opt_val_comp_1\n# title: Comparsion of static and dynamic Monte Carlo estimator values\n# size: 60",
"_____no_output_____"
],
[
"fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))\nax1.plot(k_list, anal_res, 'b', label='analytical')\nax1.plot(k_list, dyna_res, 'ro', label='dynamic')\nax1.set_ylabel('European call option value')\nax1.grid(True)\nax1.legend(loc=0)\nax1.set_ylim(ymin=0)\nwi = 1.0\nax2.bar(k_list - wi / 2, (anal_res - dyna_res) / anal_res * 100, wi)\nax2.set_xlabel('strike')\nax2.set_ylabel('difference in %')\nax2.set_xlim(left=75, right=125)\nax2.grid(True)\n# tag: opt_val_comp_2\n# title: Comparsion of static and dynamic Monte Carlo estimator values\n# size: 60",
"_____no_output_____"
]
],
[
[
"### American Options",
"_____no_output_____"
]
],
[
[
"def gbm_mcs_amer(K, option='call'):\n ''' Valuation of American option in Black-Scholes-Merton\n by Monte Carlo simulation by LSM algorithm\n \n Parameters\n ==========\n K : float\n (positive) strike price of the option\n option : string\n type of the option to be valued ('call', 'put')\n \n Returns\n =======\n C0 : float\n estimated present value of European call option\n '''\n dt = T / M\n df = np.exp(-r * dt)\n # simulation of index levels\n S = np.zeros((M + 1, I))\n S[0] = S0\n sn = gen_sn(M, I)\n for t in range(1, M + 1):\n S[t] = S[t - 1] * np.exp((r - 0.5 * sigma ** 2) * dt \n + sigma * np.sqrt(dt) * sn[t])\n # case based calculation of payoff\n if option == 'call':\n h = np.maximum(S - K, 0)\n else:\n h = np.maximum(K - S, 0)\n # LSM algorithm\n V = np.copy(h)\n for t in range(M - 1, 0, -1):\n reg = np.polyfit(S[t], V[t + 1] * df, 7)\n C = np.polyval(reg, S[t])\n V[t] = np.where(C > h[t], V[t + 1] * df, h[t])\n # MCS estimator\n C0 = df * 1 / I * np.sum(V[1])\n return C0",
"_____no_output_____"
],
[
"gbm_mcs_amer(110., option='call')",
"_____no_output_____"
],
[
"gbm_mcs_amer(110., option='put')",
"_____no_output_____"
],
[
"euro_res = []\namer_res = []\nk_list = np.arange(80., 120.1, 5.)\nfor K in k_list:\n euro_res.append(gbm_mcs_dyna(K, 'put'))\n amer_res.append(gbm_mcs_amer(K, 'put'))\neuro_res = np.array(euro_res)\namer_res = np.array(amer_res)",
"_____no_output_____"
],
[
"fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))\nax1.plot(k_list, euro_res, 'b', label='European put')\nax1.plot(k_list, amer_res, 'ro', label='American put')\nax1.set_ylabel('call option value')\nax1.grid(True)\nax1.legend(loc=0)\nwi = 1.0\nax2.bar(k_list - wi / 2, (amer_res - euro_res) / euro_res * 100, wi)\nax2.set_xlabel('strike')\nax2.set_ylabel('early exercise premium in %')\nax2.set_xlim(left=75, right=125)\nax2.grid(True)\n# tag: opt_euro_amer\n# title: Comparsion of European and LSM Monte Carlo estimator values\n# size: 60",
"_____no_output_____"
]
],
[
[
"## Risk Measures",
"_____no_output_____"
],
[
"### Value-at-Risk",
"_____no_output_____"
]
],
[
[
"S0 = 100\nr = 0.05\nsigma = 0.25\nT = 30 / 365.\nI = 10000\nST = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * npr.standard_normal(I))",
"_____no_output_____"
],
[
"R_gbm = np.sort(ST - S0)",
"_____no_output_____"
],
[
"plt.hist(R_gbm, bins=50)\nplt.xlabel('absolute return')\nplt.ylabel('frequency')\nplt.grid(True)\n# tag: var_hist_gbm\n# title: Absolute returns of geometric Brownian motion (30d)\n# size: 60",
"_____no_output_____"
],
[
"percs = [0.01, 0.1, 1., 2.5, 5.0, 10.0]\nvar = scs.scoreatpercentile(R_gbm, percs)\nprint \"%16s %16s\" % ('Confidence Level', 'Value-at-Risk')\nprint 33 * \"-\"\nfor pair in zip(percs, var):\n print \"%16.2f %16.3f\" % (100 - pair[0], -pair[1])",
"Confidence Level Value-at-Risk\n---------------------------------\n 99.99 26.072\n 99.90 20.175\n 99.00 15.753\n 97.50 13.265\n 95.00 11.298\n 90.00 8.942\n"
],
[
"dt = 30. / 365 / M\nrj = lamb * (np.exp(mu + 0.5 * delta ** 2) - 1)\nS = np.zeros((M + 1, I))\nS[0] = S0\nsn1 = npr.standard_normal((M + 1, I))\nsn2 = npr.standard_normal((M + 1, I))\npoi = npr.poisson(lamb * dt, (M + 1, I))\nfor t in range(1, M + 1, 1):\n S[t] = S[t - 1] * (np.exp((r - rj - 0.5 * sigma ** 2) * dt\n + sigma * np.sqrt(dt) * sn1[t])\n + (np.exp(mu + delta * sn2[t]) - 1)\n * poi[t])\n S[t] = np.maximum(S[t], 0)",
"_____no_output_____"
],
[
"R_jd = np.sort(S[-1] - S0)",
"_____no_output_____"
],
[
"plt.hist(R_jd, bins=50)\nplt.xlabel('absolute return')\nplt.ylabel('frequency')\nplt.grid(True)\n# tag: var_hist_jd\n# title: Absolute returns of jump diffusion (30d)\n# size: 60",
"_____no_output_____"
],
[
"percs = [0.01, 0.1, 1., 2.5, 5.0, 10.0]\nvar = scs.scoreatpercentile(R_jd, percs)\nprint \"%16s %16s\" % ('Confidence Level', 'Value-at-Risk')\nprint 33 * \"-\"\nfor pair in zip(percs, var):\n print \"%16.2f %16.3f\" % (100 - pair[0], -pair[1])",
"Confidence Level Value-at-Risk\n---------------------------------\n 99.99 75.029\n 99.90 71.833\n 99.00 55.901\n 97.50 45.697\n 95.00 25.993\n 90.00 8.773\n"
],
[
"percs = list(np.arange(0.0, 10.1, 0.1))\ngbm_var = scs.scoreatpercentile(R_gbm, percs)\njd_var = scs.scoreatpercentile(R_jd, percs)",
"_____no_output_____"
],
[
"plt.plot(percs, gbm_var, 'b', lw=1.5, label='GBM')\nplt.plot(percs, jd_var, 'r', lw=1.5, label='JD')\nplt.legend(loc=4)\nplt.xlabel('100 - confidence level [%]')\nplt.ylabel('value-at-risk')\nplt.grid(True)\nplt.ylim(ymax=0.0)\n# tag: var_comp\n# title: Value-at-risk for geometric Brownian motion and jump diffusion\n# size: 60",
"_____no_output_____"
]
],
[
[
"### Credit Value Adjustments",
"_____no_output_____"
]
],
[
[
"S0 = 100.\nr = 0.05\nsigma = 0.2\nT = 1.\nI = 100000\nST = S0 * np.exp((r - 0.5 * sigma ** 2) * T \n + sigma * np.sqrt(T) * npr.standard_normal(I))",
"_____no_output_____"
],
[
"L = 0.5",
"_____no_output_____"
],
[
"p = 0.01",
"_____no_output_____"
],
[
"D = npr.poisson(p * T, I)\nD = np.where(D > 1, 1, D)",
"_____no_output_____"
],
[
"np.exp(-r * T) * 1 / I * np.sum(ST)",
"_____no_output_____"
],
[
"CVaR = np.exp(-r * T) * 1 / I * np.sum(L * D * ST)\nCVaR",
"_____no_output_____"
],
[
"S0_CVA = np.exp(-r * T) * 1 / I * np.sum((1 - L * D) * ST)\nS0_CVA",
"_____no_output_____"
],
[
"S0_adj = S0 - CVaR\nS0_adj",
"_____no_output_____"
],
[
"np.count_nonzero(L * D * ST)",
"_____no_output_____"
],
[
"plt.hist(L * D * ST, bins=50)\nplt.xlabel('loss')\nplt.ylabel('frequency')\nplt.grid(True)\nplt.ylim(ymax=175)\n# tag: cva_hist_stock\n# title: Losses due to risk-neutrally expected default (stock)\n# size: 60",
"_____no_output_____"
],
[
"K = 100.\nhT = np.maximum(ST - K, 0)\nC0 = np.exp(-r * T) * 1 / I * np.sum(hT)\nC0",
"_____no_output_____"
],
[
"CVaR = np.exp(-r * T) * 1 / I * np.sum(L * D * hT)\nCVaR",
"_____no_output_____"
],
[
"C0_CVA = np.exp(-r * T) * 1 / I * np.sum((1 - L * D) * hT)\nC0_CVA",
"_____no_output_____"
],
[
"np.count_nonzero(L * D * hT) # number of losses",
"_____no_output_____"
],
[
"np.count_nonzero(D) # number of defaults",
"_____no_output_____"
],
[
"I - np.count_nonzero(hT) # zero payoff",
"_____no_output_____"
],
[
"plt.hist(L * D * hT, bins=50)\nplt.xlabel('loss')\nplt.ylabel('frequency')\nplt.grid(True)\nplt.ylim(ymax=350)\n# tag: cva_hist_opt\n# title: Losses due to risk-neutrally expected default (call option)\n# size: 60",
"_____no_output_____"
]
],
[
[
"## Conclusions",
"_____no_output_____"
],
[
"## Further Reading",
"_____no_output_____"
],
[
"<img src=\"http://hilpisch.com/tpq_logo.png\" alt=\"The Python Quants\" width=\"35%\" align=\"right\" border=\"0\"><br>\n\n<a href=\"http://www.pythonquants.com\" target=\"_blank\">www.pythonquants.com</a> | <a href=\"http://twitter.com/dyjh\" target=\"_blank\">@dyjh</a>\n\n<a href=\"mailto:[email protected]\">[email protected]</a>\n\n**Python Quant Platform** |\n<a href=\"http://oreilly.quant-platform.com\">http://oreilly.quant-platform.com</a>\n\n**Derivatives Analytics with Python** |\n<a href=\"http://www.derivatives-analytics-with-python.com\" target=\"_blank\">Derivatives Analytics @ Wiley Finance</a>\n\n**Python for Finance** |\n<a href=\"http://shop.oreilly.com/product/0636920032441.do\" target=\"_blank\">Python for Finance @ O'Reilly</a>",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
ec666b65e5b6a96d9b84ce8cbbe4737b6e33d1fd | 2,668 | ipynb | Jupyter Notebook | demo/Untitled.ipynb | ShadyMikey/OptiMol | e18e35e8ea65a73818e901907395c7fd2bb24296 | [
"MIT"
]
| null | null | null | demo/Untitled.ipynb | ShadyMikey/OptiMol | e18e35e8ea65a73818e901907395c7fd2bb24296 | [
"MIT"
]
| 4 | 2020-03-09T18:18:40.000Z | 2020-03-14T06:04:15.000Z | demo/Untitled.ipynb | ng-git/OptiMol | e18e35e8ea65a73818e901907395c7fd2bb24296 | [
"MIT"
]
| 2 | 2020-03-10T23:06:06.000Z | 2020-03-11T18:06:33.000Z | 37.577465 | 864 | 0.582834 | [
[
[
"from optimol import data_compile\nimport pandas as pd\n\n\nid_list = data_compile.get_id()\n\n# this give the dataframes from database using the id\ndata = data_compile.get_df_database(id_list[34])\n# for item in data:\n# print(item)\n\n# this give out a combined dataframe to be used for ML\ndata_set = data_compile.get_all_dataset(set1=3)\n# print(data_set)\n\n# this make the dataframe from user input\nuser_set = data_compile.get_df_user(['./user.txt'])\nprint(user_set)\n",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code"
]
]
|
ec6684e75e510429f4089db8ea3a882b1b111b16 | 31,598 | ipynb | Jupyter Notebook | Notebooks/Using OntologyKP to get subclasses.ipynb | sierra-moxon/TranslatorArchitecture | 3bc3831976b89f98e76e7d28dde96a31707ea718 | [
"MIT"
]
| null | null | null | Notebooks/Using OntologyKP to get subclasses.ipynb | sierra-moxon/TranslatorArchitecture | 3bc3831976b89f98e76e7d28dde96a31707ea718 | [
"MIT"
]
| null | null | null | Notebooks/Using OntologyKP to get subclasses.ipynb | sierra-moxon/TranslatorArchitecture | 3bc3831976b89f98e76e7d28dde96a31707ea718 | [
"MIT"
]
| 1 | 2021-05-05T17:51:05.000Z | 2021-05-05T17:51:05.000Z | 42.074567 | 1,914 | 0.549908 | [
[
[
"import requests",
"_____no_output_____"
],
[
"def post(url, message, params=None):\n if params is None:\n response = requests.post(url, json=message)\n else:\n response = requests.post(url, json=message, params=params)\n if not response.status_code == 200:\n print('error:', response.status_code)\n return {}\n return response.json()",
"_____no_output_____"
],
[
"def get_ontology_descendents(curie,btype):\n m ={\n \"message\": {\n \"query_graph\": {\n \"nodes\": {\n \"a\": {\n \"id\": curie\n },\n \"b\": {\n \"category\": btype\n }\n },\n \"edges\": {\n \"ab\": {\n \"subject\": \"b\",\n \"object\": \"a\",\n \"predicate\": \"biolink:subclass_of\"\n }\n }\n }}}\n url = 'https://stars-app.renci.org/sparql-kp/query'\n response = post(url,m)\n nodes = response['message']['knowledge_graph']['nodes']\n tnodes = [ {\"identifier\":k, \"label\": v['name']} for k,v in nodes.items()]\n return {curie: tnodes}\n \n \n",
"_____no_output_____"
],
[
"def convert_to_preferred(curie,allowedlist):\n j = {'curies':[curie]}\n result = post('https://nodenormalization-sri.renci.org/get_normalized_nodes',j)\n new_ids = [ v['identifier'] for v in result[curie]['equivalent_identifiers'] ]\n for nid in new_ids:\n if nid.split(':')[0] in allowedlist:\n return nid\n return None",
"_____no_output_____"
],
[
"m = 'MONDO:0005015'",
"_____no_output_____"
],
[
"get_ontology_descendents(m,'biolink:Disease')",
"_____no_output_____"
],
[
"mlist = ['MONDO:0005015', 'MONDO:0005148']\nget_ontology_descendents(mlist,'biolink:Disease')",
"error: 400\n"
],
[
"t2d_doid='DOID:1909'",
"_____no_output_____"
],
[
"convert_to_preferred(t2d_doid,['MONDO'])",
"_____no_output_____"
],
[
"mondo_descendents = get_ontology_descendents(m,'biolink:Disease')",
"_____no_output_____"
],
[
"mondo_descendent_ids = [x['identifier'] for x in mondo_descendents['MONDO:0005015']]",
"_____no_output_____"
],
[
"mondo_descendent_ids",
"_____no_output_____"
],
[
"for mid in mondo_descendent_ids:\n print(mid,convert_to_preferred(mid,['DOID']))",
"MONDO:0011572 DOID:0110755\nMONDO:0011068 DOID:0110751\nMONDO:0011123 DOID:0110753\nMONDO:0018573 None\nMONDO:0019193 DOID:0080300\nMONDO:0012919 DOID:0110757\nMONDO:0015967 None\nMONDO:0010020 DOID:0111136\nMONDO:0018581 None\nMONDO:0009100 None\nMONDO:0030089 None\nMONDO:0018625 None\nMONDO:0011273 None\nMONDO:0010864 DOID:0110746\nMONDO:0020525 DOID:0060334\nMONDO:0009419 None\nMONDO:0010802 DOID:0111733\nMONDO:0012071 DOID:0111135\nMONDO:0012522 None\nMONDO:0014686 None\nMONDO:0012920 DOID:0110758\nMONDO:0011027 None\nMONDO:0016464 None\nMONDO:0018911 DOID:0050524\nMONDO:0010950 DOID:0110747\nMONDO:0011363 None\nMONDO:0014458 None\nMONDO:0014523 None\nMONDO:0010255 None\nMONDO:0011072 None\nMONDO:0016422 None\nMONDO:0011033 DOID:0110752\nMONDO:0014589 DOID:0111110\nMONDO:0010026 DOID:0111454\nMONDO:0100165 None\nMONDO:0012819 DOID:1837\nMONDO:0011955 None\nMONDO:0008185 None\nMONDO:0014497 None\nMONDO:0008491 DOID:13366\nMONDO:0010862 DOID:0110743\nMONDO:0009517 DOID:0050470\nMONDO:0012923 DOID:0111137\nMONDO:0014674 DOID:0111111\nMONDO:0012422 DOID:0110756\nMONDO:0011986 None\nMONDO:0009192 DOID:0090060\nMONDO:0012818 DOID:0111107\nMONDO:0013242 DOID:0111109\nMONDO:0012436 DOID:0060638\nMONDO:0005827 DOID:11712\nMONDO:0011668 DOID:0111104\nMONDO:0100164 DOID:0060639\nMONDO:0013240 DOID:0111108\nMONDO:0011167 DOID:0110745\nMONDO:0012480 None\nMONDO:0013078 DOID:0110761\nMONDO:0018320 None\nMONDO:0009575 DOID:0090117\nMONDO:0008763 DOID:0050473\nMONDO:0015308 None\nMONDO:0010773 None\nMONDO:0012961 DOID:0110760\nMONDO:0006920 DOID:11716\nMONDO:0010861 DOID:0110742\nMONDO:0007452 DOID:0111099\nMONDO:0020569 None\nMONDO:0018883 None\nMONDO:0014488 None\nMONDO:0005147 DOID:9744\nMONDO:0011016 DOID:0110750\nMONDO:0018629 None\nMONDO:0005406 DOID:11714\nMONDO:0008696 None\nMONDO:0011168 DOID:0110749\nMONDO:0010785 None\nMONDO:0007453 DOID:0111100\nMONDO:0030088 None\nMONDO:0012921 DOID:0110759\nMONDO:0019207 None\nMONDO:0013225 DOID:0111138\nMONDO:0010894 
DOID:0111102\nMONDO:0012513 DOID:0111106\nMONDO:0014991 DOID:0070008\nMONDO:0011302 DOID:0110754\nMONDO:0011667 DOID:0111103\nMONDO:0011073 None\nMONDO:0008812 None\nMONDO:0012520 None\nMONDO:0007454 DOID:0110741\nMONDO:0013478 DOID:0070205\nMONDO:0005148 DOID:9352\nMONDO:0016391 DOID:11717\nMONDO:0010863 DOID:0110744\nMONDO:0017230 None\nMONDO:0030087 None\nMONDO:0012348 DOID:0111105\nMONDO:0005015 DOID:9351\nMONDO:0019017 None\nMONDO:0007669 DOID:0111101\nMONDO:0012192 None\nMONDO:0018575 None\nMONDO:0009874 None\n"
],
[
"import os \nfrom SPARQLWrapper import SPARQLWrapper2, JSON, POSTDIRECTLY, POST \nfrom string import Template \n \n \n \nclass TripleStore(object): \n \"\"\" Connect to a SPARQL endpoint and provide services for loading and executing queries.\"\"\" \n \n def __init__(self, hostname): \n self.service = SPARQLWrapper2 (hostname) \n \n def get_template (self, query_name): \n \"\"\" Load a template given a template name \"\"\" \n return Template (self.get_template_text (query_name)) \n \n def get_template_text (self, query_name): \n \"\"\" Get the text of a template given its name \"\"\" \n query = None \n fn = os.path.join(os.path.dirname(__file__), 'query', \n '{0}.sparql'.format (query_name)) \n with open (fn, 'r') as stream: \n query = stream.read () \n return query \n \n def execute_query (self, query, post=False): \n \"\"\" Execute a SPARQL query. \n \n :param query: A SPARQL query. \n :return: Returns a JSON formatted object. \n \"\"\" \n if post: \n self.service.setRequestMethod(POSTDIRECTLY) \n self.service.setMethod(POST) \n self.service.setQuery (query) \n self.service.setReturnFormat (JSON) \n return self.service.query().convert () \n \n def query (self, query_text, outputs, flat=False, post = False): \n \"\"\" Execute a fully formed query and return results. \"\"\" \n response = self.execute_query (query_text, post) \n result = None \n if flat: \n result = list(map(lambda b : [ b[val].value if val in b else None for val in outputs ], response.bindings )) \n else: \n result = list(map(lambda b : { val : b[val].value if val in b else None for val in outputs }, response.bindings )) \n return result \n \n def query_template (self, template_text, outputs, inputs=[], post = False): \n \"\"\" Given template text, inputs, and outputs, execute a query. 
\"\"\" \n return self.query (Template (template_text).safe_substitute (**inputs), outputs, post= post) \n \n def query_template_file (self, template_file, outputs, inputs=[]): \n \"\"\" Given the name of a template file, inputs, and outputs, execute a query. \"\"\" \n return self.query (self.get_template_text (template_file), inputs, outputs) \n\n",
"_____no_output_____"
],
[
" def get_subclasses():\n text = \"\"\"\n PREFIX skos: <http://www.w3.org/2004/02/skos/core#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX obo: <http://purl.obolibrary.org/obo/>\nPREFIX mondo: <http://purl.obolibrary.org/obo/MONDO_>\nPREFIX biolink: <https://w3id.org/biolink/vocab/>\nPREFIX linkml: <https://w3id.org/linkml/>\n\nSELECT DISTINCT *\n WHERE\n { \n?s rdfs:subClassOf mondo:0005148 .\n?s rdfs:label ?s_label .\n }\nLIMIT 100\n \"\"\"\n rr = self.triplestore.query_template(\n #inputs={'super': }, \\\n outputs=['s', 's_label'], \\\n template_text=text \\\n )\n return rr",
"_____no_output_____"
],
[
"get_subclasses()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec669079f536ab65c60ec7611f122e952f463556 | 42,583 | ipynb | Jupyter Notebook | Workflow/1_Data Cleaning.ipynb | crystal-ctrl/nlp_project | e555112e527c263b74c120989b1abd0a4edcf805 | [
"MIT"
]
| 2 | 2021-06-28T18:53:44.000Z | 2021-08-01T18:26:56.000Z | Workflow/1_Data Cleaning.ipynb | crystal-ctrl/nlp_project | e555112e527c263b74c120989b1abd0a4edcf805 | [
"MIT"
]
| null | null | null | Workflow/1_Data Cleaning.ipynb | crystal-ctrl/nlp_project | e555112e527c263b74c120989b1abd0a4edcf805 | [
"MIT"
]
| null | null | null | 41.912402 | 224 | 0.441092 | [
[
[
"## Goal:\nGet the data in a clean, standard format for further analysis\n\n## Data Cleaning\n- Missing values\n- Duplicates\n- Remove unnecessary columns",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport datetime\nimport pickle",
"_____no_output_____"
],
[
"df = pd.read_csv(\"./data/metadata.csv\", parse_dates=['publish_time'])\ndf.head()",
"/Users/crystalhuang/anaconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3146: DtypeWarning: Columns (1,4,5,6,13,14,15,16) have mixed types.Specify dtype option on import or set low_memory=False.\n has_raised = await self.run_ast_nodes(code_ast.body, cell_name,\n"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 579255 entries, 0 to 579254\nData columns (total 19 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 cord_uid 579255 non-null object \n 1 sha 193890 non-null object \n 2 source_x 579255 non-null object \n 3 title 578966 non-null object \n 4 doi 311662 non-null object \n 5 pmcid 202603 non-null object \n 6 pubmed_id 269153 non-null object \n 7 license 579255 non-null object \n 8 abstract 423055 non-null object \n 9 publish_time 579036 non-null datetime64[ns]\n 10 authors 563977 non-null object \n 11 journal 541087 non-null object \n 12 mag_id 0 non-null float64 \n 13 who_covidence_id 247058 non-null object \n 14 arxiv_id 7597 non-null object \n 15 pdf_json_files 193890 non-null object \n 16 pmc_json_files 156354 non-null object \n 17 url 333840 non-null object \n 18 s2_id 528405 non-null float64 \ndtypes: datetime64[ns](1), float64(2), object(16)\nmemory usage: 84.0+ MB\n"
],
[
"papers = df.drop([\"sha\", \"pmcid\", \"pubmed_id\", \"license\", \"mag_id\", \"who_covidence_id\",\n \"arxiv_id\", \"pdf_json_files\", \"pmc_json_files\", \"s2_id\"], axis=1)\npapers.head()",
"_____no_output_____"
],
[
"# dropna for publish time\npapers.dropna(subset=[\"publish_time\"], inplace=True)\n# create column year\npapers['publish_year'] = pd.DatetimeIndex(papers.publish_time).year",
"_____no_output_____"
],
[
"# Drop missing values\nprint(\"Before dropna: \", papers.shape)\npapers.dropna(subset=[\"abstract\"], inplace=True)\npapers = papers[papers.abstract != \"Unknown\"]\nprint(\"After dropna: \", papers.shape)",
"Before dropna: (579036, 10)\nAfter dropna: (422710, 10)\n"
],
[
"# Drop duplicates\npapers = papers.sort_values(\"publish_time\").reset_index(drop=True)\npapers.drop_duplicates(subset=[\"title\",\"abstract\"],keep=\"last\", inplace=True)\nprint(\"After drop duplicates: \", papers.shape)",
"After drop duplicates: (393715, 10)\n"
],
[
"papers.drop_duplicates(subset=[\"cord_uid\"],keep=\"last\", inplace=True)\npapers.drop_duplicates(subset=[\"title\"],keep=\"last\", inplace=True)\nprint(\"After drop duplicates: \", papers.shape)",
"After drop duplicates: (347779, 10)\n"
],
[
"papers.head()",
"_____no_output_____"
],
[
"papers_2020 = papers[(papers.publish_year >= 2020)].reset_index(drop=True)",
"_____no_output_____"
],
[
"print(papers_2020.shape)\npapers_2020.sample(10)",
"(268611, 10)\n"
],
[
"# Pickle it for later use\npapers_2020.to_pickle(\"./data/clean_df.pkl\")",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec66a23a294a62221f1be39dc9a3626d2fa14c12 | 7,427 | ipynb | Jupyter Notebook | ipython/kinetics_library_to_training.ipynb | CanePan-cc/CanePanWorkshop | 349a4af759cf8877197772cd7eaca1e51d46eff5 | [
"MIT"
]
| 1 | 2020-03-17T13:16:51.000Z | 2020-03-17T13:16:51.000Z | ipython/kinetics_library_to_training.ipynb | CanePan-cc/CanePanWorkshop | 349a4af759cf8877197772cd7eaca1e51d46eff5 | [
"MIT"
]
| null | null | null | ipython/kinetics_library_to_training.ipynb | CanePan-cc/CanePanWorkshop | 349a4af759cf8877197772cd7eaca1e51d46eff5 | [
"MIT"
]
| 1 | 2018-10-03T19:36:40.000Z | 2018-10-03T19:36:40.000Z | 31.470339 | 308 | 0.564696 | [
[
[
"# Convert Kinetics Library to Training Reactions Script\n\nSpecify the kinetics library name below and run the script. It automatically overwrites the training reactions files it needs to. Then you should commit those files.\n\nThis script only trains safely. In other words, if a single match from an RMG family is found, a training reaction is created. Sometimes, there are no matches from RMG reaction families, or multiple matches. This indicates an error that requires manual fixing, and a printout is given in the script.",
"_____no_output_____"
]
],
[
[
"# Set libraries to load reactions from; set to None to load all libraries\nlibraries = ['vinylCPD_H']\n\n# Set families to add training reactions to; either 'all' or a list, e.g. ['R_Addition_MultipleBond']\nfamilies = ['Intra_R_Add_Endocyclic']\n\n# Specify whether to plot kinetics comparisons\ncompare_kinetics = True\n\n# Specify whether to print library reactions which don't fit in the specified families\n# This can result in a lot of unnecessary output if only using a few families\nshow_all = False\n\n# Specify whether to prioritize aromatic resonance structures to reduce cases of multiple matches\nfilter_aromatic = True\n\n# Specify whether to use verbose comments when averaging tree\nverbose_comments = False",
"_____no_output_____"
],
[
"from rmgpy import settings\nfrom rmgpy.data.rmg import RMGDatabase\nfrom kinetics_library_to_training_tools import *",
"_____no_output_____"
]
],
[
[
"\n## Step 1: Load RMG-database with specified libraries and families",
"_____no_output_____"
]
],
[
[
"database = RMGDatabase()\ndatabase.load(\n path = settings['database.directory'],\n thermo_libraries = ['primaryThermoLibrary'], # Can add others if necessary\n kinetics_families = families,\n reaction_libraries = libraries,\n kinetics_depositories = ['training'],\n)\n# If we want accurate kinetics comparison, add existing training reactions and fill tree by averaging\nif compare_kinetics:\n for family in database.kinetics.families.values():\n family.add_rules_from_training(thermo_database=database.thermo)\n family.fill_rules_by_averaging_up(verbose=verbose_comments)",
"_____no_output_____"
]
],
[
[
"## Step 2a: Generate library reactions from families to get proper labels",
"_____no_output_____"
]
],
[
[
"master_dict, multiple_dict = process_reactions(database,\n libraries,\n families,\n compare_kinetics=compare_kinetics,\n show_all=show_all,\n filter_aromatic=filter_aromatic)",
"_____no_output_____"
]
],
[
[
"## Step 2b (optional): Review and select reactions to be added",
"_____no_output_____"
]
],
[
[
"review_reactions(master_dict, prompt=True)",
"_____no_output_____"
]
],
[
[
"## Step 2c (optional): Manual processing for reactions with multiple matches",
"_____no_output_____"
]
],
[
[
"manual_selection(master_dict, multiple_dict, database)",
"_____no_output_____"
]
],
[
[
"## Step 2d: Final review of reactions to be added",
"_____no_output_____"
]
],
[
[
"review_reactions(master_dict, prompt=False)",
"_____no_output_____"
]
],
[
[
"## Step 3: Write the new training reactions to the database",
"_____no_output_____"
]
],
[
[
"for library_name, reaction_dict in master_dict.items():\n library = database.kinetics.libraries[library_name]\n \n for family_name, reaction_list in reaction_dict.items():\n print('Adding training reactions from {0} to {1}...'.format(library_name, family_name))\n\n family = database.kinetics.families[family_name]\n try:\n depository = family.get_training_depository()\n except:\n raise Exception('Unable to find training depository in {0}. Check that one exists.'.format(family_name))\n\n print('Training depository previously had {} rxns. Now adding {} new rxn(s).'.format(len(depository.entries), len(reaction_list)))\n\n ref_list = []\n type_list = []\n short_list = []\n long_list = []\n \n for reaction in reaction_list:\n # Get the original entry to retrieve metadata\n orig_entry = library.entries[reaction.index]\n short_desc = orig_entry.short_desc\n long_desc = 'Training reaction from kinetics library: {0}\\nOriginal entry: {1}'.format(library_name, orig_entry.label)\n if orig_entry.long_desc:\n long_desc += '\\n' + orig_entry.long_desc\n \n ref_list.append(orig_entry.reference)\n type_list.append(orig_entry.reference_type)\n short_list.append(short_desc)\n long_list.append(long_desc)\n \n family.save_training_reactions(\n reaction_list,\n reference=ref_list,\n reference_type=type_list,\n short_desc=short_list,\n long_desc=long_list,\n )",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec66b4c94b692eae9eafe9daba90e376d1dd6158 | 28,464 | ipynb | Jupyter Notebook | exercises/florian/moutain_car/n_step_2nd_try.ipynb | NLeSC/reinforcement-learning-course | e800a7b7b9ada02f8d6b0438c4eb26a4eca8f31d | [
"Apache-2.0"
]
| 2 | 2019-05-22T11:24:53.000Z | 2019-12-20T09:30:16.000Z | exercises/florian/moutain_car/n_step_2nd_try.ipynb | NLeSC/reinforcement-learning-course | e800a7b7b9ada02f8d6b0438c4eb26a4eca8f31d | [
"Apache-2.0"
]
| null | null | null | exercises/florian/moutain_car/n_step_2nd_try.ipynb | NLeSC/reinforcement-learning-course | e800a7b7b9ada02f8d6b0438c4eb26a4eca8f31d | [
"Apache-2.0"
]
| 1 | 2019-04-02T13:11:04.000Z | 2019-04-02T13:11:04.000Z | 117.619835 | 20,616 | 0.850689 | [
[
[
"import numpy as np\nimport gym\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.kernel_approximation import RBFSampler\n\nenv = gym.make('MountainCar-v0')\n\ndef get_transformer():\n #sampler = RBFSampler(gamma=2.0, n_components=1000, random_state=1234)\n n_components=500\n sampler = FeatureUnion([\n (\"rbf1\", RBFSampler(gamma=5.0, n_components=n_components)),\n (\"rbf2\", RBFSampler(gamma=2.0, n_components=n_components)),\n (\"rbf3\", RBFSampler(gamma=1.0, n_components=n_components)),\n (\"rbf4\", RBFSampler(gamma=0.5, n_components=n_components))\n ])\n \n #from the course solution: \n scaler = StandardScaler()\n\n observation_sample = np.vstack([env.observation_space.sample() for i in range(10000)])\n scaler.fit(observation_sample)\n return sampler.fit(scaler.transform(observation_sample)), scaler ",
"_____no_output_____"
],
[
"def G(rewards, gamma, state_estimate):\n result = 0\n factor = 1\n for reward in rewards:\n result += factor *reward\n factor *= gamma\n result += factor*state_estimate\n return result\n\n\ndef decide_action(predictions, epsilon):\n if np.random.random() < epsilon:\n return env.action_space.sample()\n else:\n return np.argmax(np.stack(predictions).T)\n \n \ndef run(env, gamma, epsilon, n_step = 5, render = False):\n observation = env.reset()\n # Transform observation (into higher dimensional feature space)\n transformed_observation = transformer.transform(scaler.transform([observation]))\n \n done = False\n action = None\n total_reward = 0\n \n history = [] #state, reward, is_done, info\n states = []\n actions = []\n \n while(done == False): \n\n # Make prediction\n predictions = [m.predict(transformed_observation) for m in models]\n # Choose action & do step\n action = decide_action(predictions, epsilon)\n next_observation, reward, done, info = env.step(action)\n next_transformed_observation = transformer.transform(scaler.transform([next_observation]))\n \n history.append(reward)\n states.append(observation)\n actions.append(action)\n \n rewards = [x for x in history[::-1]][:n_step]\n # Make prediction and calculate value\n next_predictions = [m.predict(next_transformed_observation) for m in models]\n #new_value = reward + gamma * max(next_predictions)\n new_value = G(rewards, gamma, np.max(next_predictions))\n \n # Update models\n #models[action].partial_fit(transformed_observation, [new_value[0]])\n n = min(n_step, len(rewards))\n models[actions[-n]].partial_fit(transformer.transform(scaler.transform([states[-n]]), [new_value])\n #models[action].partial_fit(transformed_observation, [new_value])\n \n # Update observation:\n observation = next_observation\n transformed_observation = next_transformed_observation\n \n if(render):\n env.render()\n total_reward += reward\n \n if render:\n env.close()\n return total_reward ",
"_____no_output_____"
],
[
"observation = env.reset()\ntransformer, scaler = get_transformer()\ntransformed_observation = transformer.transform([observation])\nprint('transformed_observation.shape',transformed_observation.shape)",
"transformed_observation.shape (1, 2000)\n"
],
[
"# Initialize model\nmodels = [SGDRegressor() for i in range(env.action_space.n)]\nfor model in models:\n model.partial_fit(transformed_observation,[0])\n\n# Run many epochs (\"learn to drive...\")\ntotal_rewards = []\ngamma = 0.99\neps_initial = 0.1\neps_decay = 0.001\n\nn_step = 5\n\nfor i in range(2000):\n epsilon = eps_initial * np.exp(-eps_decay * i)\n total_rewards.append(run(env, gamma, epsilon, n_step = n_step)) \n if (i+1)%200 == 0:\n print(\"Total reward for run \", i+1, \" : \", total_rewards[-1])\n",
"C:\\Users\\FlorianHuber\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\stochastic_gradient.py:128: FutureWarning: max_iter and tol parameters have been added in <class 'sklearn.linear_model.stochastic_gradient.SGDRegressor'> in 0.19. If both are left unset, they default to max_iter=5 and tol=None. If tol is not None, max_iter defaults to max_iter=1000. From 0.21, default max_iter will be 1000, and default tol will be 1e-3.\n \"and default tol will be 1e-3.\" % type(self), FutureWarning)\n"
],
[
"import matplotlib.pyplot as plt\n\nrewards_array = np.array(total_rewards)\navg_reward = []\navg_over = 100\nfor i in range(len(rewards)):\n avg_reward.append(rewards_array[max(0, i - avg_over):(i +1)].mean())\n\nfig = plt.figure(figsize=(10, 5))\n#plt.plot(rewards)\nplt.plot(avg_reward)\nplt.title(\"Total rewards\")",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec66bd6f29ceae2b413e682d036b5d3de004c9dd | 824,394 | ipynb | Jupyter Notebook | dog_app.ipynb | henzilfernandes/dog-project | bbab518f1986795a1d59e47020cfed3cd87a8a3a | [
"MIT"
]
| null | null | null | dog_app.ipynb | henzilfernandes/dog-project | bbab518f1986795a1d59e47020cfed3cd87a8a3a | [
"MIT"
]
| null | null | null | dog_app.ipynb | henzilfernandes/dog-project | bbab518f1986795a1d59e47020cfed3cd87a8a3a | [
"MIT"
]
| null | null | null | 509.514215 | 211,152 | 0.934335 | [
[
[
"## Convolutional Neural Networks\n\n## Project: Write an Algorithm for a Dog Identification App \n\n---\n\nIn this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! \n\n> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \\n\",\n \"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.\n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.\n\nThe rubric contains _optional_ \"Stand Out Suggestions\" for enhancing the project beyond the minimum requirements. 
If you decide to pursue the \"Stand Out Suggestions\", you should include the code in this IPython notebook.\n\n\n\n---\n### Why We're Here \n\nIn this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). \n\n\n\nIn this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!\n\n### The Road Ahead\n\nWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.\n\n* [Step 0](#step0): Import Datasets\n* [Step 1](#step1): Detect Humans\n* [Step 2](#step2): Detect Dogs\n* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)\n* [Step 4](#step4): Use a CNN to Classify Dog Breeds (using Transfer Learning)\n* [Step 5](#step5): Create a CNN to Classify Dog Breeds (using Transfer Learning)\n* [Step 6](#step6): Write your Algorithm\n* [Step 7](#step7): Test Your Algorithm\n\n---\n<a id='step0'></a>\n## Step 0: Import Datasets\n\n### Import Dog Dataset\n\nIn the code cell below, we import a dataset of dog images. 
We populate a few variables through the use of the `load_files` function from the scikit-learn library:\n- `train_files`, `valid_files`, `test_files` - numpy arrays containing file paths to images\n- `train_targets`, `valid_targets`, `test_targets` - numpy arrays containing onehot-encoded classification labels \n- `dog_names` - list of string-valued dog breed names for translating labels",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_files \nfrom keras.utils import np_utils\nimport numpy as np\nfrom glob import glob\n\n# define function to load train, test, and validation datasets\ndef load_dataset(path):\n data = load_files(path)\n dog_files = np.array(data['filenames'])\n dog_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return dog_files, dog_targets\n\n# load train, test, and validation datasets\ntrain_files, train_targets = load_dataset('/data/dog_images/train')\nvalid_files, valid_targets = load_dataset('/data/dog_images/valid')\ntest_files, test_targets = load_dataset('/data/dog_images/test')\n\n# load list of dog names\ndog_names = [item[20:-1] for item in sorted(glob(\"/data/dog_images/train/*/\"))]\n\n# print statistics about the dataset\nprint('There are %d total dog categories.' % len(dog_names))\nprint('There are %s total dog images.\\n' % len(np.hstack([train_files, valid_files, test_files])))\nprint('There are %d training dog images.' % len(train_files))\nprint('There are %d validation dog images.' % len(valid_files))\nprint('There are %d test dog images.'% len(test_files))",
"Using TensorFlow backend.\n"
]
],
[
[
"### Import Human Dataset\n\nIn the code cell below, we import a dataset of human images, where the file paths are stored in the numpy array `human_files`.",
"_____no_output_____"
]
],
[
[
"import random\nrandom.seed(8675309)\n\n# load filenames in shuffled human dataset\nhuman_files = np.array(glob(\"/data/lfw/*/*\"))\nrandom.shuffle(human_files)\n\n# print statistics about the dataset\nprint('There are %d total human images.' % len(human_files))",
"There are 13233 total human images.\n"
]
],
[
[
"---\n<a id='step1'></a>\n## Step 1: Detect Humans\n\nWe use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory.\n\nIn the next code cell, we demonstrate how to use this detector to find human faces in a sample image.",
"_____no_output_____"
]
],
[
[
"import cv2 \nimport matplotlib.pyplot as plt \n%matplotlib inline \n\n# extract pre-trained face detector\nface_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n\n# load color (BGR) image\nimg = cv2.imread(human_files[3])\n# convert BGR image to grayscale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# find faces in image\nfaces = face_cascade.detectMultiScale(gray)\n\n# print number of faces detected in the image\nprint('Number of faces detected:', len(faces))\n\n# get bounding box for each detected face\nfor (x,y,w,h) in faces:\n # add bounding box to color image\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n \n# convert BGR image to RGB for plotting\ncv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n# display the image, along with bounding box\nplt.imshow(cv_rgb)\nplt.show()",
"Number of faces detected: 1\n"
]
],
[
[
"Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. \n\nIn the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.\n\n### Write a Human Face Detector\n\nWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.",
"_____no_output_____"
]
],
[
[
"# returns \"True\" if face is detected in image stored at img_path\ndef face_detector(img_path):\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Assess the Human Face Detector\n\n__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. \n- What percentage of the first 100 images in `human_files` have a detected human face? \n- What percentage of the first 100 images in `dog_files` have a detected human face? \n\nIdeally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.\n\n__Answer:__ ",
"_____no_output_____"
]
],
[
[
"human_files_short = human_files[:100]\ndog_files_short = train_files[:100]\n# Do NOT modify the code above this line.\n\n## TODO: Test the performance of the face_detector algorithm \n## on the images in human_files_short and dog_files_short.\nh_count = 0\nd_count = 0\n\nfor human_file in human_files_short:\n if face_detector(human_file):\n h_count += 1\n\nfor dog_file in dog_files_short:\n if face_detector(dog_file):\n d_count += 1\n\nprint(\"{}%\".format(h_count))\nprint(\"{}%\".format(d_count))",
"100%\n11%\n"
]
],
[
[
"__Question 2:__ This algorithmic choice necessitates that we communicate to the user that we accept human images only when they provide a clear view of a face (otherwise, we risk having unneccessarily frustrated users!). In your opinion, is this a reasonable expectation to pose on the user? If not, can you think of a way to detect humans in images that does not necessitate an image with a clearly presented face?\n\n__Answer:__ It doesn't seem to be a reasonable expectation to pose to the user. Many use cases where user migt want to detect human images, need not have clear view of the face. In that case we might need to detect other parts like legs or hands. Deep learning of images of humans with different combinations such having proper face or having only body parts visible or both, could be better way to detect humans in images.\n\nWe suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on each of the datasets.",
"_____no_output_____"
]
],
[
[
"## (Optional) TODO: Report the performance of another \n## face detection algorithm on the LFW dataset\n### Feel free to use as many code cells as needed.",
"_____no_output_____"
]
],
[
[
"---\n<a id='step2'></a>\n## Step 2: Detect Dogs\n\nIn this section, we use a pre-trained [ResNet-50](http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006) model to detect dogs in images. Our first line of code downloads the ResNet-50 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). Given an image, this pre-trained ResNet-50 model returns a prediction (derived from the available categories in ImageNet) for the object that is contained in the image.",
"_____no_output_____"
]
],
[
[
"from keras.applications.resnet50 import ResNet50\n\n# define ResNet50 model\nResNet50_model = ResNet50(weights='imagenet')",
"Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5\n102858752/102853048 [==============================] - 16s 0us/step\n"
]
],
[
[
"### Pre-process the Data\n\nWhen using TensorFlow as backend, Keras CNNs require a 4D array (which we'll also refer to as a 4D tensor) as input, with shape\n\n$$\n(\\text{nb_samples}, \\text{rows}, \\text{columns}, \\text{channels}),\n$$\n\nwhere `nb_samples` corresponds to the total number of images (or samples), and `rows`, `columns`, and `channels` correspond to the number of rows, columns, and channels for each image, respectively. \n\nThe `path_to_tensor` function below takes a string-valued file path to a color image as input and returns a 4D tensor suitable for supplying to a Keras CNN. The function first loads the image and resizes it to a square image that is $224 \\times 224$ pixels. Next, the image is converted to an array, which is then resized to a 4D tensor. In this case, since we are working with color images, each image has three channels. Likewise, since we are processing a single image (or sample), the returned tensor will always have shape\n\n$$\n(1, 224, 224, 3).\n$$\n\nThe `paths_to_tensor` function takes a numpy array of string-valued image paths as input and returns a 4D tensor with shape \n\n$$\n(\\text{nb_samples}, 224, 224, 3).\n$$\n\nHere, `nb_samples` is the number of samples, or number of images, in the supplied array of image paths. It is best to think of `nb_samples` as the number of 3D tensors (where each 3D tensor corresponds to a different image) in your dataset!",
"_____no_output_____"
]
],
[
[
"from keras.preprocessing import image \nfrom tqdm import tqdm\n\ndef path_to_tensor(img_path):\n # loads RGB image as PIL.Image.Image type\n img = image.load_img(img_path, target_size=(224, 224))\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\n x = image.img_to_array(img)\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\n return np.expand_dims(x, axis=0)\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]\n return np.vstack(list_of_tensors)",
"_____no_output_____"
]
],
[
[
"### Making Predictions with ResNet-50\n\nGetting the 4D tensor ready for ResNet-50, and for any other pre-trained model in Keras, requires some additional processing. First, the RGB image is converted to BGR by reordering the channels. All pre-trained models have the additional normalization step that the mean pixel (expressed in RGB as $[103.939, 116.779, 123.68]$ and calculated from all pixels in all images in ImageNet) must be subtracted from every pixel in each image. This is implemented in the imported function `preprocess_input`. If you're curious, you can check the code for `preprocess_input` [here](https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py).\n\nNow that we have a way to format our image for supplying to ResNet-50, we are now ready to use the model to extract the predictions. This is accomplished with the `predict` method, which returns an array whose $i$-th entry is the model's predicted probability that the image belongs to the $i$-th ImageNet category. This is implemented in the `ResNet50_predict_labels` function below.\n\nBy taking the argmax of the predicted probability vector, we obtain an integer corresponding to the model's predicted object class, which we can identify with an object category through the use of this [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ",
"_____no_output_____"
]
],
[
[
"from keras.applications.resnet50 import preprocess_input, decode_predictions\n\ndef ResNet50_predict_labels(img_path):\n # returns prediction vector for image located at img_path\n img = preprocess_input(path_to_tensor(img_path))\n return np.argmax(ResNet50_model.predict(img))",
"_____no_output_____"
]
],
[
[
"### Write a Dog Detector\n\nWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained ResNet-50 model, we need only check if the `ResNet50_predict_labels` function above returns a value between 151 and 268 (inclusive).\n\nWe use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).",
"_____no_output_____"
]
],
[
[
"### returns \"True\" if a dog is detected in the image stored at img_path\ndef dog_detector(img_path):\n prediction = ResNet50_predict_labels(img_path)\n return ((prediction <= 268) & (prediction >= 151)) ",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Assess the Dog Detector\n\n__Question 3:__ Use the code cell below to test the performance of your `dog_detector` function. \n- What percentage of the images in `human_files_short` have a detected dog? \n- What percentage of the images in `dog_files_short` have a detected dog?\n\n__Answer:__ ",
"_____no_output_____"
]
],
[
[
"### TODO: Test the performance of the dog_detector function\n### on the images in human_files_short and dog_files_short.\nh_count = 0\nd_count = 0\n\nfor human_file in human_files_short:\n if dog_detector(human_file):\n h_count += 1\n\nfor dog_file in dog_files_short:\n if dog_detector(dog_file):\n d_count += 1\n\nprint(\"{}%\".format(h_count))\nprint(\"{}%\".format(d_count))",
"0%\n100%\n"
]
],
[
[
"---\n<a id='step3'></a>\n## Step 3: Create a CNN to Classify Dog Breeds (from Scratch)\n\nNow that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 1%. In Step 5 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.\n\nBe careful with adding too many trainable layers! More parameters means longer training, which means you are more likely to need a GPU to accelerate the training process. Thankfully, Keras provides a handy estimate of the time that each epoch is likely to take; you can extrapolate this estimate to figure out how long it will take for your algorithm to train. \n\nWe mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have great difficulty in distinguishing between a Brittany and a Welsh Springer Spaniel. \n\nBrittany | Welsh Springer Spaniel\n- | - \n<img src=\"images/Brittany_02625.jpg\" width=\"100\"> | <img src=\"images/Welsh_springer_spaniel_08203.jpg\" width=\"200\">\n\nIt is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). \n\nCurly-Coated Retriever | American Water Spaniel\n- | -\n<img src=\"images/Curly-coated_retriever_03896.jpg\" width=\"200\"> | <img src=\"images/American_water_spaniel_00648.jpg\" width=\"200\">\n\n\nLikewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. 
\n\nYellow Labrador | Chocolate Labrador | Black Labrador\n- | -\n<img src=\"images/Labrador_retriever_06457.jpg\" width=\"150\"> | <img src=\"images/Labrador_retriever_06455.jpg\" width=\"240\"> | <img src=\"images/Labrador_retriever_06449.jpg\" width=\"220\">\n\nWe also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. \n\nRemember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! \n\n### Pre-process the Data\n\nWe rescale the images by dividing every pixel in every image by 255.",
"_____no_output_____"
]
],
[
[
"from PIL import ImageFile \nImageFile.LOAD_TRUNCATED_IMAGES = True \n\n# pre-process the data for Keras\ntrain_tensors = paths_to_tensor(train_files).astype('float32')/255\nvalid_tensors = paths_to_tensor(valid_files).astype('float32')/255\ntest_tensors = paths_to_tensor(test_files).astype('float32')/255",
"100%|██████████| 6680/6680 [01:14<00:00, 89.09it/s] \n100%|██████████| 835/835 [00:10<00:00, 83.06it/s] \n100%|██████████| 836/836 [00:09<00:00, 84.74it/s] \n"
]
],
[
[
"### (IMPLEMENTATION) Model Architecture\n\nCreate a CNN to classify dog breed. At the end of your code cell block, summarize the layers of your model by executing the line:\n \n model.summary()\n\nWe have imported some Python modules to get you started, but feel free to import as many modules as you need. If you end up getting stuck, here's a hint that specifies a model that trains relatively fast on CPU and attains >1% test accuracy in 5 epochs:\n\n\n \n__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. If you chose to use the hinted architecture above, describe why you think that CNN architecture should work well for the image classification task.\n\n__Answer:__ I used the hinted architecture above, to design the CNN architecture. Even though its small, its similar to VGG16 architechture. Increase in the number of filters at each level, helps to get more detailed information of images in the lower layers. The initial layers might detect basic characteristics like lines and circles etc, with more details regarding dog features in lower convolution layers having more filters. Finally the output of third layer is flattened and supplied to dense layer which applies softmax function, producing an output vector with 133 fields, each field corresponding to probability of each dog breed prediction.",
"_____no_output_____"
]
],
[
[
"from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.models import Sequential\n\nmodel = Sequential()\n\n### TODO: Define your architecture.\nmodel.add(Conv2D(16, 2, input_shape=train_tensors[0].shape, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=None, padding='same'))\nmodel.add(Conv2D(32, 2, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=None, padding='same'))\nmodel.add(Conv2D(64, 2, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=None, padding='same'))\nmodel.add(Conv2D(128, 2, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=None, padding='same'))\nmodel.add(Flatten())\nmodel.add(Dense(133, activation='softmax'))\nmodel.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_1 (Conv2D) (None, 223, 223, 16) 208 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 223, 223, 16) 0 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 112, 112, 16) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 111, 111, 32) 2080 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 111, 111, 32) 0 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 56, 56, 32) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 55, 55, 64) 8256 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 55, 55, 64) 0 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 28, 28, 64) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 27, 27, 128) 32896 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 27, 27, 128) 0 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 14, 14, 128) 0 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 25088) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 133) 3336837 \n=================================================================\nTotal params: 3,380,277\nTrainable params: 3,380,277\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### Compile the Model",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Train the Model\n\nTrain your model in the code cell below. Use model checkpointing to save the model that attains the best validation loss.\n\nYou are welcome to [augment the training data](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html), but this is not a requirement. ",
"_____no_output_____"
]
],
[
[
"from keras.callbacks import ModelCheckpoint \n\n### TODO: specify the number of epochs that you would like to use to train the model.\n\nepochs = 6\n\n### Do NOT modify the code below this line.\n\ncheckpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5', \n verbose=1, save_best_only=True)\n\nmodel.fit(train_tensors, train_targets, \n validation_data=(valid_tensors, valid_targets),\n epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)",
"Train on 6680 samples, validate on 835 samples\nEpoch 1/6\n6660/6680 [============================>.] - ETA: 0s - loss: 4.8172 - acc: 0.0251Epoch 00001: val_loss improved from inf to 4.64434, saving model to saved_models/weights.best.from_scratch.hdf5\n6680/6680 [==============================] - 33s 5ms/step - loss: 4.8162 - acc: 0.0254 - val_loss: 4.6443 - val_acc: 0.0491\nEpoch 2/6\n6660/6680 [============================>.] - ETA: 0s - loss: 3.9907 - acc: 0.1299Epoch 00002: val_loss improved from 4.64434 to 4.32155, saving model to saved_models/weights.best.from_scratch.hdf5\n6680/6680 [==============================] - 30s 4ms/step - loss: 3.9904 - acc: 0.1295 - val_loss: 4.3216 - val_acc: 0.0707\nEpoch 3/6\n6660/6680 [============================>.] - ETA: 0s - loss: 2.6700 - acc: 0.3731Epoch 00003: val_loss improved from 4.32155 to 4.24767, saving model to saved_models/weights.best.from_scratch.hdf5\n6680/6680 [==============================] - 30s 4ms/step - loss: 2.6689 - acc: 0.3731 - val_loss: 4.2477 - val_acc: 0.0814\nEpoch 4/6\n6660/6680 [============================>.] - ETA: 0s - loss: 1.6808 - acc: 0.5893Epoch 00004: val_loss did not improve\n6680/6680 [==============================] - 30s 4ms/step - loss: 1.6804 - acc: 0.5897 - val_loss: 4.4300 - val_acc: 0.0898\nEpoch 5/6\n6660/6680 [============================>.] - ETA: 0s - loss: 0.9579 - acc: 0.7467Epoch 00005: val_loss did not improve\n6680/6680 [==============================] - 30s 4ms/step - loss: 0.9573 - acc: 0.7470 - val_loss: 4.7246 - val_acc: 0.0862\nEpoch 6/6\n6660/6680 [============================>.] - ETA: 0s - loss: 0.5369 - acc: 0.8536Epoch 00006: val_loss did not improve\n6680/6680 [==============================] - 30s 4ms/step - loss: 0.5374 - acc: 0.8534 - val_loss: 5.3432 - val_acc: 0.0647\n"
]
],
[
[
"### Load the Model with the Best Validation Loss",
"_____no_output_____"
]
],
[
[
"model.load_weights('saved_models/weights.best.from_scratch.hdf5')",
"_____no_output_____"
]
],
[
[
"### Test the Model\n\nTry out your model on the test dataset of dog images. Ensure that your test accuracy is greater than 1%.",
"_____no_output_____"
]
],
[
[
"# get index of predicted dog breed for each image in test set\ndog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]\n\n# report test accuracy\ntest_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)\nprint('Test accuracy: %.4f%%' % test_accuracy)",
"Test accuracy: 7.4163%\n"
]
],
[
[
"---\n<a id='step4'></a>\n## Step 4: Use a CNN to Classify Dog Breeds\n\nTo reduce training time without sacrificing accuracy, we show you how to train a CNN using transfer learning. In the following step, you will get a chance to use transfer learning to train your own CNN.\n\n### Obtain Bottleneck Features",
"_____no_output_____"
]
],
[
[
"bottleneck_features = np.load('/data/bottleneck_features/DogVGG16Data.npz')\ntrain_VGG16 = bottleneck_features['train']\nvalid_VGG16 = bottleneck_features['valid']\ntest_VGG16 = bottleneck_features['test']",
"_____no_output_____"
]
],
[
[
"### Model Architecture\n\nThe model uses the the pre-trained VGG-16 model as a fixed feature extractor, where the last convolutional output of VGG-16 is fed as input to our model. We only add a global average pooling layer and a fully connected layer, where the latter contains one node for each dog category and is equipped with a softmax.",
"_____no_output_____"
]
],
[
[
"VGG16_model = Sequential()\nVGG16_model.add(GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]))\nVGG16_model.add(Dense(133, activation='softmax'))\n\nVGG16_model.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nglobal_average_pooling2d_1 ( (None, 512) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 133) 68229 \n=================================================================\nTotal params: 68,229\nTrainable params: 68,229\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### Compile the Model",
"_____no_output_____"
]
],
[
[
"VGG16_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"### Train the Model",
"_____no_output_____"
]
],
[
[
"checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5', \n                               verbose=1, save_best_only=True)\n\nVGG16_model.fit(train_VGG16, train_targets, \n          validation_data=(valid_VGG16, valid_targets),\n          epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)",
"Train on 6680 samples, validate on 835 samples\nEpoch 1/20\n6580/6680 [============================>.] - ETA: 0s - loss: 12.0839 - acc: 0.1321Epoch 00001: val_loss improved from inf to 10.69844, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 333us/step - loss: 12.0613 - acc: 0.1334 - val_loss: 10.6984 - val_acc: 0.2240\nEpoch 2/20\n6500/6680 [============================>.] - ETA: 0s - loss: 9.6480 - acc: 0.2991Epoch 00002: val_loss improved from 10.69844 to 9.50520, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 266us/step - loss: 9.6561 - acc: 0.2991 - val_loss: 9.5052 - val_acc: 0.3150\nEpoch 3/20\n6580/6680 [============================>.] - ETA: 0s - loss: 9.0605 - acc: 0.3711Epoch 00003: val_loss improved from 9.50520 to 9.35946, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 263us/step - loss: 9.0565 - acc: 0.3713 - val_loss: 9.3595 - val_acc: 0.3413\nEpoch 4/20\n6580/6680 [============================>.] - ETA: 0s - loss: 8.8107 - acc: 0.4081Epoch 00004: val_loss improved from 9.35946 to 9.24635, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 263us/step - loss: 8.8285 - acc: 0.4064 - val_loss: 9.2463 - val_acc: 0.3449\nEpoch 5/20\n6620/6680 [============================>.] - ETA: 0s - loss: 8.6567 - acc: 0.4251Epoch 00005: val_loss improved from 9.24635 to 9.12995, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 261us/step - loss: 8.6641 - acc: 0.4244 - val_loss: 9.1300 - val_acc: 0.3497\nEpoch 6/20\n6660/6680 [============================>.] 
- ETA: 0s - loss: 8.4802 - acc: 0.4437Epoch 00006: val_loss improved from 9.12995 to 9.02901, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 255us/step - loss: 8.4789 - acc: 0.4439 - val_loss: 9.0290 - val_acc: 0.3641\nEpoch 7/20\n6620/6680 [============================>.] - ETA: 0s - loss: 8.3790 - acc: 0.4542Epoch 00007: val_loss did not improve\n6680/6680 [==============================] - 2s 252us/step - loss: 8.3845 - acc: 0.4537 - val_loss: 9.0557 - val_acc: 0.3653\nEpoch 8/20\n6620/6680 [============================>.] - ETA: 0s - loss: 8.2692 - acc: 0.4606Epoch 00008: val_loss improved from 9.02901 to 8.82772, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 255us/step - loss: 8.2712 - acc: 0.4603 - val_loss: 8.8277 - val_acc: 0.3641\nEpoch 9/20\n6660/6680 [============================>.] - ETA: 0s - loss: 8.0042 - acc: 0.4728Epoch 00009: val_loss improved from 8.82772 to 8.51439, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 254us/step - loss: 8.0045 - acc: 0.4729 - val_loss: 8.5144 - val_acc: 0.3832\nEpoch 10/20\n6580/6680 [============================>.] - ETA: 0s - loss: 7.7527 - acc: 0.4921Epoch 00010: val_loss improved from 8.51439 to 8.43240, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 258us/step - loss: 7.7537 - acc: 0.4922 - val_loss: 8.4324 - val_acc: 0.4012\nEpoch 11/20\n6620/6680 [============================>.] - ETA: 0s - loss: 7.5419 - acc: 0.5097Epoch 00011: val_loss improved from 8.43240 to 8.25089, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 256us/step - loss: 7.5465 - acc: 0.5096 - val_loss: 8.2509 - val_acc: 0.4132\nEpoch 12/20\n6660/6680 [============================>.] 
- ETA: 0s - loss: 7.4519 - acc: 0.5233Epoch 00012: val_loss improved from 8.25089 to 8.14655, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 253us/step - loss: 7.4561 - acc: 0.5231 - val_loss: 8.1465 - val_acc: 0.4204\nEpoch 13/20\n6620/6680 [============================>.] - ETA: 0s - loss: 7.4262 - acc: 0.5281Epoch 00013: val_loss did not improve\n6680/6680 [==============================] - 2s 253us/step - loss: 7.4227 - acc: 0.5281 - val_loss: 8.1862 - val_acc: 0.4096\nEpoch 14/20\n6640/6680 [============================>.] - ETA: 0s - loss: 7.4037 - acc: 0.5309Epoch 00014: val_loss did not improve\n6680/6680 [==============================] - 2s 255us/step - loss: 7.4007 - acc: 0.5310 - val_loss: 8.1591 - val_acc: 0.4204\nEpoch 15/20\n6660/6680 [============================>.] - ETA: 0s - loss: 7.3377 - acc: 0.5365Epoch 00015: val_loss improved from 8.14655 to 8.08985, saving model to saved_models/weights.best..hdf5\n6680/6680 [==============================] - 2s 255us/step - loss: 7.3447 - acc: 0.5361 - val_loss: 8.0899 - val_acc: 0.4263\nEpoch 16/20\n6620/6680 [============================>.] - ETA: 0s - loss: 7.3254 - acc: 0.5376Epoch 00016: val_loss did not improve\n6680/6680 [==============================] - 2s 259us/step - loss: 7.3176 - acc: 0.5382 - val_loss: 8.2065 - val_acc: 0.4228\nEpoch 17/20\n6600/6680 [============================>.] - ETA: 0s - loss: 7.3222 - acc: 0.5395Epoch 00017: val_loss did not improve\n6680/6680 [==============================] - 2s 262us/step - loss: 7.3073 - acc: 0.5404 - val_loss: 8.1735 - val_acc: 0.4263\nEpoch 18/20\n6580/6680 [============================>.] - ETA: 0s - loss: 7.2839 - acc: 0.5445Epoch 00018: val_loss did not improve\n6680/6680 [==============================] - 2s 264us/step - loss: 7.2981 - acc: 0.5437 - val_loss: 8.1365 - val_acc: 0.4251\nEpoch 19/20\n6600/6680 [============================>.] 
- ETA: 0s - loss: 7.3062 - acc: 0.5436Epoch 00019: val_loss did not improve\n6680/6680 [==============================] - 2s 263us/step - loss: 7.2918 - acc: 0.5445 - val_loss: 8.1623 - val_acc: 0.4383\nEpoch 20/20\n6600/6680 [============================>.] - ETA: 0s - loss: 7.2847 - acc: 0.5461Epoch 00020: val_loss did not improve\n6680/6680 [==============================] - 2s 263us/step - loss: 7.2892 - acc: 0.5458 - val_loss: 8.1148 - val_acc: 0.4395\n"
]
],
[
[
"### Load the Model with the Best Validation Loss",
"_____no_output_____"
]
],
[
[
"VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')",
"_____no_output_____"
]
],
[
[
"### Test the Model\n\nNow, we can use the CNN to test how well it identifies breed within our test dataset of dog images. We print the test accuracy below.",
"_____no_output_____"
]
],
[
[
"# get index of predicted dog breed for each image in test set\nVGG16_predictions = [np.argmax(VGG16_model.predict(np.expand_dims(feature, axis=0))) for feature in test_VGG16]\n\n# report test accuracy\ntest_accuracy = 100*np.sum(np.array(VGG16_predictions)==np.argmax(test_targets, axis=1))/len(VGG16_predictions)\nprint('Test accuracy: %.4f%%' % test_accuracy)",
"Test accuracy: 43.5407%\n"
]
],
[
[
"### Predict Dog Breed with the Model",
"_____no_output_____"
]
],
[
[
"from extract_bottleneck_features import *\n\ndef VGG16_predict_breed(img_path):\n # extract bottleneck features\n bottleneck_feature = extract_VGG16(path_to_tensor(img_path))\n # obtain predicted vector\n predicted_vector = VGG16_model.predict(bottleneck_feature)\n # return dog breed that is predicted by the model\n return dog_names[np.argmax(predicted_vector)]",
"_____no_output_____"
]
],
[
[
"---\n<a id='step5'></a>\n## Step 5: Create a CNN to Classify Dog Breeds (using Transfer Learning)\n\nYou will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.\n\nIn Step 4, we used transfer learning to create a CNN using VGG-16 bottleneck features. In this section, you must use the bottleneck features from a different pre-trained model. To make things easier for you, we have pre-computed the features for all of the networks that are currently available in Keras. These are already in the workspace, at /data/bottleneck_features. If you wish to download them on a different machine, they can be found at:\n- [VGG-19](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogVGG19Data.npz) bottleneck features\n- [ResNet-50](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogResnet50Data.npz) bottleneck features\n- [Inception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogInceptionV3Data.npz) bottleneck features\n- [Xception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogXceptionData.npz) bottleneck features\n\nThe files are encoded as such:\n\n Dog{network}Data.npz\n \nwhere `{network}`, in the above filename, can be one of `VGG19`, `Resnet50`, `InceptionV3`, or `Xception`. 
\n\nThe above architectures are downloaded and stored for you in the `/data/bottleneck_features/` folder.\n\nThis means the following will be in the `/data/bottleneck_features/` folder:\n\n`DogVGG19Data.npz`\n`DogResnet50Data.npz`\n`DogInceptionV3Data.npz`\n`DogXceptionData.npz`\n\n\n\n### (IMPLEMENTATION) Obtain Bottleneck Features\n\nIn the code block below, extract the bottleneck features corresponding to the train, test, and validation sets by running the following:\n\n bottleneck_features = np.load('/data/bottleneck_features/Dog{network}Data.npz')\n train_{network} = bottleneck_features['train']\n valid_{network} = bottleneck_features['valid']\n test_{network} = bottleneck_features['test']",
"_____no_output_____"
]
],
[
[
"### TODO: Obtain bottleneck features from another pre-trained CNN.\nbottleneck_features = np.load('/data/bottleneck_features/DogXceptionData.npz')\ntrain_Xception = bottleneck_features['train']\nvalid_Xception = bottleneck_features['valid']\ntest_Xception = bottleneck_features['test']",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Model Architecture\n\nCreate a CNN to classify dog breed. At the end of your code cell block, summarize the layers of your model by executing the line:\n    \n        <your model's name>.summary()\n   \n__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.\n\n__Answer:__ I added a GlobalAveragePooling2D layer to summarize the extracted feature maps and reduce the output size. Then I added a dense layer with the ReLU activation function, since ReLU mitigates the vanishing-gradient problem. Finally, a dense output layer with a softmax activation produces the probability for each of the 133 dog breeds.\n\n",
"_____no_output_____"
]
],
[
[
"### TODO: Define your architecture.\nXception_model = Sequential()\nXception_model.add(GlobalAveragePooling2D(input_shape=train_Xception.shape[1:]))\nXception_model.add(Dense(266, activation='relu'))\nXception_model.add(Dense(133, activation='softmax'))\n\nXception_model.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nglobal_average_pooling2d_2 ( (None, 2048) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 266) 545034 \n_________________________________________________________________\ndense_4 (Dense) (None, 133) 35511 \n=================================================================\nTotal params: 580,545\nTrainable params: 580,545\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### (IMPLEMENTATION) Compile the Model",
"_____no_output_____"
]
],
[
[
"### TODO: Compile the model.\nXception_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Train the Model\n\nTrain your model in the code cell below. Use model checkpointing to save the model that attains the best validation loss. \n\nYou are welcome to [augment the training data](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html), but this is not a requirement. ",
"_____no_output_____"
]
],
[
[
"### TODO: Train the model.\ncheckpointer2 = ModelCheckpoint(filepath='saved_models/weights.best.Xception.hdf5', \n verbose=1, save_best_only=True)\n\nXception_model.fit(train_Xception, train_targets, \n validation_data=(valid_Xception, valid_targets),\n epochs=20, batch_size=20, callbacks=[checkpointer2], verbose=1)",
"Train on 6680 samples, validate on 835 samples\nEpoch 1/20\n6580/6680 [============================>.] - ETA: 0s - loss: 1.1976 - acc: 0.6964Epoch 00001: val_loss improved from inf to 0.68298, saving model to saved_models/weights.best.Xception.hdf5\n6680/6680 [==============================] - 4s 528us/step - loss: 1.1904 - acc: 0.6976 - val_loss: 0.6830 - val_acc: 0.7820\nEpoch 2/20\n6540/6680 [============================>.] - ETA: 0s - loss: 0.4742 - acc: 0.8518Epoch 00002: val_loss improved from 0.68298 to 0.60097, saving model to saved_models/weights.best.Xception.hdf5\n6680/6680 [==============================] - 3s 467us/step - loss: 0.4790 - acc: 0.8506 - val_loss: 0.6010 - val_acc: 0.8216\nEpoch 3/20\n6580/6680 [============================>.] - ETA: 0s - loss: 0.3380 - acc: 0.8891Epoch 00003: val_loss did not improve\n6680/6680 [==============================] - 3s 464us/step - loss: 0.3399 - acc: 0.8888 - val_loss: 0.6685 - val_acc: 0.8048\nEpoch 4/20\n6620/6680 [============================>.] - ETA: 0s - loss: 0.2392 - acc: 0.9207Epoch 00004: val_loss did not improve\n6680/6680 [==============================] - 3s 468us/step - loss: 0.2392 - acc: 0.9205 - val_loss: 0.6301 - val_acc: 0.8299\nEpoch 5/20\n6620/6680 [============================>.] - ETA: 0s - loss: 0.1823 - acc: 0.9400Epoch 00005: val_loss did not improve\n6680/6680 [==============================] - 3s 470us/step - loss: 0.1833 - acc: 0.9397 - val_loss: 0.6935 - val_acc: 0.8251\nEpoch 6/20\n6580/6680 [============================>.] - ETA: 0s - loss: 0.1418 - acc: 0.9532Epoch 00006: val_loss did not improve\n6680/6680 [==============================] - 3s 470us/step - loss: 0.1417 - acc: 0.9534 - val_loss: 0.6660 - val_acc: 0.8503\nEpoch 7/20\n6660/6680 [============================>.] 
- ETA: 0s - loss: 0.1126 - acc: 0.9598Epoch 00007: val_loss did not improve\n6680/6680 [==============================] - 3s 482us/step - loss: 0.1124 - acc: 0.9599 - val_loss: 0.7253 - val_acc: 0.8479\nEpoch 8/20\n6560/6680 [============================>.] - ETA: 0s - loss: 0.0994 - acc: 0.9659Epoch 00008: val_loss did not improve\n6680/6680 [==============================] - 3s 478us/step - loss: 0.1003 - acc: 0.9660 - val_loss: 0.8654 - val_acc: 0.8299\nEpoch 9/20\n6640/6680 [============================>.] - ETA: 0s - loss: 0.0796 - acc: 0.9729Epoch 00009: val_loss did not improve\n6680/6680 [==============================] - 3s 487us/step - loss: 0.0794 - acc: 0.9729 - val_loss: 0.9712 - val_acc: 0.8323\nEpoch 10/20\n6580/6680 [============================>.] - ETA: 0s - loss: 0.0668 - acc: 0.9795Epoch 00010: val_loss did not improve\n6680/6680 [==============================] - 3s 466us/step - loss: 0.0675 - acc: 0.9789 - val_loss: 0.9099 - val_acc: 0.8395\nEpoch 11/20\n6660/6680 [============================>.] - ETA: 0s - loss: 0.0578 - acc: 0.9832Epoch 00011: val_loss did not improve\n6680/6680 [==============================] - 3s 465us/step - loss: 0.0588 - acc: 0.9829 - val_loss: 0.9476 - val_acc: 0.8311\nEpoch 12/20\n6580/6680 [============================>.] - ETA: 0s - loss: 0.0500 - acc: 0.9842Epoch 00012: val_loss did not improve\n6680/6680 [==============================] - 3s 469us/step - loss: 0.0498 - acc: 0.9841 - val_loss: 0.9565 - val_acc: 0.8407\nEpoch 13/20\n6620/6680 [============================>.] - ETA: 0s - loss: 0.0519 - acc: 0.9831Epoch 00013: val_loss did not improve\n6680/6680 [==============================] - 3s 467us/step - loss: 0.0520 - acc: 0.9831 - val_loss: 1.0281 - val_acc: 0.8383\nEpoch 14/20\n6640/6680 [============================>.] 
- ETA: 0s - loss: 0.0419 - acc: 0.9869Epoch 00014: val_loss did not improve\n6680/6680 [==============================] - 3s 469us/step - loss: 0.0419 - acc: 0.9868 - val_loss: 0.9843 - val_acc: 0.8395\nEpoch 15/20\n6600/6680 [============================>.] - ETA: 0s - loss: 0.0370 - acc: 0.9894Epoch 00015: val_loss did not improve\n6680/6680 [==============================] - 3s 468us/step - loss: 0.0386 - acc: 0.9892 - val_loss: 1.0208 - val_acc: 0.8371\nEpoch 16/20\n6600/6680 [============================>.] - ETA: 0s - loss: 0.0320 - acc: 0.9908Epoch 00016: val_loss did not improve\n6680/6680 [==============================] - 3s 470us/step - loss: 0.0319 - acc: 0.9907 - val_loss: 1.0197 - val_acc: 0.8455\nEpoch 17/20\n6620/6680 [============================>.] - ETA: 0s - loss: 0.0307 - acc: 0.9911Epoch 00017: val_loss did not improve\n6680/6680 [==============================] - 3s 471us/step - loss: 0.0305 - acc: 0.9912 - val_loss: 1.1360 - val_acc: 0.8371\nEpoch 18/20\n6560/6680 [============================>.] - ETA: 0s - loss: 0.0283 - acc: 0.9925Epoch 00018: val_loss did not improve\n6680/6680 [==============================] - 3s 471us/step - loss: 0.0299 - acc: 0.9924 - val_loss: 1.2179 - val_acc: 0.8335\nEpoch 19/20\n6580/6680 [============================>.] - ETA: 0s - loss: 0.0284 - acc: 0.9916Epoch 00019: val_loss did not improve\n6680/6680 [==============================] - 3s 467us/step - loss: 0.0280 - acc: 0.9918 - val_loss: 1.0624 - val_acc: 0.8443\nEpoch 20/20\n6620/6680 [============================>.] - ETA: 0s - loss: 0.0257 - acc: 0.9937Epoch 00020: val_loss did not improve\n6680/6680 [==============================] - 3s 470us/step - loss: 0.0256 - acc: 0.9936 - val_loss: 1.2019 - val_acc: 0.8263\n"
]
],
[
[
"### (IMPLEMENTATION) Load the Model with the Best Validation Loss",
"_____no_output_____"
]
],
[
[
"### TODO: Load the model weights with the best validation loss.\nXception_model.load_weights('saved_models/weights.best.Xception.hdf5')",
"_____no_output_____"
]
],
[
[
"### (IMPLEMENTATION) Test the Model\n\nTry out your model on the test dataset of dog images. Ensure that your test accuracy is greater than 60%.",
"_____no_output_____"
]
],
[
[
"### TODO: Calculate classification accuracy on the test dataset.\n# get index of predicted dog breed for each image in test set\nXception_predictions = [np.argmax(Xception_model.predict(np.expand_dims(feature, axis=0))) for feature in test_Xception]\n\n# report test accuracy\ntest_accuracy = 100*np.sum(np.array(Xception_predictions)==np.argmax(test_targets, axis=1))/len(Xception_predictions)\nprint('Test accuracy: %.4f%%' % test_accuracy)",
"Test accuracy: 82.5359%\n"
]
],
[
[
"### (IMPLEMENTATION) Predict Dog Breed with the Model\n\nWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan_hound`, etc) that is predicted by your model. \n\nSimilar to the analogous function in Step 5, your function should have three steps:\n1. Extract the bottleneck features corresponding to the chosen CNN model.\n2. Supply the bottleneck features as input to the model to return the predicted vector. Note that the argmax of this prediction vector gives the index of the predicted dog breed.\n3. Use the `dog_names` array defined in Step 0 of this notebook to return the corresponding breed.\n\nThe functions to extract the bottleneck features can be found in `extract_bottleneck_features.py`, and they have been imported in an earlier code cell. To obtain the bottleneck features corresponding to your chosen CNN architecture, you need to use the function\n\n extract_{network}\n \nwhere `{network}`, in the above filename, should be one of `VGG19`, `Resnet50`, `InceptionV3`, or `Xception`.",
"_____no_output_____"
]
],
[
[
"### TODO: Write a function that takes a path to an image as input\n### and returns the dog breed that is predicted by the model.\nfrom extract_bottleneck_features import *\n\ndef Xception_predict_breed(img_path):\n # extract bottleneck features\n bottleneck_feature = extract_Xception(path_to_tensor(img_path))\n # obtain predicted vector\n predicted_vector = Xception_model.predict(bottleneck_feature)\n # return dog breed that is predicted by the model\n return dog_names[np.argmax(predicted_vector)]",
"_____no_output_____"
]
],
[
[
"---\n<a id='step6'></a>\n## Step 6: Write your Algorithm\n\nWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,\n- if a __dog__ is detected in the image, return the predicted breed.\n- if a __human__ is detected in the image, return the resembling dog breed.\n- if __neither__ is detected in the image, provide output that indicates an error.\n\nYou are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 5 to predict dog breed. \n\nSome sample output for our algorithm is provided below, but feel free to design your own user experience!\n\n\n\n\n### (IMPLEMENTATION) Write your Algorithm",
"_____no_output_____"
]
],
[
[
"### TODO: Write your algorithm.\n### Feel free to use as many code cells as needed.\nimport cv2\nimport matplotlib.pyplot as plt \nimport numpy as np\n%matplotlib inline\n\ndef detect_breed(image_path):\n    img = cv2.imread(image_path)\n    cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n    plt.imshow(cv_rgb)\n    if face_detector(image_path):\n        print(\"hello, human!\")\n        plt.show()\n        print(\"You look like a ...\")\n    elif dog_detector(image_path):\n        print(\"hello, dog!\")\n        plt.show()\n        print(\"Your breed is ...\")\n    else:\n        print(\"You're neither human nor dog! Who are you...\")\n        return\n    print(Xception_predict_breed(image_path))",
"_____no_output_____"
]
],
[
[
"---\n<a id='step7'></a>\n## Step 7: Test Your Algorithm\n\nIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that __you__ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?\n\n### (IMPLEMENTATION) Test Your Algorithm on Sample Images!\n\nTest your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. \n\n__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.\n\n__Answer:__ Output is better than I expected. In order to improve accuracy, I could increase the number of epochs, increase the number of Convolution layers, decrease the learning rate, etc.",
"_____no_output_____"
]
],
[
[
"## TODO: Execute your algorithm from Step 6 on\n## at least 6 images on your computer.\n## Feel free to use as many code cells as needed.\ndetect_breed(\"dogImages/test/035.Boykin_spaniel/Boykin_spaniel_02484.jpg\")\ndetect_breed(\"dogImages/test/005.Alaskan_malamute/Alaskan_malamute_00309.jpg\")\ndetect_breed(\"dogImages/test/050.Chinese_shar-pei/Chinese_shar-pei_03546.jpg\")\n\ndetect_breed(\"lfw/Vladimir_Putin/Vladimir_Putin_0049.jpg\")\ndetect_breed(\"lfw/Donald_Trump/Donald_Trump_0001.jpg\")\ndetect_breed(\"lfw/Winston_Churchill/Winston_Churchill_0001.jpg\")",
"hello, dog!\n"
]
],
[
[
"# Please download your notebook to submit\n\nIn order to submit, please do the following:\n1. Download an HTML version of the notebook to your computer using 'File: Download as...'\n2. Click on the orange Jupyter circle on the top left of the workspace.\n3. Navigate into the dog-project folder to ensure that you are using the provided dog_images, lfw, and bottleneck_features folders; this means that those folders will *not* appear in the dog-project folder. If they do appear because you downloaded them, delete them.\n4. While in the dog-project folder, upload the HTML version of this notebook you just downloaded. The upload button is on the top right.\n5. Navigate back to the home folder by clicking on the two dots next to the folder icon, and then open up a terminal under the 'new' tab on the top right\n6. Zip the dog-project folder with the following command in the terminal:\n `zip -r dog-project.zip dog-project`\n7. Download the zip file by clicking on the square next to it and selecting 'download'. This will be the zip file you turn in on the next node after this workspace!",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.